gitea/modules/repository/repo.go


// Copyright 2019 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package repository

import (
"context"
"errors"
"fmt"
"io"
"net/http"
"path"
"strings"
"time"
"code.gitea.io/gitea/models/db"
git_model "code.gitea.io/gitea/models/git"
"code.gitea.io/gitea/models/organization"
repo_model "code.gitea.io/gitea/models/repo"
user_model "code.gitea.io/gitea/models/user"
"code.gitea.io/gitea/modules/container"
"code.gitea.io/gitea/modules/git"
"code.gitea.io/gitea/modules/lfs"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/migration"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/timeutil"
"code.gitea.io/gitea/modules/util"
"gopkg.in/ini.v1" //nolint:depguard
)
/*
GitHub, GitLab, Gogs: *.wiki.git
BitBucket: *.git/wiki
*/
var commonWikiURLSuffixes = []string{".wiki.git", ".git/wiki"}
// WikiRemoteURL returns an accessible repository URL for the wiki if one exists.
// Otherwise, it returns an empty string.
func WikiRemoteURL(ctx context.Context, remote string) string {
remote = strings.TrimSuffix(remote, ".git")
for _, suffix := range commonWikiURLSuffixes {
wikiURL := remote + suffix
if git.IsRepoURLAccessible(ctx, wikiURL) {
return wikiURL
}
}
return ""
}
// MigrateRepositoryGitData starts migrating git-related data after the migrating repository record has been created
func MigrateRepositoryGitData(ctx context.Context, u *user_model.User,
repo *repo_model.Repository, opts migration.MigrateOptions,
httpTransport *http.Transport,
) (*repo_model.Repository, error) {
repoPath := repo_model.RepoPath(u.Name, opts.RepoName)
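// Seed the watcher count: every owner-team member for an organization, or just the owning user otherwise.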
if u.IsOrganization() {
t, err := organization.OrgFromUser(u).GetOwnerTeam(ctx)
if err != nil {
return nil, err
}
repo.NumWatches = t.NumMembers
} else {
repo.NumWatches = 1
}
migrateTimeout := time.Duration(setting.Git.Timeout.Migrate) * time.Second
var err error
if err = util.RemoveAll(repoPath); err != nil {
return repo, fmt.Errorf("Failed to remove %s: %w", repoPath, err)
}
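// Mirror-clone the source repository so that all refs (branches and tags) are copied.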
if err = git.Clone(ctx, opts.CloneAddr, repoPath, git.CloneRepoOptions{
Mirror: true,
Quiet: true,
Timeout: migrateTimeout,
SkipTLSVerify: setting.Migrations.SkipTLSVerify,
}); err != nil {
if errors.Is(err, context.DeadlineExceeded) {
return repo, fmt.Errorf("Clone timed out. Consider increasing [git.timeout] MIGRATE in app.ini. Underlying Error: %w", err)
}
return repo, fmt.Errorf("Clone: %w", err)
}
if err := git.WriteCommitGraph(ctx, repoPath); err != nil {
return repo, err
}
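// Optionally migrate the wiki: probe the common wiki URL suffixes of the source and, if one is reachable, mirror-clone it next to the main repository.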
if opts.Wiki {
wikiPath := repo_model.WikiPath(u.Name, opts.RepoName)
wikiRemotePath := WikiRemoteURL(ctx, opts.CloneAddr)
if len(wikiRemotePath) > 0 {
if err := util.RemoveAll(wikiPath); err != nil {
return repo, fmt.Errorf("Failed to remove %s: %w", wikiPath, err)
}
if err := git.Clone(ctx, wikiRemotePath, wikiPath, git.CloneRepoOptions{
Mirror: true,
Quiet: true,
Timeout: migrateTimeout,
Branch: "master",
SkipTLSVerify: setting.Migrations.SkipTLSVerify,
}); err != nil {
log.Warn("Clone wiki: %v", err)
if err := util.RemoveAll(wikiPath); err != nil {
return repo, fmt.Errorf("Failed to remove %s: %w", wikiPath, err)
}
} else {
if err := git.WriteCommitGraph(ctx, wikiPath); err != nil {
return repo, err
}
}
}
}
if repo.OwnerID == u.ID {
repo.Owner = u
}
if err = CheckDaemonExportOK(ctx, repo); err != nil {
return repo, fmt.Errorf("checkDaemonExportOK: %w", err)
}
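// Regenerate the auxiliary info files needed to serve the repository over the "dumb" HTTP protocol.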
if stdout, _, err := git.NewCommand(ctx, "update-server-info").
SetDescription(fmt.Sprintf("MigrateRepositoryGitData(git update-server-info): %s", repoPath)).
RunStdString(&git.RunOpts{Dir: repoPath}); err != nil {
log.Error("MigrateRepositoryGitData(git update-server-info) in %v: Stdout: %s\nError: %v", repo, stdout, err)
return repo, fmt.Errorf("error in MigrateRepositoryGitData(git update-server-info): %w", err)
}
gitRepo, err := git.OpenRepository(ctx, repoPath)
if err != nil {
return repo, fmt.Errorf("OpenRepository: %w", err)
}
defer gitRepo.Close()
repo.IsEmpty, err = gitRepo.IsEmpty()
if err != nil {
return repo, fmt.Errorf("git.IsEmpty: %w", err)
}
if !repo.IsEmpty {
if len(repo.DefaultBranch) == 0 {
// Try to get HEAD branch and set it as default branch.
headBranch, err := gitRepo.GetHEADBranch()
if err != nil {
return repo, fmt.Errorf("GetHEADBranch: %w", err)
}
if headBranch != nil {
repo.DefaultBranch = headBranch.Name
}
}
if !opts.Releases {
// note: this will greatly improve release (tag) sync
// for pull-mirrors with many tags
repo.IsMirror = opts.Mirror
if err = SyncReleasesWithTags(repo, gitRepo); err != nil {
log.Error("Failed to synchronize tags to releases for repository: %v", err)
}
}
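// When LFS migration is requested, determine the LFS endpoint (explicit or derived from the clone address) and fetch any LFS objects referenced by the migrated history.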
if opts.LFS {
endpoint := lfs.DetermineEndpoint(opts.CloneAddr, opts.LFSEndpoint)
lfsClient := lfs.NewClient(endpoint, httpTransport)
if err = StoreMissingLfsObjectsInRepository(ctx, repo, gitRepo, lfsClient); err != nil {
log.Error("Failed to store missing LFS objects for repository: %v", err)
}
}
}
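// The remaining bookkeeping (mirror record, repository flags, size) runs inside a single database transaction.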
ctx, committer, err := db.TxContext(db.DefaultContext)
if err != nil {
return nil, err
}
defer committer.Close()
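// For mirrors, record the sync schedule; a user-supplied interval is accepted only if it is zero (disabled) or at least the configured minimum.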
if opts.Mirror {
mirrorModel := repo_model.Mirror{
RepoID: repo.ID,
Interval: setting.Mirror.DefaultInterval,
EnablePrune: true,
NextUpdateUnix: timeutil.TimeStampNow().AddDuration(setting.Mirror.DefaultInterval),
LFS: opts.LFS,
}
if opts.LFS {
mirrorModel.LFSEndpoint = opts.LFSEndpoint
}
if opts.MirrorInterval != "" {
parsedInterval, err := time.ParseDuration(opts.MirrorInterval)
if err != nil {
log.Error("Failed to set Interval: %v", err)
return repo, err
}
if parsedInterval == 0 {
mirrorModel.Interval = 0
mirrorModel.NextUpdateUnix = 0
} else if parsedInterval < setting.Mirror.MinInterval {
err := fmt.Errorf("interval %s is set below Minimum Interval of %s", parsedInterval, setting.Mirror.MinInterval)
log.Error("Interval: %s is too frequent", opts.MirrorInterval)
return repo, err
} else {
mirrorModel.Interval = parsedInterval
mirrorModel.NextUpdateUnix = timeutil.TimeStampNow().AddDuration(parsedInterval)
}
}
if err = repo_model.InsertMirror(ctx, &mirrorModel); err != nil {
return repo, fmt.Errorf("InsertOne: %w", err)
}
repo.IsMirror = true
if err = UpdateRepository(ctx, repo, false); err != nil {
return nil, err
}
// this is necessary to sync local tags from the remote
configName := fmt.Sprintf("remote.%s.fetch", mirrorModel.GetRemoteName())
if stdout, _, err := git.NewCommand(ctx, "config").
AddOptionValues("--add", configName, `+refs/tags/*:refs/tags/*`).
RunStdString(&git.RunOpts{Dir: repoPath}); err != nil {
log.Error("MigrateRepositoryGitData(git config --add <remote> +refs/tags/*:refs/tags/*) in %v: Stdout: %s\nError: %v", repo, stdout, err)
return repo, fmt.Errorf("error in MigrateRepositoryGitData(git config --add <remote> +refs/tags/*:refs/tags/*): %w", err)
}
} else {
if err = UpdateRepoSize(ctx, repo); err != nil {
log.Error("Failed to update size for repository: %v", err)
}
if repo, err = CleanUpMigrateInfo(ctx, repo); err != nil {
return nil, err
}
}
return repo, committer.Commit()
}
// cleanUpMigrateGitConfig removes mirror info, which would otherwise prevent "push --all".
// This also removes possible user credentials.
func cleanUpMigrateGitConfig(configPath string) error {
cfg, err := ini.Load(configPath) // FIXME: the ini package doesn't really work with git config files
if err != nil {
return fmt.Errorf("open config file: %w", err)
}
cfg.DeleteSection("remote \"origin\"")
if err = cfg.SaveToIndent(configPath, "\t"); err != nil {
return fmt.Errorf("save config file: %w", err)
}
return nil
}
// CleanUpMigrateInfo finishes the repository and/or wiki migration with steps that are not needed for mirrors.
func CleanUpMigrateInfo(ctx context.Context, repo *repo_model.Repository) (*repo_model.Repository, error) {
repoPath := repo.RepoPath()
if err := createDelegateHooks(repoPath); err != nil {
return repo, fmt.Errorf("createDelegateHooks: %w", err)
}
if repo.HasWiki() {
if err := createDelegateHooks(repo.WikiPath()); err != nil {
return repo, fmt.Errorf("createDelegateHooks.(wiki): %w", err)
}
}
_, _, err := git.NewCommand(ctx, "remote", "rm", "origin").RunStdString(&git.RunOpts{Dir: repoPath})
if err != nil && !strings.HasPrefix(err.Error(), "exit status 128 - fatal: No such remote ") {
return repo, fmt.Errorf("CleanUpMigrateInfo: %w", err)
}
if repo.HasWiki() {
if err := cleanUpMigrateGitConfig(path.Join(repo.WikiPath(), "config")); err != nil {
return repo, fmt.Errorf("cleanUpMigrateGitConfig (wiki): %w", err)
}
}
return repo, UpdateRepository(ctx, repo, false)
}
// SyncReleasesWithTags synchronizes release table with repository tags
func SyncReleasesWithTags(repo *repo_model.Repository, gitRepo *git.Repository) error {
log.Debug("SyncReleasesWithTags: in Repo[%d:%s/%s]", repo.ID, repo.OwnerName, repo.Name)
// optimized procedure for pull-mirrors which saves a lot of time (in
// particular for repos with many tags).
if repo.IsMirror {
return pullMirrorReleaseSync(repo, gitRepo)
}
existingRelTags := make(container.Set[string])
opts := repo_model.FindReleasesOptions{
IncludeDrafts: true,
IncludeTags: true,
ListOptions: db.ListOptions{PageSize: 50},
}
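// First pass: walk the existing releases page by page and drop tag releases whose tag no longer exists or now points at a different commit.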
for page := 1; ; page++ {
opts.Page = page
rels, err := repo_model.GetReleasesByRepoID(gitRepo.Ctx, repo.ID, opts)
if err != nil {
return fmt.Errorf("unable to GetReleasesByRepoID in Repo[%d:%s/%s]: %w", repo.ID, repo.OwnerName, repo.Name, err)
}
if len(rels) == 0 {
break
}
for _, rel := range rels {
if rel.IsDraft {
continue
}
commitID, err := gitRepo.GetTagCommitID(rel.TagName)
if err != nil && !git.IsErrNotExist(err) {
return fmt.Errorf("unable to GetTagCommitID for %q in Repo[%d:%s/%s]: %w", rel.TagName, repo.ID, repo.OwnerName, repo.Name, err)
}
if git.IsErrNotExist(err) || commitID != rel.Sha1 {
if err := repo_model.PushUpdateDeleteTag(repo, rel.TagName); err != nil {
return fmt.Errorf("unable to PushUpdateDeleteTag: %q in Repo[%d:%s/%s]: %w", rel.TagName, repo.ID, repo.OwnerName, repo.Name, err)
}
} else {
existingRelTags.Add(strings.ToLower(rel.TagName))
}
}
}
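// Second pass: add a release entry for every repository tag that is not yet represented in the release table.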
_, err := gitRepo.WalkReferences(git.ObjectTag, 0, 0, func(sha1, refname string) error {
tagName := strings.TrimPrefix(refname, git.TagPrefix)
if existingRelTags.Contains(strings.ToLower(tagName)) {
return nil
}
if err := PushUpdateAddTag(db.DefaultContext, repo, gitRepo, tagName, sha1, refname); err != nil {
return fmt.Errorf("unable to PushUpdateAddTag: %q to Repo[%d:%s/%s]: %w", tagName, repo.ID, repo.OwnerName, repo.Name, err)
}
return nil
})
return err
}
// PushUpdateAddTag must be called for any push action that adds a tag
func PushUpdateAddTag(ctx context.Context, repo *repo_model.Repository, gitRepo *git.Repository, tagName, sha1, refname string) error {
tag, err := gitRepo.GetTagWithID(sha1, tagName)
if err != nil {
return fmt.Errorf("unable to GetTag: %w", err)
}
commit, err := tag.Commit(gitRepo)
if err != nil {
return fmt.Errorf("unable to get tag Commit: %w", err)
}
sig := tag.Tagger
if sig == nil {
sig = commit.Author
}
if sig == nil {
sig = commit.Committer
}
var author *user_model.User
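// Default to a fixed timestamp; it is overwritten below when a tagger or commit signature is available.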
createdAt := time.Unix(1, 0)
if sig != nil {
author, err = user_model.GetUserByEmail(ctx, sig.Email)
if err != nil && !user_model.IsErrUserNotExist(err) {
return fmt.Errorf("unable to GetUserByEmail for %q: %w", sig.Email, err)
}
createdAt = sig.When
}
commitsCount, err := commit.CommitsCount()
if err != nil {
return fmt.Errorf("unable to get CommitsCount: %w", err)
}
rel := repo_model.Release{
RepoID: repo.ID,
TagName: tagName,
LowerTagName: strings.ToLower(tagName),
Sha1: commit.ID.String(),
NumCommits: commitsCount,
CreatedUnix: timeutil.TimeStamp(createdAt.Unix()),
IsTag: true,
}
if author != nil {
rel.PublisherID = author.ID
}
return repo_model.SaveOrUpdateTag(repo, &rel)
}
// StoreMissingLfsObjectsInRepository downloads missing LFS objects
func StoreMissingLfsObjectsInRepository(ctx context.Context, repo *repo_model.Repository, gitRepo *git.Repository, lfsClient lfs.Client) error {
contentStore := lfs.NewContentStore()
pointerChan := make(chan lfs.PointerBlob)
errChan := make(chan error, 1)
go lfs.SearchPointerBlobs(ctx, gitRepo, pointerChan, errChan)
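// downloadObjects fetches a batch of LFS objects from the remote endpoint and records each one as a meta object before writing its content to the store.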
downloadObjects := func(pointers []lfs.Pointer) error {
err := lfsClient.Download(ctx, pointers, func(p lfs.Pointer, content io.ReadCloser, objectError error) error {
if objectError != nil {
return objectError
}
defer content.Close()
_, err := git_model.NewLFSMetaObject(ctx, &git_model.LFSMetaObject{Pointer: p, RepositoryID: repo.ID})
if err != nil {
log.Error("Repo[%-v]: Error creating LFS meta object %-v: %v", repo, p, err)
return err
}
if err := contentStore.Put(p, content); err != nil {
log.Error("Repo[%-v]: Error storing content for LFS meta object %-v: %v", repo, p, err)
if _, err2 := git_model.RemoveLFSMetaObjectByOid(ctx, repo.ID, p.Oid); err2 != nil {
log.Error("Repo[%-v]: Error removing LFS meta object %-v: %v", repo, p, err2)
}
return err
}
return nil
})
if err != nil {
select {
case <-ctx.Done():
return nil
default:
}
}
return err
}
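// Collect pointers whose content still needs to be fetched so they can be
// requested from the LFS server in batches rather than one at a time.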
var batch []lfs.Pointer
for pointerBlob := range pointerChan {
meta, err := git_model.GetLFSMetaObjectByOid(ctx, repo.ID, pointerBlob.Oid)
if err != nil && err != git_model.ErrLFSObjectNotExist {
log.Error("Repo[%-v]: Error querying LFS meta object %-v: %v", repo, pointerBlob.Pointer, err)
return err
}
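// A non-nil meta record means this pointer is already tracked for the
// repository, so nothing more needs to be done for it.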
if meta != nil {
log.Trace("Repo[%-v]: Skipping unknown LFS meta object %-v", repo, pointerBlob.Pointer)
continue
}
log.Trace("Repo[%-v]: LFS object %-v not present in repository", repo, pointerBlob.Pointer)
exist, err := contentStore.Exists(pointerBlob.Pointer)
if err != nil {
log.Error("Repo[%-v]: Error checking if LFS object %-v exists: %v", repo, pointerBlob.Pointer, err)
return err
}
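// The content may already exist in the content store even though no meta
// record exists yet; in that case only the database record is missing.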
if exist {
log.Trace("Repo[%-v]: LFS object %-v already present; creating meta object", repo, pointerBlob.Pointer)
_, err := git_model.NewLFSMetaObject(ctx, &git_model.LFSMetaObject{Pointer: pointerBlob.Pointer, RepositoryID: repo.ID})
if err != nil {
log.Error("Repo[%-v]: Error creating LFS meta object %-v: %v", repo, pointerBlob.Pointer, err)
return err
}
} else {
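// Content is missing locally: queue the pointer for download unless it
// exceeds the configured LFS_MAX_FILE_SIZE limit.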
if setting.LFS.MaxFileSize > 0 && pointerBlob.Size > setting.LFS.MaxFileSize {
log.Info("Repo[%-v]: LFS object %-v download denied because of LFS_MAX_FILE_SIZE=%d < size %d", repo, pointerBlob.Pointer, setting.LFS.MaxFileSize, pointerBlob.Size)
continue
}
batch = append(batch, pointerBlob.Pointer)
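// Flush the batch once it reaches the LFS client's batch size.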
if len(batch) >= lfsClient.BatchSize() {
if err := downloadObjects(batch); err != nil {
return err
}
batch = nil
}
}
}
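// Download whatever is left over from the last, possibly partial, batch.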
if len(batch) > 0 {
if err := downloadObjects(batch); err != nil {
return err
}
}
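// A value on errChan indicates that enumerating the repository's LFS
// pointers failed part-way through.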
err, has := <-errChan
if has {
log.Error("Repo[%-v]: Error enumerating LFS objects for repository: %v", repo, err)
return err
}
return nil
}
// pullMirrorReleaseSync is a pull-mirror specific tag<->release table
// synchronization which rebuilds all releases from the repository tags. This
// is safe because a pull-mirror is always identical to its upstream; after
// each sync the pull-mirror release set should therefore match the upstream
// tag set exactly. Rebuilding in one pass is much more efficient for
// repositories like https://github.com/vim/vim (with over 13000 tags).
func pullMirrorReleaseSync(repo *repo_model.Repository, gitRepo *git.Repository) error {
log.Trace("pullMirrorReleaseSync: rebuilding releases for pull-mirror Repo[%d:%s/%s]", repo.ID, repo.OwnerName, repo.Name)
tags, numTags, err := gitRepo.GetTagInfos(0, 0)
if err != nil {
return fmt.Errorf("unable to GetTagInfos in pull-mirror Repo[%d:%s/%s]: %w", repo.ID, repo.OwnerName, repo.Name, err)
}
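// Rebuild the release table inside a single transaction so a failed sync
// cannot leave the mirror with a partially written release set.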
err = db.WithTx(db.DefaultContext, func(ctx context.Context) error {
//
// clear out existing releases
//
if _, err := db.DeleteByBean(ctx, &repo_model.Release{RepoID: repo.ID}); err != nil {
return fmt.Errorf("unable to clear releases for pull-mirror Repo[%d:%s/%s]: %w", repo.ID, repo.OwnerName, repo.Name, err)
}
//
// make release set identical to upstream tags
//
for _, tag := range tags {
release := repo_model.Release{
RepoID: repo.ID,
TagName: tag.Name,
LowerTagName: strings.ToLower(tag.Name),
Sha1: tag.Object.String(),
// NOTE: ignored, since NumCommits is unused
// for pull-mirrors (it is only relevant when
// displaying releases, i.e. IsTag: false)
NumCommits: -1,
CreatedUnix: timeutil.TimeStamp(tag.Tagger.When.Unix()),
IsTag: true,
}
if err := db.Insert(ctx, release); err != nil {
return fmt.Errorf("unable insert tag %s for pull-mirror Repo[%d:%s/%s]: %w", tag.Name, repo.ID, repo.OwnerName, repo.Name, err)
}
}
return nil
})
if err != nil {
return fmt.Errorf("unable to rebuild release table for pull-mirror Repo[%d:%s/%s]: %w", repo.ID, repo.OwnerName, repo.Name, err)
}
log.Trace("pullMirrorReleaseSync: done rebuilding %d releases", numTags)
return nil
}