Detect migrating batch size (#7353)

* Make migrating batch size configurable

* Detect different table batch insert sizes and remove the config item

* Remove unrelated changes
Lunny Xiao 6 years ago committed by techknowlogick
parent ef57fe4ae3
commit 337d6915ff
1. models/models.go (6 changes)
2. modules/migrations/base/uploader.go (1 change)
3. modules/migrations/gitea.go (19 changes)
4. modules/migrations/migrate.go (54 changes)

models/models.go
@@ -368,3 +368,9 @@ func DumpDatabase(filePath string, dbType string) error {
     }
     return x.DumpTablesToFile(tbs, filePath)
 }
+
+// MaxBatchInsertSize returns the table's max batch insert size
+func MaxBatchInsertSize(bean interface{}) int {
+    t := x.TableInfo(bean)
+    return 999 / len(t.ColumnsSeq())
+}

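The divisor 999 in the new models.MaxBatchInsertSize appears to track the common per-statement bound-parameter ceiling (SQLite's long-standing default for SQLITE_MAX_VARIABLE_NUMBER is 999): a multi-row INSERT binds one variable per column per row, so at most 999 / columns rows fit in one statement. A minimal, self-contained sketch of the same arithmetic, using a hypothetical column count in place of xorm's TableInfo/ColumnsSeq:

package main

import "fmt"

// maxBatchInsertSize mirrors the calculation added in models/models.go:
// with a cap of 999 bound parameters per statement, a table with
// numColumns columns can take at most 999/numColumns rows per INSERT.
// numColumns stands in for len(t.ColumnsSeq()) from xorm's TableInfo.
func maxBatchInsertSize(numColumns int) int {
	return 999 / numColumns
}

func main() {
	// Hypothetical column counts, for illustration only.
	fmt.Println(maxBatchInsertSize(20)) // 49 rows per batch
	fmt.Println(maxBatchInsertSize(8))  // 124 rows per batch
}
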
modules/migrations/base/uploader.go
@@ -7,6 +7,7 @@ package base
 // Uploader uploads all the informations of one repository
 type Uploader interface {
+    MaxBatchInsertSize(tp string) int
     CreateRepo(repo *Repository, opts MigrateOptions) error
     CreateMilestones(milestones ...*Milestone) error
     CreateReleases(releases ...*Release) error

modules/migrations/gitea.go
@@ -53,6 +53,25 @@ func NewGiteaLocalUploader(doer *models.User, repoOwner, repoName string) *Gitea
     }
 }

+// MaxBatchInsertSize returns the table's max batch insert size
+func (g *GiteaLocalUploader) MaxBatchInsertSize(tp string) int {
+    switch tp {
+    case "issue":
+        return models.MaxBatchInsertSize(new(models.Issue))
+    case "comment":
+        return models.MaxBatchInsertSize(new(models.Comment))
+    case "milestone":
+        return models.MaxBatchInsertSize(new(models.Milestone))
+    case "label":
+        return models.MaxBatchInsertSize(new(models.Label))
+    case "release":
+        return models.MaxBatchInsertSize(new(models.Release))
+    case "pullrequest":
+        return models.MaxBatchInsertSize(new(models.PullRequest))
+    }
+    return 10
+}
+
 // CreateRepo creates a repository
 func (g *GiteaLocalUploader) CreateRepo(repo *base.Repository, opts base.MigrateOptions) error {
     owner, err := models.GetUserByName(g.repoOwner)

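Because MaxBatchInsertSize is now part of the Uploader interface (previous hunk) and implemented by GiteaLocalUploader above, the migration loop can ask for a per-table batch size without knowing anything about the underlying database. A small, self-contained sketch of that dispatch pattern; stubUploader and its sizes are hypothetical stand-ins, not Gitea code:

package main

import "fmt"

// uploader is a trimmed stand-in for the Uploader interface above;
// only the batch-size query is shown.
type uploader interface {
	MaxBatchInsertSize(tp string) int
}

// stubUploader mimics GiteaLocalUploader.MaxBatchInsertSize: it maps a
// type name to a per-table batch size and falls back to a small default.
type stubUploader struct {
	sizes map[string]int
}

func (s stubUploader) MaxBatchInsertSize(tp string) int {
	if n, ok := s.sizes[tp]; ok {
		return n
	}
	return 10 // same fallback as the real implementation
}

func main() {
	var u uploader = stubUploader{sizes: map[string]int{
		"issue":   45, // hypothetical values; the real ones derive from column counts
		"comment": 90,
	}}
	fmt.Println(u.MaxBatchInsertSize("issue"))   // 45
	fmt.Println(u.MaxBatchInsertSize("release")) // 10 (fallback)
}
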
modules/migrations/migrate.go
@@ -91,9 +91,17 @@ func migrateRepository(downloader base.Downloader, uploader base.Uploader, opts
             return err
         }

+        msBatchSize := uploader.MaxBatchInsertSize("milestone")
+        for len(milestones) > 0 {
+            if len(milestones) < msBatchSize {
+                msBatchSize = len(milestones)
+            }
+
-        if err := uploader.CreateMilestones(milestones...); err != nil {
-            return err
-        }
+            if err := uploader.CreateMilestones(milestones...); err != nil {
+                return err
+            }
+            milestones = milestones[msBatchSize:]
+        }
     }

     if opts.Labels {
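
The milestone hunk above, and the label, release, and comment hunks that follow, all apply the same chunking walk: clamp the batch size to whatever remains, upload one slice, then advance past it. A generic, self-contained sketch of that pattern (insertInBatches is a made-up helper for illustration, not part of Gitea):

package main

import "fmt"

// insertInBatches applies insert to items in chunks of at most batchSize,
// the same walk used for milestones, labels, releases and comments here.
func insertInBatches(items []string, batchSize int, insert func([]string) error) error {
	for len(items) > 0 {
		if len(items) < batchSize {
			batchSize = len(items)
		}
		if err := insert(items[:batchSize]); err != nil {
			return err
		}
		items = items[batchSize:]
	}
	return nil
}

func main() {
	milestones := []string{"v1.0", "v1.1", "v1.2", "v1.3", "v1.4"}
	// Batch size 2 for illustration; the real value comes from MaxBatchInsertSize.
	_ = insertInBatches(milestones, 2, func(batch []string) error {
		fmt.Println("inserting", batch)
		return nil
	})
}
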
@@ -103,9 +111,17 @@ func migrateRepository(downloader base.Downloader, uploader base.Uploader, opts
             return err
         }

+        lbBatchSize := uploader.MaxBatchInsertSize("label")
+        for len(labels) > 0 {
+            if len(labels) < lbBatchSize {
+                lbBatchSize = len(labels)
+            }
+
-        if err := uploader.CreateLabels(labels...); err != nil {
-            return err
-        }
+            if err := uploader.CreateLabels(labels...); err != nil {
+                return err
+            }
+            labels = labels[lbBatchSize:]
+        }
     }

     if opts.Releases {
@@ -115,15 +131,27 @@ func migrateRepository(downloader base.Downloader, uploader base.Uploader, opts
             return err
         }

+        relBatchSize := uploader.MaxBatchInsertSize("release")
+        for len(releases) > 0 {
+            if len(releases) < relBatchSize {
+                relBatchSize = len(releases)
+            }
+
-        if err := uploader.CreateReleases(releases...); err != nil {
-            return err
-        }
+            if err := uploader.CreateReleases(releases[:relBatchSize]...); err != nil {
+                return err
+            }
+            releases = releases[relBatchSize:]
+        }
     }

+    var commentBatchSize = uploader.MaxBatchInsertSize("comment")
+
     if opts.Issues {
         log.Trace("migrating issues and comments")
+        var issueBatchSize = uploader.MaxBatchInsertSize("issue")
+
         for i := 1; ; i++ {
-            issues, isEnd, err := downloader.GetIssues(i, 100)
+            issues, isEnd, err := downloader.GetIssues(i, issueBatchSize)
             if err != nil {
                 return err
             }
@@ -141,7 +169,7 @@ func migrateRepository(downloader base.Downloader, uploader base.Uploader, opts
                 continue
             }

-            var allComments = make([]*base.Comment, 0, 100)
+            var allComments = make([]*base.Comment, 0, commentBatchSize)
             for _, issue := range issues {
                 comments, err := downloader.GetComments(issue.Number)
                 if err != nil {
@@ -154,11 +182,12 @@ func migrateRepository(downloader base.Downloader, uploader base.Uploader, opts
                 }

                 allComments = append(allComments, comments...)

-                if len(allComments) >= 100 {
-                    if err := uploader.CreateComments(allComments...); err != nil {
+                if len(allComments) >= commentBatchSize {
+                    if err := uploader.CreateComments(allComments[:commentBatchSize]...); err != nil {
                         return err
                     }
-                    allComments = make([]*base.Comment, 0, 100)
+
+                    allComments = allComments[commentBatchSize:]
                 }
             }
@@ -176,8 +205,9 @@ func migrateRepository(downloader base.Downloader, uploader base.Uploader, opts
     if opts.PullRequests {
         log.Trace("migrating pull requests and comments")
+        var prBatchSize = models.MaxBatchInsertSize("pullrequest")
         for i := 1; ; i++ {
-            prs, err := downloader.GetPullRequests(i, 100)
+            prs, err := downloader.GetPullRequests(i, prBatchSize)
             if err != nil {
                 return err
             }
@@ -195,7 +225,7 @@ func migrateRepository(downloader base.Downloader, uploader base.Uploader, opts
                 continue
             }

-            var allComments = make([]*base.Comment, 0, 100)
+            var allComments = make([]*base.Comment, 0, commentBatchSize)
             for _, pr := range prs {
                 comments, err := downloader.GetComments(pr.Number)
                 if err != nil {
@@ -209,11 +239,11 @@ func migrateRepository(downloader base.Downloader, uploader base.Uploader, opts
                 allComments = append(allComments, comments...)

-                if len(allComments) >= 100 {
-                    if err := uploader.CreateComments(allComments...); err != nil {
+                if len(allComments) >= commentBatchSize {
+                    if err := uploader.CreateComments(allComments[:commentBatchSize]...); err != nil {
                         return err
                     }
-                    allComments = make([]*base.Comment, 0, 100)
+                    allComments = allComments[commentBatchSize:]
                 }
             }

             if len(allComments) > 0 {
@@ -222,7 +252,7 @@ func migrateRepository(downloader base.Downloader, uploader base.Uploader, opts
                 }
             }

-            if len(prs) < 100 {
+            if len(prs) < prBatchSize {
                 break
             }
         }

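The last hunks also retire the hard-coded page size of 100: GetIssues and GetPullRequests are now asked for issueBatchSize / prBatchSize items per page, and a short page (len(prs) < prBatchSize) signals the final one. A self-contained sketch of that paging loop, with fetchPage standing in for Gitea's downloader methods:

package main

import "fmt"

// fetchPage is a stand-in for downloader.GetPullRequests: it returns up to
// perPage items for the given 1-based page, from a fixed fake data set.
func fetchPage(page, perPage int) []int {
	const total = 7
	start := (page - 1) * perPage
	if start >= total {
		return nil
	}
	end := start + perPage
	if end > total {
		end = total
	}
	items := make([]int, 0, end-start)
	for i := start; i < end; i++ {
		items = append(items, i+1)
	}
	return items
}

func main() {
	perPage := 3 // illustrative; the real code uses the uploader's batch size
	for page := 1; ; page++ {
		items := fetchPage(page, perPage)
		fmt.Println("page", page, "->", items)
		// A short page means we reached the end, mirroring len(prs) < prBatchSize.
		if len(items) < perPage {
			break
		}
	}
}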