Use batch insert on migrating repository to make the process faster (#7050)

* Use batch insert on migrating repository to make the process faster

* fix lint

* fix tests

* fix comments
Lunny Xiao 2019-06-29 21:38:22 +08:00 committed by GitHub
parent e463bdaf8d
commit 462284e2f5
7 changed files with 343 additions and 242 deletions
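
Instead of one INSERT and one uploader call per migrated row, the uploader interface now takes batches (milestones, labels, releases, issues, comments, and pull requests), and migrateRepository buffers comments and flushes them 100 at a time; trading per-row round trips for a few multi-row inserts is where the speedup comes from. Below is a minimal, self-contained sketch of that buffering pattern, using a hypothetical Uploader interface and Comment type in place of the real base.Uploader and base.Comment; the diff hunks that follow show the updated code for each affected file.

package main

import "fmt"

// Comment and Uploader are hypothetical stand-ins for the real
// base.Comment and base.Uploader types used by the migration code.
type Comment struct {
	IssueIndex int64
	Content    string
}

type Uploader interface {
	CreateComments(comments ...*Comment) error
}

// flushInBatches buffers comments and hands them to the uploader in
// groups of batchSize, then flushes the remainder, mirroring the
// buffering pattern migrateRepository uses after this change.
func flushInBatches(u Uploader, comments []*Comment, batchSize int) error {
	buf := make([]*Comment, 0, batchSize)
	for _, c := range comments {
		buf = append(buf, c)
		if len(buf) >= batchSize {
			if err := u.CreateComments(buf...); err != nil {
				return err
			}
			buf = make([]*Comment, 0, batchSize)
		}
	}
	if len(buf) > 0 {
		return u.CreateComments(buf...)
	}
	return nil
}

// printUploader just reports batch sizes; a real uploader would issue
// one batched INSERT per call.
type printUploader struct{}

func (printUploader) CreateComments(comments ...*Comment) error {
	fmt.Printf("inserted %d comments in one batch\n", len(comments))
	return nil
}

func main() {
	var cs []*Comment
	for i := 0; i < 250; i++ {
		cs = append(cs, &Comment{IssueIndex: int64(i)})
	}
	// Prints batches of 100, 100, and 50.
	_ = flushInBatches(printUploader{}, cs, 100)
}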


@@ -6,38 +6,58 @@ package models
import "github.com/go-xorm/xorm"

// InsertMilestones creates milestones of repository.
func InsertMilestones(ms ...*Milestone) (err error) {
	if len(ms) == 0 {
		return nil
	}

	sess := x.NewSession()
	defer sess.Close()
	if err = sess.Begin(); err != nil {
		return err
	}

	// to return the id, so we should not use batch insert
	for _, m := range ms {
		if _, err = sess.NoAutoTime().Insert(m); err != nil {
			return err
		}
	}

	if _, err = sess.Exec("UPDATE `repository` SET num_milestones = num_milestones + ? WHERE id = ?", len(ms), ms[0].RepoID); err != nil {
		return err
	}
	return sess.Commit()
}

// InsertIssues inserts issues to the database
func InsertIssues(issues ...*Issue) error {
	sess := x.NewSession()
	if err := sess.Begin(); err != nil {
		return err
	}

	for _, issue := range issues {
		if err := insertIssue(sess, issue); err != nil {
			return err
		}
	}
	return sess.Commit()
}

func insertIssue(sess *xorm.Session, issue *Issue) error {
	if _, err := sess.NoAutoTime().Insert(issue); err != nil {
		return err
	}

	var issueLabels = make([]IssueLabel, 0, len(issue.Labels))
	var labelIDs = make([]int64, 0, len(issue.Labels))
	for _, label := range issue.Labels {
		issueLabels = append(issueLabels, IssueLabel{
			IssueID: issue.ID,
			LabelID: label.ID,
		})
		labelIDs = append(labelIDs, label.ID)
	}
	if _, err := sess.Insert(issueLabels); err != nil {
		return err
@@ -61,12 +81,20 @@ func insertIssue(sess *xorm.Session, issue *Issue, labelIDs []int64) error {
	if issue.IsClosed {
		sess.Incr("num_closed_issues")
	}
	if _, err := sess.In("id", labelIDs).NoAutoTime().Update(new(Label)); err != nil {
		return err
	}

	if issue.MilestoneID > 0 {
		sess.Incr("num_issues")
		if issue.IsClosed {
			sess.Incr("num_closed_issues")
		}

		if _, err := sess.ID(issue.MilestoneID).
			SetExpr("completeness", "num_closed_issues * 100 / num_issues").
			NoAutoTime().
			Update(new(Milestone)); err != nil {
			return err
		}
	}
@@ -74,72 +102,73 @@ func insertIssue(sess *xorm.Session, issue *Issue, labelIDs []int64) error {
	return nil
}

// InsertIssueComments inserts many comments of issues.
func InsertIssueComments(comments []*Comment) error {
	if len(comments) == 0 {
		return nil
	}

	var issueIDs = make(map[int64]bool)
	for _, comment := range comments {
		issueIDs[comment.IssueID] = true
	}

	sess := x.NewSession()
	defer sess.Close()
	if err := sess.Begin(); err != nil {
		return err
	}
	if _, err := sess.NoAutoTime().Insert(comments); err != nil {
		return err
	}
	for issueID := range issueIDs {
		if _, err := sess.Exec("UPDATE issue set num_comments = (SELECT count(*) FROM comment WHERE issue_id = ?) WHERE id = ?", issueID, issueID); err != nil {
			return err
		}
	}
	return sess.Commit()
}

// InsertPullRequests inserts pull requests
func InsertPullRequests(prs ...*PullRequest) error {
	sess := x.NewSession()
	defer sess.Close()
	if err := sess.Begin(); err != nil {
		return err
	}
	for _, pr := range prs {
		if err := insertIssue(sess, pr.Issue); err != nil {
			return err
		}
		pr.IssueID = pr.Issue.ID
		if _, err := sess.NoAutoTime().Insert(pr); err != nil {
			return err
		}
	}
	return sess.Commit()
}

// InsertReleases inserts migrated releases and their attachments
func InsertReleases(rels ...*Release) error {
	sess := x.NewSession()
	if err := sess.Begin(); err != nil {
		return err
	}

	for _, rel := range rels {
		if _, err := sess.NoAutoTime().Insert(rel); err != nil {
			return err
		}

		for i := 0; i < len(rel.Attachments); i++ {
			rel.Attachments[i].ReleaseID = rel.ID
		}

		if _, err := sess.NoAutoTime().Insert(rel.Attachments); err != nil {
			return err
		}
	}

	return sess.Commit()
}


@@ -9,6 +9,7 @@ import "time"
// Comment is a standard comment information
type Comment struct {
	IssueIndex  int64
	PosterName  string
	PosterEmail string
	Created     time.Time


@@ -5,14 +5,14 @@
package base

// Uploader uploads all the information of one repository
type Uploader interface {
	CreateRepo(repo *Repository, includeWiki bool) error
	CreateMilestones(milestones ...*Milestone) error
	CreateReleases(releases ...*Release) error
	CreateLabels(labels ...*Label) error
	CreateIssues(issues ...*Issue) error
	CreateComments(comments ...*Comment) error
	CreatePullRequests(prs ...*PullRequest) error
	Rollback() error
}


@@ -76,238 +76,280 @@ func (g *GiteaLocalUploader) CreateRepo(repo *base.Repository, includeWiki bool)
	return err
}

// CreateMilestones creates milestones
func (g *GiteaLocalUploader) CreateMilestones(milestones ...*base.Milestone) error {
	var mss = make([]*models.Milestone, 0, len(milestones))
	for _, milestone := range milestones {
		var deadline util.TimeStamp
		if milestone.Deadline != nil {
			deadline = util.TimeStamp(milestone.Deadline.Unix())
		}
		if deadline == 0 {
			deadline = util.TimeStamp(time.Date(9999, 1, 1, 0, 0, 0, 0, setting.UILocation).Unix())
		}
		var ms = models.Milestone{
			RepoID:       g.repo.ID,
			Name:         milestone.Title,
			Content:      milestone.Description,
			IsClosed:     milestone.State == "close",
			DeadlineUnix: deadline,
		}
		if ms.IsClosed && milestone.Closed != nil {
			ms.ClosedDateUnix = util.TimeStamp(milestone.Closed.Unix())
		}
		mss = append(mss, &ms)
	}

	err := models.InsertMilestones(mss...)
	if err != nil {
		return err
	}

	for _, ms := range mss {
		g.milestones.Store(ms.Name, ms.ID)
	}
	return nil
}

// CreateLabels creates labels
func (g *GiteaLocalUploader) CreateLabels(labels ...*base.Label) error {
	var lbs = make([]*models.Label, 0, len(labels))
	for _, label := range labels {
		lbs = append(lbs, &models.Label{
			RepoID:      g.repo.ID,
			Name:        label.Name,
			Description: label.Description,
			Color:       fmt.Sprintf("#%s", label.Color),
		})
	}

	err := models.NewLabels(lbs...)
	if err != nil {
		return err
	}
	for _, lb := range lbs {
		g.labels.Store(lb.Name, lb)
	}
	return nil
}

// CreateReleases creates releases
func (g *GiteaLocalUploader) CreateReleases(releases ...*base.Release) error {
	var rels = make([]*models.Release, 0, len(releases))
	for _, release := range releases {
		var rel = models.Release{
			RepoID:       g.repo.ID,
			PublisherID:  g.doer.ID,
			TagName:      release.TagName,
			LowerTagName: strings.ToLower(release.TagName),
			Target:       release.TargetCommitish,
			Title:        release.Name,
			Sha1:         release.TargetCommitish,
			Note:         release.Body,
			IsDraft:      release.Draft,
			IsPrerelease: release.Prerelease,
			IsTag:        false,
			CreatedUnix:  util.TimeStamp(release.Created.Unix()),
		}

		// calc NumCommits
		commit, err := g.gitRepo.GetCommit(rel.TagName)
		if err != nil {
			return fmt.Errorf("GetCommit: %v", err)
		}
		rel.NumCommits, err = commit.CommitsCount()
		if err != nil {
			return fmt.Errorf("CommitsCount: %v", err)
		}

		for _, asset := range release.Assets {
			var attach = models.Attachment{
				UUID:          gouuid.NewV4().String(),
				Name:          asset.Name,
				DownloadCount: int64(*asset.DownloadCount),
				Size:          int64(*asset.Size),
				CreatedUnix:   util.TimeStamp(asset.Created.Unix()),
			}

			// download attachment
			resp, err := http.Get(asset.URL)
			if err != nil {
				return err
			}
			defer resp.Body.Close()

			localPath := attach.LocalPath()
			if err = os.MkdirAll(path.Dir(localPath), os.ModePerm); err != nil {
				return fmt.Errorf("MkdirAll: %v", err)
			}

			fw, err := os.Create(localPath)
			if err != nil {
				return fmt.Errorf("Create: %v", err)
			}
			defer fw.Close()

			if _, err := io.Copy(fw, resp.Body); err != nil {
				return err
			}

			rel.Attachments = append(rel.Attachments, &attach)
		}

		rels = append(rels, &rel)
	}

	return models.InsertReleases(rels...)
}

// CreateIssues creates issues
func (g *GiteaLocalUploader) CreateIssues(issues ...*base.Issue) error {
	var iss = make([]*models.Issue, 0, len(issues))
	for _, issue := range issues {
		var labels []*models.Label
		for _, label := range issue.Labels {
			lb, ok := g.labels.Load(label.Name)
			if ok {
				labels = append(labels, lb.(*models.Label))
			}
		}

		var milestoneID int64
		if issue.Milestone != "" {
			milestone, ok := g.milestones.Load(issue.Milestone)
			if ok {
				milestoneID = milestone.(int64)
			}
		}

		var is = models.Issue{
			RepoID:      g.repo.ID,
			Repo:        g.repo,
			Index:       issue.Number,
			PosterID:    g.doer.ID,
			Title:       issue.Title,
			Content:     issue.Content,
			IsClosed:    issue.State == "closed",
			IsLocked:    issue.IsLocked,
			MilestoneID: milestoneID,
			Labels:      labels,
			CreatedUnix: util.TimeStamp(issue.Created.Unix()),
		}
		if issue.Closed != nil {
			is.ClosedUnix = util.TimeStamp(issue.Closed.Unix())
		}
		// TODO: add reactions
		iss = append(iss, &is)
	}

	err := models.InsertIssues(iss...)
	if err != nil {
		return err
	}
	for _, is := range iss {
		g.issues.Store(is.Index, is.ID)
	}
	return nil
}

// CreateComments creates comments of issues
func (g *GiteaLocalUploader) CreateComments(comments ...*base.Comment) error {
	var cms = make([]*models.Comment, 0, len(comments))
	for _, comment := range comments {
		var issueID int64
		if issueIDStr, ok := g.issues.Load(comment.IssueIndex); !ok {
			issue, err := models.GetIssueByIndex(g.repo.ID, comment.IssueIndex)
			if err != nil {
				return err
			}
			issueID = issue.ID
			g.issues.Store(comment.IssueIndex, issueID)
		} else {
			issueID = issueIDStr.(int64)
		}

		cms = append(cms, &models.Comment{
			IssueID:     issueID,
			Type:        models.CommentTypeComment,
			PosterID:    g.doer.ID,
			Content:     comment.Content,
			CreatedUnix: util.TimeStamp(comment.Created.Unix()),
		})

		// TODO: Reactions
	}

	return models.InsertIssueComments(cms)
}

// CreatePullRequests creates pull requests
func (g *GiteaLocalUploader) CreatePullRequests(prs ...*base.PullRequest) error {
	var gprs = make([]*models.PullRequest, 0, len(prs))
	for _, pr := range prs {
		gpr, err := g.newPullRequest(pr)
		if err != nil {
			return err
		}
		gprs = append(gprs, gpr)
	}
	if err := models.InsertPullRequests(gprs...); err != nil {
		return err
	}
	for _, pr := range gprs {
		g.issues.Store(pr.Issue.Index, pr.Issue.ID)
	}
	return nil
}

func (g *GiteaLocalUploader) newPullRequest(pr *base.PullRequest) (*models.PullRequest, error) {
	var labels []*models.Label
	for _, label := range pr.Labels {
		lb, ok := g.labels.Load(label.Name)
		if ok {
			labels = append(labels, lb.(*models.Label))
		}
	}

	var milestoneID int64
	if pr.Milestone != "" {
		milestone, ok := g.milestones.Load(pr.Milestone)
		if ok {
			milestoneID = milestone.(int64)
		}
	}

	// download patch file
	resp, err := http.Get(pr.PatchURL)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	pullDir := filepath.Join(g.repo.RepoPath(), "pulls")
	if err = os.MkdirAll(pullDir, os.ModePerm); err != nil {
		return nil, err
	}
	f, err := os.Create(filepath.Join(pullDir, fmt.Sprintf("%d.patch", pr.Number)))
	if err != nil {
		return nil, err
	}
	defer f.Close()
	_, err = io.Copy(f, resp.Body)
	if err != nil {
		return nil, err
	}

	// set head information
	pullHead := filepath.Join(g.repo.RepoPath(), "refs", "pull", fmt.Sprintf("%d", pr.Number))
	if err := os.MkdirAll(pullHead, os.ModePerm); err != nil {
		return nil, err
	}
	p, err := os.Create(filepath.Join(pullHead, "head"))
	if err != nil {
		return nil, err
	}
	defer p.Close()
	_, err = p.WriteString(pr.Head.SHA)
	if err != nil {
		return nil, err
	}

	var head = "unknown repository"
@@ -333,16 +375,16 @@ func (g *GiteaLocalUploader) CreatePullRequest(pr *base.PullRequest) error {
	} else {
		headBranch := filepath.Join(g.repo.RepoPath(), "refs", "heads", pr.Head.OwnerName, pr.Head.Ref)
		if err := os.MkdirAll(filepath.Dir(headBranch), os.ModePerm); err != nil {
			return nil, err
		}
		b, err := os.Create(headBranch)
		if err != nil {
			return nil, err
		}
		defer b.Close()
		_, err = b.WriteString(pr.Head.SHA)
		if err != nil {
			return nil, err
		}
		head = pr.Head.OwnerName + "/" + pr.Head.Ref
	}
@@ -373,6 +415,7 @@ func (g *GiteaLocalUploader) CreatePullRequest(pr *base.PullRequest) error {
			IsPull:      true,
			IsClosed:    pr.State == "closed",
			IsLocked:    pr.IsLocked,
			Labels:      labels,
			CreatedUnix: util.TimeStamp(pr.Created.Unix()),
		},
	}
@@ -389,7 +432,7 @@ func (g *GiteaLocalUploader) CreatePullRequest(pr *base.PullRequest) error {
	// TODO: reactions
	// TODO: assignees

	return &pullRequest, nil
}

// Rollback when migrating failed, this will rollback all the changes.


@@ -358,6 +358,7 @@ func (g *GithubDownloaderV3) GetComments(issueNumber int64) ([]*base.Comment, er
		reactions = convertGithubReactions(comment.Reactions)
	}

	allComments = append(allComments, &base.Comment{
		IssueIndex:  issueNumber,
		PosterName:  *comment.User.Login,
		PosterEmail: email,
		Content:     *comment.Body,


@@ -269,6 +269,7 @@ func TestGitHubDownloadRepo(t *testing.T) {
	assert.EqualValues(t, 35, len(comments))
	assert.EqualValues(t, []*base.Comment{
		{
			IssueIndex: 6,
			PosterName: "bkcsoft",
			Created:    time.Date(2016, 11, 02, 18, 59, 48, 0, time.UTC),
			Content: `I would prefer a solution that is in the backend, unless it's required to have it update without reloading. Unfortunately I can't seem to find anything that does that :unamused:
@@ -286,6 +287,7 @@ Also this would _require_ caching, since it will fetch huge amounts of data from
			},
		},
		{
			IssueIndex: 6,
			PosterName: "joubertredrat",
			Created:    time.Date(2016, 11, 02, 19, 16, 56, 0, time.UTC),
			Content: `Yes, this plugin build on front-end, with backend I don't know too, but we can consider make component for this.
@@ -303,6 +305,7 @@ In my case I use ajax to get data, but build on frontend anyway
			},
		},
		{
			IssueIndex: 6,
			PosterName: "xinity",
			Created:    time.Date(2016, 11, 03, 13, 04, 56, 0, time.UTC),
			Content: `following @bkcsoft retention strategy in cache is a must if we don't want gitea to waste ressources.


@@ -91,10 +91,8 @@ func migrateRepository(downloader base.Downloader, uploader base.Uploader, opts
			return err
		}

		if err := uploader.CreateMilestones(milestones...); err != nil {
			return err
		}
	}
@@ -105,10 +103,8 @@ func migrateRepository(downloader base.Downloader, uploader base.Uploader, opts
			return err
		}

		if err := uploader.CreateLabels(labels...); err != nil {
			return err
		}
	}
@@ -119,10 +115,8 @@ func migrateRepository(downloader base.Downloader, uploader base.Uploader, opts
			return err
		}

		if err := uploader.CreateReleases(releases...); err != nil {
			return err
		}
	}
@@ -137,15 +131,18 @@ func migrateRepository(downloader base.Downloader, uploader base.Uploader, opts
			if !opts.IgnoreIssueAuthor {
				issue.Content = fmt.Sprintf("Author: @%s \n\n%s", issue.PosterName, issue.Content)
			}
		}

		if err := uploader.CreateIssues(issues...); err != nil {
			return err
		}

		if !opts.Comments {
			continue
		}

		var allComments = make([]*base.Comment, 0, 100)
		for _, issue := range issues {
			comments, err := downloader.GetComments(issue.Number)
			if err != nil {
				return err
@@ -154,9 +151,20 @@ func migrateRepository(downloader base.Downloader, uploader base.Uploader, opts
				if !opts.IgnoreIssueAuthor {
					comment.Content = fmt.Sprintf("Author: @%s \n\n%s", comment.PosterName, comment.Content)
				}
			}
			allComments = append(allComments, comments...)

			if len(allComments) >= 100 {
				if err := uploader.CreateComments(allComments...); err != nil {
					return err
				}
				allComments = make([]*base.Comment, 0, 100)
			}
		}

		if len(allComments) > 0 {
			if err := uploader.CreateComments(allComments...); err != nil {
				return err
			}
		}
@@ -178,13 +186,17 @@ func migrateRepository(downloader base.Downloader, uploader base.Uploader, opts
			if !opts.IgnoreIssueAuthor {
				pr.Content = fmt.Sprintf("Author: @%s \n\n%s", pr.PosterName, pr.Content)
			}
		}
		if err := uploader.CreatePullRequests(prs...); err != nil {
			return err
		}

		if !opts.Comments {
			continue
		}

		var allComments = make([]*base.Comment, 0, 100)
		for _, pr := range prs {
			comments, err := downloader.GetComments(pr.Number)
			if err != nil {
				return err
@@ -193,11 +205,23 @@ func migrateRepository(downloader base.Downloader, uploader base.Uploader, opts
				if !opts.IgnoreIssueAuthor {
					comment.Content = fmt.Sprintf("Author: @%s \n\n%s", comment.PosterName, comment.Content)
				}
			}
			allComments = append(allComments, comments...)

			if len(allComments) >= 100 {
				if err := uploader.CreateComments(allComments...); err != nil {
					return err
				}
				allComments = make([]*base.Comment, 0, 100)
			}
		}

		if len(allComments) > 0 {
			if err := uploader.CreateComments(allComments...); err != nil {
				return err
			}
		}

		if len(prs) < 100 {
			break
		}