Refactor and enhance issue indexer to support both searching, filtering and paging (#26012)
Fix #24662. Replaces #24822 and #25708 (although the latter has already been merged).

## Background

In the past, Gitea supported issue searching with a keyword and conditions in a less efficient way. It worked by searching for issues with the keyword and obtaining a limited set of IDs (since fetching all IDs is heavy) from the indexer (bleve/elasticsearch/meilisearch), and then querying the database with the conditions to find a subset of the found IDs. This is why the results could be incomplete.

To solve this issue, we need to store all fields that could be used as conditions in the indexer, and support both keyword and additional conditions when searching with the indexer.

## Major changes

- Redefine `IndexerData` to include all fields that could be used as filter conditions.
- Refactor `Search(ctx context.Context, kw string, repoIDs []int64, limit, start int, state string)` to `Search(ctx context.Context, options *SearchOptions)`, so it supports more conditions now (see the caller-side sketch below the change stats).
- Change the data type stored in `issueIndexerQueue`: use `IndexerMetadata` instead of `IndexerData`, in case the data has been updated while it is in the queue. This also reduces the storage size of the queue.
- Enhance searching with Bleve/Elasticsearch/Meilisearch, making them fully support `SearchOptions`. Also, update the data versions.
- Keep most of the database indexer logic, but remove `issues.SearchIssueIDsByKeyword` from `models` to avoid confusion about where the entry point for searching issues is.
- Start a Meilisearch instance to test it in unit tests.
- Add unit tests with almost full coverage for the Bleve/Elasticsearch/Meilisearch indexers.

---------

Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
This commit is contained in:
parent aba9096999
commit 1e76a824bc

37 changed files with 2965 additions and 861 deletions
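The refactor described above is easiest to see from the caller's side. Below is a minimal sketch; the `SearchOptions` fields, `SortBy` constants, and `db.ListOptions` paginator are taken from the hunks in this diff, while the exported `SearchIssues` entry point (and its `(ids, total, err)` return shape) is an assumption of this sketch rather than something shown here:

```go
package main

import (
	"context"

	"code.gitea.io/gitea/models/db"
	issue_indexer "code.gitea.io/gitea/modules/indexer/issues"
	"code.gitea.io/gitea/modules/util"
)

// searchOpenIssues sketches the new single-options-struct search API:
// keyword, filter conditions, sorting, and paging all travel in one
// *SearchOptions, so the indexer can answer the whole query by itself.
func searchOpenIssues(ctx context.Context) ([]int64, int64, error) {
	opts := &issue_indexer.SearchOptions{
		Keyword:   "panic",                // full-text part, handled by the indexer
		RepoIDs:   []int64{1, 3},          // filter conditions now live in the indexer too
		IsClosed:  util.OptionalBoolFalse, // open issues only
		SortBy:    issue_indexer.SortByUpdatedDesc,
		Paginator: &db.ListOptions{Page: 1, PageSize: 20},
	}
	return issue_indexer.SearchIssues(ctx, opts) // assumed wrapper name
}
```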
.github/workflows/pull-db-tests.yml (vendored, 8 changes)

```diff
@@ -98,6 +98,12 @@ jobs:
           discovery.type: single-node
         ports:
           - "9200:9200"
+      meilisearch:
+        image: getmeili/meilisearch:v1.2.0
+        env:
+          MEILI_ENV: development # disable auth
+        ports:
+          - "7700:7700"
       smtpimap:
         image: tabascoterrier/docker-imap-devel:latest
         ports:
@@ -128,7 +134,7 @@ jobs:
           go-version: ">=1.20"
           check-latest: true
       - name: Add hosts to /etc/hosts
-        run: '[ -e "/.dockerenv" ] || [ -e "/run/.containerenv" ] || echo "127.0.0.1 mysql elasticsearch smtpimap" | sudo tee -a /etc/hosts'
+        run: '[ -e "/.dockerenv" ] || [ -e "/run/.containerenv" ] || echo "127.0.0.1 mysql elasticsearch meilisearch smtpimap" | sudo tee -a /etc/hosts'
       - run: make deps-backend
       - run: make backend
         env:
```
```diff
@@ -20,3 +20,20 @@ func BuildCaseInsensitiveLike(key, value string) builder.Cond {
 	}
 	return builder.Like{"UPPER(" + key + ")", strings.ToUpper(value)}
 }
+
+// BuildCaseInsensitiveIn returns a condition to check if the given value is in the given values case-insensitively.
+// Handles especially SQLite correctly as UPPER there only transforms ASCII letters.
+func BuildCaseInsensitiveIn(key string, values []string) builder.Cond {
+	uppers := make([]string, 0, len(values))
+	if setting.Database.Type.IsSQLite3() {
+		for _, value := range values {
+			uppers = append(uppers, util.ToUpperASCII(value))
+		}
+	} else {
+		for _, value := range values {
+			uppers = append(uppers, strings.ToUpper(value))
+		}
+	}
+
+	return builder.In("UPPER("+key+")", uppers)
+}
```
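A short usage sketch of the new helper, mirroring the `GetMilestoneIDsByNames` hunk later in this diff (the wrapper function name here is illustrative):

```go
package example

import (
	"context"

	"code.gitea.io/gitea/models/db"
)

// findMilestoneIDsByNames builds a case-insensitive IN condition and
// feeds it to an xorm session: UPPER(name) IN (uppercased values).
func findMilestoneIDsByNames(ctx context.Context, names []string) ([]int64, error) {
	var ids []int64
	return ids, db.GetEngine(ctx).Table("milestone").
		Where(db.BuildCaseInsensitiveIn("name", names)).
		Cols("id").
		Find(&ids)
}
```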
```diff
@@ -13,6 +13,7 @@ import (
 	project_model "code.gitea.io/gitea/models/project"
 	repo_model "code.gitea.io/gitea/models/repo"
 	user_model "code.gitea.io/gitea/models/user"
+	"code.gitea.io/gitea/modules/container"
 	"code.gitea.io/gitea/modules/log"
 	"code.gitea.io/gitea/modules/setting"
 	api "code.gitea.io/gitea/modules/structs"
@@ -550,9 +551,30 @@ func GetIssueWithAttrsByID(id int64) (*Issue, error) {
 }
 
 // GetIssuesByIDs return issues with the given IDs.
-func GetIssuesByIDs(ctx context.Context, issueIDs []int64) (IssueList, error) {
+// If keepOrder is true, the order of the returned issues will be the same as the given IDs.
+func GetIssuesByIDs(ctx context.Context, issueIDs []int64, keepOrder ...bool) (IssueList, error) {
 	issues := make([]*Issue, 0, len(issueIDs))
-	return issues, db.GetEngine(ctx).In("id", issueIDs).Find(&issues)
+
+	if err := db.GetEngine(ctx).In("id", issueIDs).Find(&issues); err != nil {
+		return nil, err
+	}
+
+	if len(keepOrder) > 0 && keepOrder[0] {
+		m := make(map[int64]*Issue, len(issues))
+		appended := container.Set[int64]{}
+		for _, issue := range issues {
+			m[issue.ID] = issue
+		}
+		issues = issues[:0]
+		for _, id := range issueIDs {
+			if issue, ok := m[id]; ok && !appended.Contains(id) { // make sure the id is existed and not appended
+				appended.Add(id)
+				issues = append(issues, issue)
+			}
+		}
+	}
+
+	return issues, nil
 }
 
 // GetIssueIDsByRepoID returns all issue ids by repo id
```
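Why `keepOrder` matters: the indexer returns IDs ranked by relevance or sort order, while a plain SQL `IN` query returns rows in arbitrary order. A minimal sketch (the caller function is illustrative):

```go
package example

import (
	"context"

	issues_model "code.gitea.io/gitea/models/issues"
)

// loadRanked keeps the ranking produced by the search engine: with
// keepOrder=true the result follows ids; unknown IDs are skipped and
// duplicate IDs are appended only once.
func loadRanked(ctx context.Context, ids []int64) (issues_model.IssueList, error) {
	return issues_model.GetIssuesByIDs(ctx, ids, true)
}
```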
```diff
@@ -21,7 +21,7 @@ import (
 
 // IssuesOptions represents options of an issue.
 type IssuesOptions struct { //nolint
-	db.ListOptions
+	db.Paginator
 	RepoIDs  []int64 // overwrites RepoCond if the length is not 0
 	RepoCond builder.Cond
 	AssigneeID int64
@@ -99,18 +99,31 @@ func applySorts(sess *xorm.Session, sortType string, priorityRepoID int64) {
 }
 
 func applyLimit(sess *xorm.Session, opts *IssuesOptions) *xorm.Session {
-	if opts.Page >= 0 && opts.PageSize > 0 {
-		var start int
-		if opts.Page == 0 {
-			start = 0
-		} else {
-			start = (opts.Page - 1) * opts.PageSize
-		}
-		sess.Limit(opts.PageSize, start)
-	}
-	return sess
-}
+	if opts.Paginator == nil || opts.Paginator.IsListAll() {
+		return sess
+	}
+
+	// Warning: Do not use GetSkipTake() for *db.ListOptions
+	// Its implementation could reset the page size with setting.API.MaxResponseItems
+	if listOptions, ok := opts.Paginator.(*db.ListOptions); ok {
+		if listOptions.Page >= 0 && listOptions.PageSize > 0 {
+			var start int
+			if listOptions.Page == 0 {
+				start = 0
+			} else {
+				start = (listOptions.Page - 1) * listOptions.PageSize
+			}
+			sess.Limit(listOptions.PageSize, start)
+		}
+		return sess
+	}
+
+	start, limit := opts.Paginator.GetSkipTake()
+	sess.Limit(limit, start)
+
+	return sess
+}
 
 func applyLabelsCondition(sess *xorm.Session, opts *IssuesOptions) *xorm.Session {
 	if len(opts.LabelIDs) > 0 {
 		if opts.LabelIDs[0] == 0 {
```
```diff
@@ -435,7 +448,7 @@ func Issues(ctx context.Context, opts *IssuesOptions) ([]*Issue, error) {
 	applyConditions(sess, opts)
 	applySorts(sess, opts.SortType, opts.PriorityRepoID)
 
-	issues := make(IssueList, 0, opts.ListOptions.PageSize)
+	issues := IssueList{}
 	if err := sess.Find(&issues); err != nil {
 		return nil, fmt.Errorf("unable to query Issues: %w", err)
 	}
@@ -447,45 +460,23 @@ func Issues(ctx context.Context, opts *IssuesOptions) ([]*Issue, error) {
 	return issues, nil
 }
 
-// SearchIssueIDsByKeyword search issues on database
-func SearchIssueIDsByKeyword(ctx context.Context, kw string, repoIDs []int64, limit, start int) (int64, []int64, error) {
-	repoCond := builder.In("repo_id", repoIDs)
-	subQuery := builder.Select("id").From("issue").Where(repoCond)
-	cond := builder.And(
-		repoCond,
-		builder.Or(
-			db.BuildCaseInsensitiveLike("name", kw),
-			db.BuildCaseInsensitiveLike("content", kw),
-			builder.In("id", builder.Select("issue_id").
-				From("comment").
-				Where(builder.And(
-					builder.Eq{"type": CommentTypeComment},
-					builder.In("issue_id", subQuery),
-					db.BuildCaseInsensitiveLike("content", kw),
-				)),
-			),
-		),
-	)
-
-	ids := make([]int64, 0, limit)
-	res := make([]struct {
-		ID          int64
-		UpdatedUnix int64
-	}, 0, limit)
-	err := db.GetEngine(ctx).Distinct("id", "updated_unix").Table("issue").Where(cond).
-		OrderBy("`updated_unix` DESC").Limit(limit, start).
-		Find(&res)
-	if err != nil {
-		return 0, nil, err
-	}
-	for _, r := range res {
-		ids = append(ids, r.ID)
-	}
-
-	total, err := db.GetEngine(ctx).Distinct("id").Table("issue").Where(cond).Count()
-	if err != nil {
-		return 0, nil, err
-	}
-
-	return total, ids, nil
-}
+// IssueIDs returns a list of issue ids by given conditions.
+func IssueIDs(ctx context.Context, opts *IssuesOptions, otherConds ...builder.Cond) ([]int64, int64, error) {
+	sess := db.GetEngine(ctx).
+		Join("INNER", "repository", "`issue`.repo_id = `repository`.id")
+	applyConditions(sess, opts)
+	for _, cond := range otherConds {
+		sess.And(cond)
+	}
+
+	applyLimit(sess, opts)
+	applySorts(sess, opts.SortType, opts.PriorityRepoID)
+
+	var res []int64
+	total, err := sess.Select("`issue`.id").Table(&Issue{}).FindAndCount(&res)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	return res, total, nil
+}
```
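`IssueIDs` is what the database indexer backend (later in this diff) calls. A sketch of a direct call, with the free-form keyword condition riding along as a variadic `builder.Cond` (the keyword value is illustrative):

```go
package example

import (
	"context"

	"code.gitea.io/gitea/models/db"
	issues_model "code.gitea.io/gitea/models/issues"
)

// keywordIssueIDs sketches the new entry point: structured conditions
// come from IssuesOptions, extra ones are appended as builder.Cond.
func keywordIssueIDs(ctx context.Context) ([]int64, int64, error) {
	return issues_model.IssueIDs(ctx, &issues_model.IssuesOptions{
		RepoIDs:  []int64{1},
		SortType: "newest",
	}, db.BuildCaseInsensitiveLike("issue.name", "bug"))
}
```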
```diff
@@ -73,7 +73,7 @@ func TestIssueAPIURL(t *testing.T) {
 func TestGetIssuesByIDs(t *testing.T) {
 	assert.NoError(t, unittest.PrepareTestDatabase())
 	testSuccess := func(expectedIssueIDs, nonExistentIssueIDs []int64) {
-		issues, err := issues_model.GetIssuesByIDs(db.DefaultContext, append(expectedIssueIDs, nonExistentIssueIDs...))
+		issues, err := issues_model.GetIssuesByIDs(db.DefaultContext, append(expectedIssueIDs, nonExistentIssueIDs...), true)
 		assert.NoError(t, err)
 		actualIssueIDs := make([]int64, len(issues))
 		for i, issue := range issues {
@@ -83,6 +83,7 @@ func TestGetIssuesByIDs(t *testing.T) {
 	}
 	testSuccess([]int64{1, 2, 3}, []int64{})
 	testSuccess([]int64{1, 2, 3}, []int64{unittest.NonexistentID})
+	testSuccess([]int64{3, 2, 1}, []int64{})
 }
 
 func TestGetParticipantIDsByIssue(t *testing.T) {
@@ -165,7 +166,7 @@ func TestIssues(t *testing.T) {
 		issues_model.IssuesOptions{
 			RepoCond: builder.In("repo_id", 1, 3),
 			SortType: "oldest",
-			ListOptions: db.ListOptions{
+			Paginator: &db.ListOptions{
 				Page:     1,
 				PageSize: 4,
 			},
@@ -175,7 +176,7 @@ func TestIssues(t *testing.T) {
 		{
 			issues_model.IssuesOptions{
 				LabelIDs: []int64{1},
-				ListOptions: db.ListOptions{
+				Paginator: &db.ListOptions{
 					Page:     1,
 					PageSize: 4,
 				},
@@ -185,7 +186,7 @@ func TestIssues(t *testing.T) {
 		{
 			issues_model.IssuesOptions{
 				LabelIDs: []int64{1, 2},
-				ListOptions: db.ListOptions{
+				Paginator: &db.ListOptions{
 					Page:     1,
 					PageSize: 4,
 				},
@@ -333,30 +334,6 @@ func TestIssue_loadTotalTimes(t *testing.T) {
 	assert.Equal(t, int64(3682), ms.TotalTrackedTime)
 }
 
-func TestIssue_SearchIssueIDsByKeyword(t *testing.T) {
-	assert.NoError(t, unittest.PrepareTestDatabase())
-	total, ids, err := issues_model.SearchIssueIDsByKeyword(context.TODO(), "issue2", []int64{1}, 10, 0)
-	assert.NoError(t, err)
-	assert.EqualValues(t, 1, total)
-	assert.EqualValues(t, []int64{2}, ids)
-
-	total, ids, err = issues_model.SearchIssueIDsByKeyword(context.TODO(), "first", []int64{1}, 10, 0)
-	assert.NoError(t, err)
-	assert.EqualValues(t, 1, total)
-	assert.EqualValues(t, []int64{1}, ids)
-
-	total, ids, err = issues_model.SearchIssueIDsByKeyword(context.TODO(), "for", []int64{1}, 10, 0)
-	assert.NoError(t, err)
-	assert.EqualValues(t, 5, total)
-	assert.ElementsMatch(t, []int64{1, 2, 3, 5, 11}, ids)
-
-	// issue1's comment id 2
-	total, ids, err = issues_model.SearchIssueIDsByKeyword(context.TODO(), "good", []int64{1}, 10, 0)
-	assert.NoError(t, err)
-	assert.EqualValues(t, 1, total)
-	assert.EqualValues(t, []int64{1}, ids)
-}
-
 func TestGetRepoIDsForIssuesOptions(t *testing.T) {
 	assert.NoError(t, unittest.PrepareTestDatabase())
 	user := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2})
@@ -496,7 +473,19 @@ func TestCorrectIssueStats(t *testing.T) {
 	wg.Wait()
 
 	// Now we will get all issueID's that match the "Bugs are nasty" query.
-	total, ids, err := issues_model.SearchIssueIDsByKeyword(context.TODO(), "Bugs are nasty", []int64{1}, issueAmount, 0)
+	issues, err := issues_model.Issues(context.TODO(), &issues_model.IssuesOptions{
+		Paginator: &db.ListOptions{
+			PageSize: issueAmount,
+		},
+		RepoIDs: []int64{1},
+	})
+	total := int64(len(issues))
+	var ids []int64
+	for _, issue := range issues {
+		if issue.Content == "Bugs are nasty" {
+			ids = append(ids, issue.ID)
+		}
+	}
 
 	// Just to be sure.
 	assert.NoError(t, err)
```
```diff
@@ -84,3 +84,13 @@ func UpdateIssueUsersByMentions(ctx context.Context, issueID int64, uids []int64
 	}
 	return nil
 }
+
+// GetIssueMentionIDs returns all mentioned user IDs of an issue.
+func GetIssueMentionIDs(ctx context.Context, issueID int64) ([]int64, error) {
+	var ids []int64
+	return ids, db.GetEngine(ctx).Table(IssueUser{}).
+		Where("issue_id=?", issueID).
+		And("is_mentioned=?", true).
+		Select("uid").
+		Find(&ids)
+}
```
```diff
@@ -272,12 +272,12 @@ func GetLabelByID(ctx context.Context, labelID int64) (*Label, error) {
 }
 
 // GetLabelsByIDs returns a list of labels by IDs
-func GetLabelsByIDs(labelIDs []int64) ([]*Label, error) {
+func GetLabelsByIDs(labelIDs []int64, cols ...string) ([]*Label, error) {
 	labels := make([]*Label, 0, len(labelIDs))
 	return labels, db.GetEngine(db.DefaultContext).Table("label").
 		In("id", labelIDs).
 		Asc("name").
-		Cols("id", "repo_id", "org_id").
+		Cols(cols...).
 		Find(&labels)
 }
 
@@ -476,6 +476,18 @@ func GetLabelsByOrgID(ctx context.Context, orgID int64, sortType string, listOpt
 	return labels, sess.Find(&labels)
 }
 
+// GetLabelIDsByNames returns a list of labelIDs by names.
+// It doesn't filter them by repo or org, so it could return labels belonging to different repos/orgs.
+// It's used for filtering issues via indexer, otherwise it would be useless.
+// Since it could return labels with the same name, so the length of returned ids could be more than the length of names.
+func GetLabelIDsByNames(ctx context.Context, labelNames []string) ([]int64, error) {
+	labelIDs := make([]int64, 0, len(labelNames))
+	return labelIDs, db.GetEngine(ctx).Table("label").
+		In("name", labelNames).
+		Cols("id").
+		Find(&labelIDs)
+}
+
 // CountLabelsByOrgID count all labels that belong to given organization by ID.
 func CountLabelsByOrgID(orgID int64) (int64, error) {
 	return db.GetEngine(db.DefaultContext).Where("org_id = ?", orgID).Count(&Label{})
```
```diff
@@ -396,6 +396,18 @@ func GetMilestones(opts GetMilestonesOption) (MilestoneList, int64, error) {
 	return miles, total, err
 }
 
+// GetMilestoneIDsByNames returns a list of milestone ids by given names.
+// It doesn't filter them by repo, so it could return milestones belonging to different repos.
+// It's used for filtering issues via indexer, otherwise it would be useless.
+// Since it could return milestones with the same name, so the length of returned ids could be more than the length of names.
+func GetMilestoneIDsByNames(ctx context.Context, names []string) ([]int64, error) {
+	var ids []int64
+	return ids, db.GetEngine(ctx).Table("milestone").
+		Where(db.BuildCaseInsensitiveIn("name", names)).
+		Cols("id").
+		Find(&ids)
+}
+
 // SearchMilestones search milestones
 func SearchMilestones(repoCond builder.Cond, page int, isClosed bool, sortType, keyword string) (MilestoneList, error) {
 	miles := make([]*Milestone, 0, setting.UI.IssuePagingNum)
```
```diff
@@ -41,15 +41,6 @@ const (
 	maxBatchSize = 16
 )
 
-// numericEqualityQuery a numeric equality query for the given value and field
-func numericEqualityQuery(value int64, field string) *query.NumericRangeQuery {
-	f := float64(value)
-	tru := true
-	q := bleve.NewNumericRangeInclusiveQuery(&f, &f, &tru, &tru)
-	q.SetField(field)
-	return q
-}
-
 func addUnicodeNormalizeTokenFilter(m *mapping.IndexMappingImpl) error {
 	return m.AddCustomTokenFilter(unicodeNormalizeName, map[string]any{
 		"type": unicodenorm.Name,
@@ -225,7 +216,7 @@ func (b *Indexer) Index(ctx context.Context, repo *repo_model.Repository, sha st
 
 // Delete deletes indexes by ids
 func (b *Indexer) Delete(_ context.Context, repoID int64) error {
-	query := numericEqualityQuery(repoID, "RepoID")
+	query := inner_bleve.NumericEqualityQuery(repoID, "RepoID")
 	searchRequest := bleve.NewSearchRequestOptions(query, 2147483647, 0, false)
 	result, err := b.inner.Indexer.Search(searchRequest)
 	if err != nil {
@@ -262,7 +253,7 @@ func (b *Indexer) Search(ctx context.Context, repoIDs []int64, language, keyword
 	if len(repoIDs) > 0 {
 		repoQueries := make([]query.Query, 0, len(repoIDs))
 		for _, repoID := range repoIDs {
-			repoQueries = append(repoQueries, numericEqualityQuery(repoID, "RepoID"))
+			repoQueries = append(repoQueries, inner_bleve.NumericEqualityQuery(repoID, "RepoID"))
 		}
 
 		indexerQuery = bleve.NewConjunctionQuery(
```
modules/indexer/internal/bleve/query.go (new file, 53 lines)

```go
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package bleve

import (
	"github.com/blevesearch/bleve/v2"
	"github.com/blevesearch/bleve/v2/search/query"
)

// NumericEqualityQuery generates a numeric equality query for the given value and field
func NumericEqualityQuery(value int64, field string) *query.NumericRangeQuery {
	f := float64(value)
	tru := true
	q := bleve.NewNumericRangeInclusiveQuery(&f, &f, &tru, &tru)
	q.SetField(field)
	return q
}

// MatchPhraseQuery generates a match phrase query for the given phrase, field and analyzer
func MatchPhraseQuery(matchPhrase, field, analyzer string) *query.MatchPhraseQuery {
	q := bleve.NewMatchPhraseQuery(matchPhrase)
	q.FieldVal = field
	q.Analyzer = analyzer
	return q
}

// BoolFieldQuery generates a bool field query for the given value and field
func BoolFieldQuery(value bool, field string) *query.BoolFieldQuery {
	q := bleve.NewBoolFieldQuery(value)
	q.SetField(field)
	return q
}

func NumericRangeInclusiveQuery(min, max *int64, field string) *query.NumericRangeQuery {
	var minF, maxF *float64
	var minI, maxI *bool
	if min != nil {
		minF = new(float64)
		*minF = float64(*min)
		minI = new(bool)
		*minI = true
	}
	if max != nil {
		maxF = new(float64)
		*maxF = float64(*max)
		maxI = new(bool)
		*maxI = true
	}
	q := bleve.NewNumericRangeInclusiveQuery(minF, maxF, minI, maxI)
	q.SetField(field)
	return q
}
```
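A sketch of how the issue indexer (later in this diff) composes these helpers; the field values here are illustrative:

```go
package example

import (
	inner_bleve "code.gitea.io/gitea/modules/indexer/internal/bleve"

	"github.com/blevesearch/bleve/v2"
)

// exampleQuery combines the helpers into one conjunction: repo 1,
// open issues only, updated since 2023-01-01 (open-ended upper bound).
func exampleQuery() {
	after := int64(1672531200)
	q := bleve.NewConjunctionQuery(
		inner_bleve.NumericEqualityQuery(1, "repo_id"),
		inner_bleve.BoolFieldQuery(false, "is_closed"),
		inner_bleve.NumericRangeInclusiveQuery(&after, nil, "updated_unix"),
	)
	_ = q
}
```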
```diff
@@ -76,7 +76,8 @@ func (i *Indexer) Ping(ctx context.Context) error {
 	if err != nil {
 		return err
 	}
-	if resp.Status != "green" {
+	if resp.Status != "green" && resp.Status != "yellow" {
+		// It's healthy if the status is green, and it's available if the status is yellow,
 		// see https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html
 		return fmt.Errorf("status of elasticsearch cluster is %s", resp.Status)
 	}
```
modules/indexer/internal/meilisearch/filter.go (new file, 119 lines)

```go
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package meilisearch

import (
	"fmt"
	"strings"
)

// Filter represents a filter for meilisearch queries.
// It's just a simple wrapper around a string.
// DO NOT assume that it is a complete implementation.
type Filter interface {
	Statement() string
}

type FilterAnd struct {
	filters []Filter
}

func (f *FilterAnd) Statement() string {
	var statements []string
	for _, filter := range f.filters {
		if s := filter.Statement(); s != "" {
			statements = append(statements, fmt.Sprintf("(%s)", s))
		}
	}
	return strings.Join(statements, " AND ")
}

func (f *FilterAnd) And(filter Filter) *FilterAnd {
	f.filters = append(f.filters, filter)
	return f
}

type FilterOr struct {
	filters []Filter
}

func (f *FilterOr) Statement() string {
	var statements []string
	for _, filter := range f.filters {
		if s := filter.Statement(); s != "" {
			statements = append(statements, fmt.Sprintf("(%s)", s))
		}
	}
	return strings.Join(statements, " OR ")
}

func (f *FilterOr) Or(filter Filter) *FilterOr {
	f.filters = append(f.filters, filter)
	return f
}

type FilterIn string

// NewFilterIn creates a new FilterIn.
// It supports int64 only, to avoid extra works to handle strings with special characters.
func NewFilterIn[T int64](field string, values ...T) FilterIn {
	if len(values) == 0 {
		return ""
	}
	vs := make([]string, len(values))
	for i, v := range values {
		vs[i] = fmt.Sprintf("%v", v)
	}
	return FilterIn(fmt.Sprintf("%s IN [%v]", field, strings.Join(vs, ", ")))
}

func (f FilterIn) Statement() string {
	return string(f)
}

type FilterEq string

// NewFilterEq creates a new FilterEq.
// It supports int64 and bool only, to avoid extra works to handle strings with special characters.
func NewFilterEq[T bool | int64](field string, value T) FilterEq {
	return FilterEq(fmt.Sprintf("%s = %v", field, value))
}

func (f FilterEq) Statement() string {
	return string(f)
}

type FilterNot string

func NewFilterNot(filter Filter) FilterNot {
	return FilterNot(fmt.Sprintf("NOT (%s)", filter.Statement()))
}

func (f FilterNot) Statement() string {
	return string(f)
}

type FilterGte string

// NewFilterGte creates a new FilterGte.
// It supports int64 only, to avoid extra works to handle strings with special characters.
func NewFilterGte[T int64](field string, value T) FilterGte {
	return FilterGte(fmt.Sprintf("%s >= %v", field, value))
}

func (f FilterGte) Statement() string {
	return string(f)
}

type FilterLte string

// NewFilterLte creates a new FilterLte.
// It supports int64 only, to avoid extra works to handle strings with special characters.
func NewFilterLte[T int64](field string, value T) FilterLte {
	return FilterLte(fmt.Sprintf("%s <= %v", field, value))
}

func (f FilterLte) Statement() string {
	return string(f)
}
```
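A small usage sketch (my own example, not from the diff): composing the filter types above into a meilisearch filter string.

```go
package main

import (
	"fmt"

	inner_meilisearch "code.gitea.io/gitea/modules/indexer/internal/meilisearch"
)

func main() {
	// Only non-pull issues carrying label 1 or 2.
	f := (&inner_meilisearch.FilterAnd{}).
		And(inner_meilisearch.NewFilterEq("is_pull", false)).
		And(inner_meilisearch.NewFilterIn("label_ids", int64(1), int64(2)))
	fmt.Println(f.Statement())
	// Output: (is_pull = false) AND (label_ids IN [1, 2])
}
```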
```diff
@@ -17,14 +17,16 @@ type Indexer struct {
 	url, apiKey string
 	indexName   string
 	version     int
+	settings    *meilisearch.Settings
 }
 
-func NewIndexer(url, apiKey, indexName string, version int) *Indexer {
+func NewIndexer(url, apiKey, indexName string, version int, settings *meilisearch.Settings) *Indexer {
 	return &Indexer{
 		url:       url,
 		apiKey:    apiKey,
 		indexName: indexName,
 		version:   version,
+		settings:  settings,
 	}
 }
 
@@ -57,7 +59,7 @@ func (i *Indexer) Init(_ context.Context) (bool, error) {
 
 	i.checkOldIndexes()
 
-	_, err = i.Client.Index(i.VersionedIndexName()).UpdateFilterableAttributes(&[]string{"repo_id"})
+	_, err = i.Client.Index(i.VersionedIndexName()).UpdateSettings(i.settings)
 	return false, err
}
```
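With settings injected, the caller decides which attributes are filterable and sortable. A sketch; the attribute list, index name, and version are illustrative, and the `Settings` field names follow my understanding of the meilisearch-go struct rather than anything shown in this diff:

```go
package example

import (
	inner_meilisearch "code.gitea.io/gitea/modules/indexer/internal/meilisearch"

	"github.com/meilisearch/meilisearch-go"
)

// newIssueIndexer sketches passing index settings at construction time
// instead of hard-coding UpdateFilterableAttributes in Init.
func newIssueIndexer(url, apiKey string) *inner_meilisearch.Indexer {
	return inner_meilisearch.NewIndexer(url, apiKey, "gitea_issues", 1, &meilisearch.Settings{
		FilterableAttributes: []string{"repo_id", "is_public", "is_pull", "is_closed", "label_ids"},
		SortableAttributes:   []string{"updated_unix", "created_unix", "id"},
	})
}
```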
modules/indexer/internal/paginator.go (new file, 41 lines)

```go
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package internal

import (
	"math"

	"code.gitea.io/gitea/models/db"
)

// ParsePaginator parses a db.Paginator into a skip and limit
func ParsePaginator(paginator db.Paginator, max ...int) (int, int) {
	// Use a very large number to indicate no limit
	unlimited := math.MaxInt32
	if len(max) > 0 {
		// Some indexer engines have a limit on the page size, respect that
		unlimited = max[0]
	}

	if paginator == nil || paginator.IsListAll() {
		return 0, unlimited
	}

	// Warning: Do not use GetSkipTake() for *db.ListOptions
	// Its implementation could reset the page size with setting.API.MaxResponseItems
	if listOptions, ok := paginator.(*db.ListOptions); ok {
		if listOptions.Page >= 0 && listOptions.PageSize > 0 {
			var start int
			if listOptions.Page == 0 {
				start = 0
			} else {
				start = (listOptions.Page - 1) * listOptions.PageSize
			}
			return start, listOptions.PageSize
		}
		return 0, unlimited
	}

	return paginator.GetSkipTake()
}
```
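A usage sketch of `ParsePaginator`, showing both paginator shapes the indexer backends below rely on:

```go
package main

import (
	"fmt"

	"code.gitea.io/gitea/models/db"
	indexer_internal "code.gitea.io/gitea/modules/indexer/internal"
)

func main() {
	// Page-based paging: page 3 of size 20 skips the first 40 results.
	skip, limit := indexer_internal.ParsePaginator(&db.ListOptions{Page: 3, PageSize: 20})
	fmt.Println(skip, limit) // 40 20

	// nil means "list all"; the optional max caps the result for
	// engines with a hard page-size limit.
	skip, limit = indexer_internal.ParsePaginator(nil, 100)
	fmt.Println(skip, limit) // 0 100
}
```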
```diff
@@ -23,25 +23,9 @@ import (
 const (
 	issueIndexerAnalyzer      = "issueIndexer"
 	issueIndexerDocType       = "issueIndexerDocType"
-	issueIndexerLatestVersion = 3
+	issueIndexerLatestVersion = 4
 )
 
-// numericEqualityQuery a numeric equality query for the given value and field
-func numericEqualityQuery(value int64, field string) *query.NumericRangeQuery {
-	f := float64(value)
-	tru := true
-	q := bleve.NewNumericRangeInclusiveQuery(&f, &f, &tru, &tru)
-	q.SetField(field)
-	return q
-}
-
-func newMatchPhraseQuery(matchPhrase, field, analyzer string) *query.MatchPhraseQuery {
-	q := bleve.NewMatchPhraseQuery(matchPhrase)
-	q.FieldVal = field
-	q.Analyzer = analyzer
-	return q
-}
-
 const unicodeNormalizeName = "unicodeNormalize"
 
 func addUnicodeNormalizeTokenFilter(m *mapping.IndexMappingImpl) error {
@@ -74,10 +58,40 @@ func generateIssueIndexMapping() (mapping.IndexMapping, error) {
 	textFieldMapping := bleve.NewTextFieldMapping()
 	textFieldMapping.Store = false
 	textFieldMapping.IncludeInAll = false
 
+	boolFieldMapping := bleve.NewBooleanFieldMapping()
+	boolFieldMapping.Store = false
+	boolFieldMapping.IncludeInAll = false
+
+	numberFieldMapping := bleve.NewNumericFieldMapping()
+	numberFieldMapping.Store = false
+	numberFieldMapping.IncludeInAll = false
+
+	docMapping.AddFieldMappingsAt("is_public", boolFieldMapping)
+
 	docMapping.AddFieldMappingsAt("title", textFieldMapping)
 	docMapping.AddFieldMappingsAt("content", textFieldMapping)
 	docMapping.AddFieldMappingsAt("comments", textFieldMapping)
 
+	docMapping.AddFieldMappingsAt("is_pull", boolFieldMapping)
+	docMapping.AddFieldMappingsAt("is_closed", boolFieldMapping)
+	docMapping.AddFieldMappingsAt("label_ids", numberFieldMapping)
+	docMapping.AddFieldMappingsAt("no_label", boolFieldMapping)
+	docMapping.AddFieldMappingsAt("milestone_id", numberFieldMapping)
+	docMapping.AddFieldMappingsAt("project_id", numberFieldMapping)
+	docMapping.AddFieldMappingsAt("project_board_id", numberFieldMapping)
+	docMapping.AddFieldMappingsAt("poster_id", numberFieldMapping)
+	docMapping.AddFieldMappingsAt("assignee_id", numberFieldMapping)
+	docMapping.AddFieldMappingsAt("mention_ids", numberFieldMapping)
+	docMapping.AddFieldMappingsAt("reviewed_ids", numberFieldMapping)
+	docMapping.AddFieldMappingsAt("review_requested_ids", numberFieldMapping)
+	docMapping.AddFieldMappingsAt("subscriber_ids", numberFieldMapping)
+	docMapping.AddFieldMappingsAt("updated_unix", numberFieldMapping)
+
+	docMapping.AddFieldMappingsAt("created_unix", numberFieldMapping)
+	docMapping.AddFieldMappingsAt("deadline_unix", numberFieldMapping)
+	docMapping.AddFieldMappingsAt("comment_count", numberFieldMapping)
+
 	if err := addUnicodeNormalizeTokenFilter(mapping); err != nil {
 		return nil, err
 	} else if err = mapping.AddCustomAnalyzer(issueIndexerAnalyzer, map[string]any{
@@ -115,7 +129,7 @@ func NewIndexer(indexDir string) *Indexer {
 }
 
 // Index will save the index data
-func (b *Indexer) Index(_ context.Context, issues []*internal.IndexerData) error {
+func (b *Indexer) Index(_ context.Context, issues ...*internal.IndexerData) error {
 	batch := inner_bleve.NewFlushingBatch(b.inner.Indexer, maxBatchSize)
 	for _, issue := range issues {
 		if err := batch.Index(indexer_internal.Base36(issue.ID), (*IndexerData)(issue)); err != nil {
@@ -138,32 +152,126 @@ func (b *Indexer) Delete(_ context.Context, ids ...int64) error {
 
 // Search searches for issues by given conditions.
 // Returns the matching issue IDs
-func (b *Indexer) Search(ctx context.Context, keyword string, repoIDs []int64, limit, start int, state string) (*internal.SearchResult, error) {
-	var repoQueriesP []*query.NumericRangeQuery
-	for _, repoID := range repoIDs {
-		repoQueriesP = append(repoQueriesP, numericEqualityQuery(repoID, "repo_id"))
-	}
-	repoQueries := make([]query.Query, len(repoQueriesP))
-	for i, v := range repoQueriesP {
-		repoQueries[i] = query.Query(v)
-	}
-
-	indexerQuery := bleve.NewConjunctionQuery(
-		bleve.NewDisjunctionQuery(repoQueries...),
-		bleve.NewDisjunctionQuery(
-			newMatchPhraseQuery(keyword, "title", issueIndexerAnalyzer),
-			newMatchPhraseQuery(keyword, "content", issueIndexerAnalyzer),
-			newMatchPhraseQuery(keyword, "comments", issueIndexerAnalyzer),
-		))
-	search := bleve.NewSearchRequestOptions(indexerQuery, limit, start, false)
-	search.SortBy([]string{"-_score"})
+func (b *Indexer) Search(ctx context.Context, options *internal.SearchOptions) (*internal.SearchResult, error) {
+	var queries []query.Query
+
+	if options.Keyword != "" {
+		keywordQueries := []query.Query{
+			inner_bleve.MatchPhraseQuery(options.Keyword, "title", issueIndexerAnalyzer),
+			inner_bleve.MatchPhraseQuery(options.Keyword, "content", issueIndexerAnalyzer),
+			inner_bleve.MatchPhraseQuery(options.Keyword, "comments", issueIndexerAnalyzer),
+		}
+		queries = append(queries, bleve.NewDisjunctionQuery(keywordQueries...))
+	}
+
+	if len(options.RepoIDs) > 0 || options.AllPublic {
+		var repoQueries []query.Query
+		for _, repoID := range options.RepoIDs {
+			repoQueries = append(repoQueries, inner_bleve.NumericEqualityQuery(repoID, "repo_id"))
+		}
+		if options.AllPublic {
+			repoQueries = append(repoQueries, inner_bleve.BoolFieldQuery(true, "is_public"))
+		}
+		queries = append(queries, bleve.NewDisjunctionQuery(repoQueries...))
+	}
+
+	if !options.IsPull.IsNone() {
+		queries = append(queries, inner_bleve.BoolFieldQuery(options.IsPull.IsTrue(), "is_pull"))
+	}
+	if !options.IsClosed.IsNone() {
+		queries = append(queries, inner_bleve.BoolFieldQuery(options.IsClosed.IsTrue(), "is_closed"))
+	}
+
+	if options.NoLabelOnly {
+		queries = append(queries, inner_bleve.BoolFieldQuery(true, "no_label"))
+	} else {
+		if len(options.IncludedLabelIDs) > 0 {
+			var includeQueries []query.Query
+			for _, labelID := range options.IncludedLabelIDs {
+				includeQueries = append(includeQueries, inner_bleve.NumericEqualityQuery(labelID, "label_ids"))
+			}
+			queries = append(queries, bleve.NewConjunctionQuery(includeQueries...))
+		} else if len(options.IncludedAnyLabelIDs) > 0 {
+			var includeQueries []query.Query
+			for _, labelID := range options.IncludedAnyLabelIDs {
+				includeQueries = append(includeQueries, inner_bleve.NumericEqualityQuery(labelID, "label_ids"))
+			}
+			queries = append(queries, bleve.NewDisjunctionQuery(includeQueries...))
+		}
+		if len(options.ExcludedLabelIDs) > 0 {
+			var excludeQueries []query.Query
+			for _, labelID := range options.ExcludedLabelIDs {
+				q := bleve.NewBooleanQuery()
+				q.AddMustNot(inner_bleve.NumericEqualityQuery(labelID, "label_ids"))
+				excludeQueries = append(excludeQueries, q)
+			}
+			queries = append(queries, bleve.NewConjunctionQuery(excludeQueries...))
+		}
+	}
+
+	if len(options.MilestoneIDs) > 0 {
+		var milestoneQueries []query.Query
+		for _, milestoneID := range options.MilestoneIDs {
+			milestoneQueries = append(milestoneQueries, inner_bleve.NumericEqualityQuery(milestoneID, "milestone_id"))
+		}
+		queries = append(queries, bleve.NewDisjunctionQuery(milestoneQueries...))
+	}
+
+	if options.ProjectID != nil {
+		queries = append(queries, inner_bleve.NumericEqualityQuery(*options.ProjectID, "project_id"))
+	}
+	if options.ProjectBoardID != nil {
+		queries = append(queries, inner_bleve.NumericEqualityQuery(*options.ProjectBoardID, "project_board_id"))
+	}
+
+	if options.PosterID != nil {
+		queries = append(queries, inner_bleve.NumericEqualityQuery(*options.PosterID, "poster_id"))
+	}
+
+	if options.AssigneeID != nil {
+		queries = append(queries, inner_bleve.NumericEqualityQuery(*options.AssigneeID, "assignee_id"))
+	}
+
+	if options.MentionID != nil {
+		queries = append(queries, inner_bleve.NumericEqualityQuery(*options.MentionID, "mention_ids"))
+	}
+
+	if options.ReviewedID != nil {
+		queries = append(queries, inner_bleve.NumericEqualityQuery(*options.ReviewedID, "reviewed_ids"))
+	}
+	if options.ReviewRequestedID != nil {
+		queries = append(queries, inner_bleve.NumericEqualityQuery(*options.ReviewRequestedID, "review_requested_ids"))
+	}
+
+	if options.SubscriberID != nil {
+		queries = append(queries, inner_bleve.NumericEqualityQuery(*options.SubscriberID, "subscriber_ids"))
+	}
+
+	if options.UpdatedAfterUnix != nil || options.UpdatedBeforeUnix != nil {
+		queries = append(queries, inner_bleve.NumericRangeInclusiveQuery(options.UpdatedAfterUnix, options.UpdatedBeforeUnix, "updated_unix"))
+	}
+
+	var indexerQuery query.Query = bleve.NewConjunctionQuery(queries...)
+	if len(queries) == 0 {
+		indexerQuery = bleve.NewMatchAllQuery()
+	}
+
+	skip, limit := indexer_internal.ParsePaginator(options.Paginator)
+	search := bleve.NewSearchRequestOptions(indexerQuery, limit, skip, false)
+
+	if options.SortBy == "" {
+		options.SortBy = internal.SortByCreatedAsc
+	}
+
+	search.SortBy([]string{string(options.SortBy), "-_id"})
 
 	result, err := b.inner.Indexer.SearchInContext(ctx, search)
 	if err != nil {
 		return nil, err
 	}
 
-	ret := internal.SearchResult{
+	ret := &internal.SearchResult{
+		Total: int64(result.Total),
 		Hits:  make([]internal.Match, 0, len(result.Hits)),
 	}
 	for _, hit := range result.Hits {
@@ -175,5 +283,5 @@ func (b *Indexer) Search(ctx context.Context, options *internal.SearchOptions) (*internal.SearchResult, error) {
 			ID: id,
 		})
 	}
-	return &ret, nil
+	return ret, nil
 }
```
```diff
@@ -4,86 +4,15 @@
 package bleve
 
 import (
-	"context"
 	"testing"
 
-	"code.gitea.io/gitea/modules/indexer/issues/internal"
-
-	"github.com/stretchr/testify/assert"
+	"code.gitea.io/gitea/modules/indexer/issues/internal/tests"
 )
 
-func TestBleveIndexAndSearch(t *testing.T) {
+func TestBleveIndexer(t *testing.T) {
 	dir := t.TempDir()
 	indexer := NewIndexer(dir)
 	defer indexer.Close()
 
-	if _, err := indexer.Init(context.Background()); err != nil {
-		assert.Fail(t, "Unable to initialize bleve indexer: %v", err)
-		return
-	}
-
-	err := indexer.Index(context.Background(), []*internal.IndexerData{
-		{
-			ID:      1,
-			RepoID:  2,
-			Title:   "Issue search should support Chinese",
-			Content: "As title",
-			Comments: []string{
-				"test1",
-				"test2",
-			},
-		},
-		{
-			ID:      2,
-			RepoID:  2,
-			Title:   "CJK support could be optional",
-			Content: "Chinese Korean and Japanese should be supported but I would like it's not enabled by default",
-			Comments: []string{
-				"LGTM",
-				"Good idea",
-			},
-		},
-	})
-	assert.NoError(t, err)
-
-	keywords := []struct {
-		Keyword string
-		IDs     []int64
-	}{
-		{
-			Keyword: "search",
-			IDs:     []int64{1},
-		},
-		{
-			Keyword: "test1",
-			IDs:     []int64{1},
-		},
-		{
-			Keyword: "test2",
-			IDs:     []int64{1},
-		},
-		{
-			Keyword: "support",
-			IDs:     []int64{1, 2},
-		},
-		{
-			Keyword: "chinese",
-			IDs:     []int64{1, 2},
-		},
-		{
-			Keyword: "help",
-			IDs:     []int64{},
-		},
-	}
-
-	for _, kw := range keywords {
-		res, err := indexer.Search(context.TODO(), kw.Keyword, []int64{2}, 10, 0, "")
-		assert.NoError(t, err)
-
-		ids := make([]int64, 0, len(res.Hits))
-		for _, hit := range res.Hits {
-			ids = append(ids, hit.ID)
-		}
-		assert.ElementsMatch(t, kw.IDs, ids)
-	}
+	tests.TestIndexer(t, indexer)
 }
```
```diff
@@ -6,10 +6,13 @@ package db
 import (
 	"context"
 
-	issues_model "code.gitea.io/gitea/models/issues"
+	"code.gitea.io/gitea/models/db"
+	issue_model "code.gitea.io/gitea/models/issues"
 	indexer_internal "code.gitea.io/gitea/modules/indexer/internal"
 	inner_db "code.gitea.io/gitea/modules/indexer/internal/db"
 	"code.gitea.io/gitea/modules/indexer/issues/internal"
+
+	"xorm.io/builder"
 )
 
 var _ internal.Indexer = &Indexer{}
@@ -26,7 +29,7 @@ func NewIndexer() *Indexer {
 }
 
 // Index dummy function
-func (i *Indexer) Index(_ context.Context, _ []*internal.IndexerData) error {
+func (i *Indexer) Index(_ context.Context, _ ...*internal.IndexerData) error {
 	return nil
 }
 
@@ -36,19 +39,58 @@ func (i *Indexer) Delete(_ context.Context, _ ...int64) error {
 }
 
 // Search searches for issues
-func (i *Indexer) Search(ctx context.Context, kw string, repoIDs []int64, limit, start int, state string) (*internal.SearchResult, error) {
-	total, ids, err := issues_model.SearchIssueIDsByKeyword(ctx, kw, repoIDs, limit, start)
-	if err != nil {
-		return nil, err
-	}
-	result := internal.SearchResult{
-		Total: total,
-		Hits:  make([]internal.Match, 0, limit),
-	}
-	for _, id := range ids {
-		result.Hits = append(result.Hits, internal.Match{
-			ID: id,
-		})
-	}
-	return &result, nil
-}
+func (i *Indexer) Search(ctx context.Context, options *internal.SearchOptions) (*internal.SearchResult, error) {
+	// FIXME: I tried to avoid importing models here, but it seems to be impossible.
+	// We can provide a function to register the search function, so models/issues can register it.
+	// So models/issues will import modules/indexer/issues, it's OK because it's by design.
+	// But modules/indexer/issues has already imported models/issues to do UpdateRepoIndexer and UpdateIssueIndexer.
+	// And to avoid circular import, we have to move the functions to another package.
+	// I believe it should be services/indexer, sounds great!
+	// But the two functions are used in modules/notification/indexer, that means we will import services/indexer in modules/notification/indexer.
+	// So that's the root problem:
+	// The notification is defined in modules, but it's using lots of things should be in services.
+
+	cond := builder.NewCond()
+
+	if options.Keyword != "" {
+		repoCond := builder.In("repo_id", options.RepoIDs)
+		if len(options.RepoIDs) == 1 {
+			repoCond = builder.Eq{"repo_id": options.RepoIDs[0]}
+		}
+		subQuery := builder.Select("id").From("issue").Where(repoCond)
+
+		cond = builder.Or(
+			db.BuildCaseInsensitiveLike("issue.name", options.Keyword),
+			db.BuildCaseInsensitiveLike("issue.content", options.Keyword),
+			builder.In("issue.id", builder.Select("issue_id").
+				From("comment").
+				Where(builder.And(
+					builder.Eq{"type": issue_model.CommentTypeComment},
+					builder.In("issue_id", subQuery),
+					db.BuildCaseInsensitiveLike("content", options.Keyword),
+				)),
+			),
+		)
+	}
+
+	opt, err := ToDBOptions(ctx, options)
+	if err != nil {
+		return nil, err
+	}
+
+	ids, total, err := issue_model.IssueIDs(ctx, opt, cond)
+	if err != nil {
+		return nil, err
+	}
+
+	hits := make([]internal.Match, 0, len(ids))
+	for _, id := range ids {
+		hits = append(hits, internal.Match{
+			ID: id,
+		})
+	}
+	return &internal.SearchResult{
+		Total: total,
+		Hits:  hits,
+	}, nil
+}
```
modules/indexer/issues/db/options.go (new file, 114 lines)

```go
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package db

import (
	"context"
	"fmt"

	"code.gitea.io/gitea/models/db"
	issue_model "code.gitea.io/gitea/models/issues"
	"code.gitea.io/gitea/modules/container"
	"code.gitea.io/gitea/modules/indexer/issues/internal"
)

func ToDBOptions(ctx context.Context, options *internal.SearchOptions) (*issue_model.IssuesOptions, error) {
	convertID := func(id *int64) int64 {
		if id == nil {
			return 0
		}
		if *id == 0 {
			return db.NoConditionID
		}
		return *id
	}
	convertInt64 := func(i *int64) int64 {
		if i == nil {
			return 0
		}
		return *i
	}
	var sortType string
	switch options.SortBy {
	case internal.SortByCreatedAsc:
		sortType = "oldest"
	case internal.SortByUpdatedAsc:
		sortType = "leastupdate"
	case internal.SortByCommentsAsc:
		sortType = "leastcomment"
	case internal.SortByDeadlineAsc:
		sortType = "farduedate"
	case internal.SortByCreatedDesc:
		sortType = "newest"
	case internal.SortByUpdatedDesc:
		sortType = "recentupdate"
	case internal.SortByCommentsDesc:
		sortType = "mostcomment"
	case internal.SortByDeadlineDesc:
		sortType = "nearduedate"
	default:
		sortType = "newest"
	}

	opts := &issue_model.IssuesOptions{
		Paginator:          options.Paginator,
		RepoIDs:            options.RepoIDs,
		RepoCond:           nil,
		AssigneeID:         convertID(options.AssigneeID),
		PosterID:           convertID(options.PosterID),
		MentionedID:        convertID(options.MentionID),
		ReviewRequestedID:  convertID(options.ReviewRequestedID),
		ReviewedID:         convertID(options.ReviewedID),
		SubscriberID:       convertID(options.SubscriberID),
		ProjectID:          convertID(options.ProjectID),
		ProjectBoardID:     convertID(options.ProjectBoardID),
		IsClosed:           options.IsClosed,
		IsPull:             options.IsPull,
		IncludedLabelNames: nil,
		ExcludedLabelNames: nil,
		IncludeMilestones:  nil,
		SortType:           sortType,
		IssueIDs:           nil,
		UpdatedAfterUnix:   convertInt64(options.UpdatedAfterUnix),
		UpdatedBeforeUnix:  convertInt64(options.UpdatedBeforeUnix),
		PriorityRepoID:     0,
		IsArchived:         0,
		Org:                nil,
		Team:               nil,
		User:               nil,
	}

	if len(options.MilestoneIDs) == 1 && options.MilestoneIDs[0] == 0 {
		opts.MilestoneIDs = []int64{db.NoConditionID}
	} else {
		opts.MilestoneIDs = options.MilestoneIDs
	}

	if options.NoLabelOnly {
		opts.LabelIDs = []int64{0} // Be careful, it's zero, not db.NoConditionID
	} else {
		opts.LabelIDs = make([]int64, 0, len(options.IncludedLabelIDs)+len(options.ExcludedLabelIDs))
		opts.LabelIDs = append(opts.LabelIDs, options.IncludedLabelIDs...)
		for _, id := range options.ExcludedLabelIDs {
			opts.LabelIDs = append(opts.LabelIDs, -id)
		}

		if len(options.IncludedLabelIDs) == 0 && len(options.IncludedAnyLabelIDs) > 0 {
			_ = ctx // issue_model.GetLabelsByIDs should be called with ctx, this line can be removed when it's done.
			labels, err := issue_model.GetLabelsByIDs(options.IncludedAnyLabelIDs, "name")
			if err != nil {
				return nil, fmt.Errorf("GetLabelsByIDs: %v", err)
			}
			set := container.Set[string]{}
			for _, label := range labels {
				if !set.Contains(label.Name) {
					set.Add(label.Name)
					opts.IncludedLabelNames = append(opts.IncludedLabelNames, label.Name)
				}
			}
		}
	}

	return opts, nil
}
```
93
modules/indexer/issues/dboptions.go
Normal file
93
modules/indexer/issues/dboptions.go
Normal file
|
@ -0,0 +1,93 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package issues

import (
	"code.gitea.io/gitea/models/db"
	issues_model "code.gitea.io/gitea/models/issues"
)

func ToSearchOptions(keyword string, opts *issues_model.IssuesOptions) *SearchOptions {
	searchOpt := &SearchOptions{
		Keyword:   keyword,
		RepoIDs:   opts.RepoIDs,
		AllPublic: false,
		IsPull:    opts.IsPull,
		IsClosed:  opts.IsClosed,
	}

	if len(opts.LabelIDs) == 1 && opts.LabelIDs[0] == 0 {
		searchOpt.NoLabelOnly = true
	} else {
		for _, labelID := range opts.LabelIDs {
			if labelID > 0 {
				searchOpt.IncludedLabelIDs = append(searchOpt.IncludedLabelIDs, labelID)
			} else {
				searchOpt.ExcludedLabelIDs = append(searchOpt.ExcludedLabelIDs, -labelID)
			}
		}
		// opts.IncludedLabelNames and opts.ExcludedLabelNames are not supported here.
		// It's not a TO DO, it's just unnecessary.
	}

	if len(opts.MilestoneIDs) == 1 && opts.MilestoneIDs[0] == db.NoConditionID {
		searchOpt.MilestoneIDs = []int64{0}
	} else {
		searchOpt.MilestoneIDs = opts.MilestoneIDs
	}

	if opts.AssigneeID > 0 {
		searchOpt.AssigneeID = &opts.AssigneeID
	}
	if opts.PosterID > 0 {
		searchOpt.PosterID = &opts.PosterID
	}
	if opts.MentionedID > 0 {
		searchOpt.MentionID = &opts.MentionedID
	}
	if opts.ReviewedID > 0 {
		searchOpt.ReviewedID = &opts.ReviewedID
	}
	if opts.ReviewRequestedID > 0 {
		searchOpt.ReviewRequestedID = &opts.ReviewRequestedID
	}
	if opts.SubscriberID > 0 {
		searchOpt.SubscriberID = &opts.SubscriberID
	}

	if opts.UpdatedAfterUnix > 0 {
		searchOpt.UpdatedAfterUnix = &opts.UpdatedAfterUnix
	}
	if opts.UpdatedBeforeUnix > 0 {
		searchOpt.UpdatedBeforeUnix = &opts.UpdatedBeforeUnix
	}

	searchOpt.Paginator = opts.Paginator

	switch opts.SortType {
	case "":
		searchOpt.SortBy = SortByCreatedDesc
	case "oldest":
		searchOpt.SortBy = SortByCreatedAsc
	case "recentupdate":
		searchOpt.SortBy = SortByUpdatedDesc
	case "leastupdate":
		searchOpt.SortBy = SortByUpdatedAsc
	case "mostcomment":
		searchOpt.SortBy = SortByCommentsDesc
	case "leastcomment":
		searchOpt.SortBy = SortByCommentsAsc
	case "nearduedate":
		searchOpt.SortBy = SortByDeadlineAsc
	case "farduedate":
		searchOpt.SortBy = SortByDeadlineDesc
	case "priority", "priorityrepo", "project-column-sorting":
		// Unsupported sort type for search
		searchOpt.SortBy = SortByUpdatedDesc
	default:
		searchOpt.SortBy = SortByUpdatedDesc
	}

	return searchOpt
}
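ToSearchOptions decodes the label-ID encoding that the database-side options use: a positive ID means "must have this label", a negative ID means "must not have it", and a single 0 means "no labels at all". A minimal, dependency-free sketch of that convention, mirroring the loop above:

package main

import "fmt"

// splitLabelIDs mirrors the decoding done by ToSearchOptions above.
func splitLabelIDs(labelIDs []int64) (included, excluded []int64, noLabelOnly bool) {
	if len(labelIDs) == 1 && labelIDs[0] == 0 {
		return nil, nil, true
	}
	for _, id := range labelIDs {
		if id > 0 {
			included = append(included, id)
		} else {
			excluded = append(excluded, -id)
		}
	}
	return included, excluded, false
}

func main() {
	inc, exc, none := splitLabelIDs([]int64{2000, -2003, 2001})
	fmt.Println(inc, exc, none) // [2000 2001] [2003] false
}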
@@ -7,6 +7,7 @@ import (
 	"context"
 	"fmt"
 	"strconv"
+	"strings"

 	"code.gitea.io/gitea/modules/graceful"
 	indexer_internal "code.gitea.io/gitea/modules/indexer/internal"
@@ -17,7 +18,7 @@ import (
 )

 const (
-	issueIndexerLatestVersion = 0
+	issueIndexerLatestVersion = 1
 )

 var _ internal.Indexer = &Indexer{}
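Bumping issueIndexerLatestVersion from 0 to 1 forces a reindex because the version is baked into the index name via VersionedIndexName(), so the new version simply targets a fresh index. The exact naming scheme below is an assumption; only the idea matters here.

package main

import "fmt"

// versionedIndexName sketches why a version bump causes the indexer to
// write to (and search in) a brand-new index, leaving the old one behind.
func versionedIndexName(name string, version int) string {
	return fmt.Sprintf("%s.v%d", name, version) // assumed scheme
}

func main() {
	fmt.Println(versionedIndexName("gitea_issues", 0)) // gitea_issues.v0
	fmt.Println(versionedIndexName("gitea_issues", 1)) // gitea_issues.v1
}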
@@ -39,36 +40,44 @@ func NewIndexer(url, indexerName string) *Indexer {
 }

 const (
-	defaultMapping = `{
+	defaultMapping = `
+{
 	"mappings": {
 		"properties": {
-			"id": {
-				"type": "integer",
-				"index": true
-			},
-			"repo_id": {
-				"type": "integer",
-				"index": true
-			},
-			"title": {
-				"type": "text",
-				"index": true
-			},
-			"content": {
-				"type": "text",
-				"index": true
-			},
-			"comments": {
-				"type" : "text",
-				"index": true
-			}
+			"id": { "type": "integer", "index": true },
+			"repo_id": { "type": "integer", "index": true },
+			"is_public": { "type": "boolean", "index": true },
+
+			"title": { "type": "text", "index": true },
+			"content": { "type": "text", "index": true },
+			"comments": { "type" : "text", "index": true },
+
+			"is_pull": { "type": "boolean", "index": true },
+			"is_closed": { "type": "boolean", "index": true },
+			"label_ids": { "type": "integer", "index": true },
+			"no_label": { "type": "boolean", "index": true },
+			"milestone_id": { "type": "integer", "index": true },
+			"project_id": { "type": "integer", "index": true },
+			"project_board_id": { "type": "integer", "index": true },
+			"poster_id": { "type": "integer", "index": true },
+			"assignee_id": { "type": "integer", "index": true },
+			"mention_ids": { "type": "integer", "index": true },
+			"reviewed_ids": { "type": "integer", "index": true },
+			"review_requested_ids": { "type": "integer", "index": true },
+			"subscriber_ids": { "type": "integer", "index": true },
+			"updated_unix": { "type": "integer", "index": true },
+
+			"created_unix": { "type": "integer", "index": true },
+			"deadline_unix": { "type": "integer", "index": true },
+			"comment_count": { "type": "integer", "index": true }
 		}
 	}
-}`
+}
+`
 )

 // Index will save the index data
-func (b *Indexer) Index(ctx context.Context, issues []*internal.IndexerData) error {
+func (b *Indexer) Index(ctx context.Context, issues ...*internal.IndexerData) error {
 	if len(issues) == 0 {
 		return nil
 	} else if len(issues) == 1 {
@@ -76,13 +85,7 @@ func (b *Indexer) Index(ctx context.Context, issues []*internal.IndexerData) error {
 		_, err := b.inner.Client.Index().
 			Index(b.inner.VersionedIndexName()).
 			Id(fmt.Sprintf("%d", issue.ID)).
-			BodyJson(map[string]any{
-				"id":       issue.ID,
-				"repo_id":  issue.RepoID,
-				"title":    issue.Title,
-				"content":  issue.Content,
-				"comments": issue.Comments,
-			}).
+			BodyJson(issue).
 			Do(ctx)
 		return err
 	}
@@ -93,13 +96,7 @@ func (b *Indexer) Index(ctx context.Context, issues []*internal.IndexerData) error {
 			elastic.NewBulkIndexRequest().
 				Index(b.inner.VersionedIndexName()).
 				Id(fmt.Sprintf("%d", issue.ID)).
-				Doc(map[string]any{
-					"id":       issue.ID,
-					"repo_id":  issue.RepoID,
-					"title":    issue.Title,
-					"content":  issue.Content,
-					"comments": issue.Comments,
-				}),
+				Doc(issue),
 		)
 	}
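Replacing the hand-built map with BodyJson(issue) / Doc(issue) works because the elastic client marshals the value with encoding/json, so the `json:"..."` tags on IndexerData line up with the property names in defaultMapping. A trimmed-down sketch of that correspondence (the struct here is a simplified stand-in, not the real IndexerData):

package main

import (
	"encoding/json"
	"fmt"
)

// indexerData is a cut-down stand-in showing how json tags become
// the document field names that the mapping above declares.
type indexerData struct {
	ID       int64    `json:"id"`
	RepoID   int64    `json:"repo_id"`
	Title    string   `json:"title"`
	Comments []string `json:"comments"`
}

func main() {
	b, _ := json.Marshal(indexerData{ID: 1, RepoID: 2, Title: "hi", Comments: []string{"a"}})
	fmt.Println(string(b)) // {"id":1,"repo_id":2,"title":"hi","comments":["a"]}
}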
@@ -140,23 +137,113 @@ func (b *Indexer) Delete(ctx context.Context, ids ...int64) error {

 // Search searches for issues by given conditions.
 // Returns the matching issue IDs
-func (b *Indexer) Search(ctx context.Context, keyword string, repoIDs []int64, limit, start int, state string) (*internal.SearchResult, error) {
-	kwQuery := elastic.NewMultiMatchQuery(keyword, "title", "content", "comments")
+func (b *Indexer) Search(ctx context.Context, options *internal.SearchOptions) (*internal.SearchResult, error) {
 	query := elastic.NewBoolQuery()
-	query = query.Must(kwQuery)
-	if len(repoIDs) > 0 {
-		repoStrs := make([]any, 0, len(repoIDs))
-		for _, repoID := range repoIDs {
-			repoStrs = append(repoStrs, repoID)
-		}
-		repoQuery := elastic.NewTermsQuery("repo_id", repoStrs...)
-		query = query.Must(repoQuery)
+
+	if options.Keyword != "" {
+		query.Must(elastic.NewMultiMatchQuery(options.Keyword, "title", "content", "comments"))
+	}
+
+	if len(options.RepoIDs) > 0 {
+		q := elastic.NewBoolQuery()
+		q.Should(elastic.NewTermsQuery("repo_id", toAnySlice(options.RepoIDs)...))
+		if options.AllPublic {
+			q.Should(elastic.NewTermQuery("is_public", true))
+		}
+		query.Must(q)
 	}
+
+	if !options.IsPull.IsNone() {
+		query.Must(elastic.NewTermQuery("is_pull", options.IsPull.IsTrue()))
+	}
+	if !options.IsClosed.IsNone() {
+		query.Must(elastic.NewTermQuery("is_closed", options.IsClosed.IsTrue()))
+	}
+
+	if options.NoLabelOnly {
+		query.Must(elastic.NewTermQuery("no_label", true))
+	} else {
+		if len(options.IncludedLabelIDs) > 0 {
+			q := elastic.NewBoolQuery()
+			for _, labelID := range options.IncludedLabelIDs {
+				q.Must(elastic.NewTermQuery("label_ids", labelID))
+			}
+			query.Must(q)
+		} else if len(options.IncludedAnyLabelIDs) > 0 {
+			query.Must(elastic.NewTermsQuery("label_ids", toAnySlice(options.IncludedAnyLabelIDs)...))
+		}
+		if len(options.ExcludedLabelIDs) > 0 {
+			q := elastic.NewBoolQuery()
+			for _, labelID := range options.ExcludedLabelIDs {
+				q.MustNot(elastic.NewTermQuery("label_ids", labelID))
+			}
+			query.Must(q)
+		}
+	}
+
+	if len(options.MilestoneIDs) > 0 {
+		query.Must(elastic.NewTermsQuery("milestone_id", toAnySlice(options.MilestoneIDs)...))
+	}
+
+	if options.ProjectID != nil {
+		query.Must(elastic.NewTermQuery("project_id", *options.ProjectID))
+	}
+	if options.ProjectBoardID != nil {
+		query.Must(elastic.NewTermQuery("project_board_id", *options.ProjectBoardID))
+	}
+
+	if options.PosterID != nil {
+		query.Must(elastic.NewTermQuery("poster_id", *options.PosterID))
+	}
+
+	if options.AssigneeID != nil {
+		query.Must(elastic.NewTermQuery("assignee_id", *options.AssigneeID))
+	}
+
+	if options.MentionID != nil {
+		query.Must(elastic.NewTermQuery("mention_ids", *options.MentionID))
+	}
+
+	if options.ReviewedID != nil {
+		query.Must(elastic.NewTermQuery("reviewed_ids", *options.ReviewedID))
+	}
+	if options.ReviewRequestedID != nil {
+		query.Must(elastic.NewTermQuery("review_requested_ids", *options.ReviewRequestedID))
+	}
+
+	if options.SubscriberID != nil {
+		query.Must(elastic.NewTermQuery("subscriber_ids", *options.SubscriberID))
+	}
+
+	if options.UpdatedAfterUnix != nil || options.UpdatedBeforeUnix != nil {
+		q := elastic.NewRangeQuery("updated_unix")
+		if options.UpdatedAfterUnix != nil {
+			q.Gte(*options.UpdatedAfterUnix)
+		}
+		if options.UpdatedBeforeUnix != nil {
+			q.Lte(*options.UpdatedBeforeUnix)
+		}
+		query.Must(q)
+	}
+
+	if options.SortBy == "" {
+		options.SortBy = internal.SortByCreatedAsc
+	}
+	sortBy := []elastic.Sorter{
+		parseSortBy(options.SortBy),
+		elastic.NewFieldSort("id").Desc(),
+	}
+
+	// See https://stackoverflow.com/questions/35206409/elasticsearch-2-1-result-window-is-too-large-index-max-result-window/35221900
+	// TODO: make it configurable since it's configurable in elasticsearch
+	const maxPageSize = 10000
+
+	skip, limit := indexer_internal.ParsePaginator(options.Paginator, maxPageSize)
 	searchResult, err := b.inner.Client.Search().
 		Index(b.inner.VersionedIndexName()).
 		Query(query).
-		Sort("_score", false).
-		From(start).Size(limit).
+		SortBy(sortBy...).
+		From(skip).Size(limit).
 		Do(ctx)
 	if err != nil {
 		return nil, err
@@ -175,3 +262,20 @@ func (b *Indexer) Search(ctx context.Context, keyword string, repoIDs []int64, limit, start int, state string) (*internal.SearchResult, error) {
 		Hits: hits,
 	}, nil
 }
+
+func toAnySlice[T any](s []T) []any {
+	ret := make([]any, 0, len(s))
+	for _, item := range s {
+		ret = append(ret, item)
+	}
+	return ret
+}
+
+func parseSortBy(sortBy internal.SortBy) elastic.Sorter {
+	field := strings.TrimPrefix(string(sortBy), "-")
+	ret := elastic.NewFieldSort(field)
+	if strings.HasPrefix(string(sortBy), "-") {
+		ret.Desc()
+	}
+	return ret
+}
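The SortBy values double as field names: a leading "-" means descending, otherwise ascending, and a secondary sort on "id" keeps the ordering stable. A standalone sketch of the same parsing rule, without pulling in the elastic client:

package main

import (
	"fmt"
	"strings"
)

// parseSort mirrors parseSortBy above: strip the optional "-" prefix to get
// the field name, and remember whether the sort is descending.
func parseSort(sortBy string) (field string, desc bool) {
	field = strings.TrimPrefix(sortBy, "-")
	return field, strings.HasPrefix(sortBy, "-")
}

func main() {
	fmt.Println(parseSort("-updated_unix")) // updated_unix true
	fmt.Println(parseSort("created_unix"))  // created_unix false
}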
modules/indexer/issues/elasticsearch/elasticsearch_test.go (new file, 48 lines)
@@ -0,0 +1,48 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package elasticsearch

import (
	"fmt"
	"net/http"
	"os"
	"testing"
	"time"

	"code.gitea.io/gitea/modules/indexer/issues/internal/tests"
)

func TestElasticsearchIndexer(t *testing.T) {
	// The elasticsearch instance started by pull-db-tests.yml > test-unit > services > elasticsearch
	url := "http://elastic:changeme@elasticsearch:9200"

	if os.Getenv("CI") == "" {
		// Make it possible to run tests against a local elasticsearch instance
		url = os.Getenv("TEST_ELASTICSEARCH_URL")
		if url == "" {
			t.Skip("TEST_ELASTICSEARCH_URL not set and not running in CI")
			return
		}
	}

	ok := false
	for i := 0; i < 60; i++ {
		resp, err := http.Get(url)
		if err == nil && resp.StatusCode == http.StatusOK {
			ok = true
			break
		}
		t.Logf("Waiting for elasticsearch to be up: %v", err)
		time.Sleep(time.Second)
	}
	if !ok {
		t.Fatalf("Failed to wait for elasticsearch to be up")
		return
	}

	indexer := NewIndexer(url, fmt.Sprintf("test_elasticsearch_indexer_%d", time.Now().Unix()))
	defer indexer.Close()

	tests.TestIndexer(t, indexer)
}
@@ -11,7 +11,6 @@ import (
 	"time"

 	db_model "code.gitea.io/gitea/models/db"
-	issues_model "code.gitea.io/gitea/models/issues"
 	repo_model "code.gitea.io/gitea/models/repo"
 	"code.gitea.io/gitea/modules/graceful"
 	"code.gitea.io/gitea/modules/indexer/issues/bleve"
@@ -26,9 +25,24 @@ import (
 	"code.gitea.io/gitea/modules/util"
 )

+// IndexerMetadata is used to send data to the queue, so it contains only the IDs.
+// It may look weird, because it has to be compatible with the old queue data format.
+// If the IsDelete flag is true, the IDs specify the issues to delete from the index without querying the database.
+// If the IsDelete flag is false, the ID specifies the issue to index, so the Indexer will query the database to get the issue data.
+// It should be noted that if the ID does not exist in the database, its index will be deleted too even if IsDelete is false.
+// Valid values:
+//   - IsDelete = true, IDs = [1, 2, 3], and ID will be ignored
+//   - IsDelete = false, ID = 1, and IDs will be ignored
+type IndexerMetadata struct {
+	ID int64 `json:"id"`
+
+	IsDelete bool    `json:"is_delete"`
+	IDs      []int64 `json:"ids"`
+}
+
 var (
 	// issueIndexerQueue queue of issue ids to be updated
-	issueIndexerQueue *queue.WorkerPoolQueue[*internal.IndexerData]
+	issueIndexerQueue *queue.WorkerPoolQueue[*IndexerMetadata]
 	// globalIndexer is the global indexer, it cannot be nil.
 	// When the real indexer is not ready, it will be a dummy indexer which will return error to explain it's not ready.
 	// So it's always safe to use it as *globalIndexer.Load() and call its methods.
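The two valid shapes described in the comment above serialize to quite different queue payloads; an update carries a single ID, a delete carries a batch of IDs. A small sketch (the struct is redeclared locally so the program is self-contained):

package main

import (
	"encoding/json"
	"fmt"
)

// indexerMetadata mirrors IndexerMetadata above for illustration.
type indexerMetadata struct {
	ID       int64   `json:"id"`
	IsDelete bool    `json:"is_delete"`
	IDs      []int64 `json:"ids"`
}

func main() {
	update, _ := json.Marshal(indexerMetadata{ID: 1})
	del, _ := json.Marshal(indexerMetadata{IsDelete: true, IDs: []int64{1, 2, 3}})
	fmt.Println(string(update)) // {"id":1,"is_delete":false,"ids":null}
	fmt.Println(string(del))    // {"id":0,"is_delete":true,"ids":[1,2,3]}
}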
@@ -50,37 +64,7 @@ func InitIssueIndexer(syncReindex bool) {
 	indexerInitWaitChannel := make(chan time.Duration, 1)

 	// Create the Queue
-	switch setting.Indexer.IssueType {
-	case "bleve", "elasticsearch", "meilisearch":
-		handler := func(items ...*internal.IndexerData) (unhandled []*internal.IndexerData) {
-			indexer := *globalIndexer.Load()
-			toIndex := make([]*internal.IndexerData, 0, len(items))
-			for _, indexerData := range items {
-				log.Trace("IndexerData Process: %d %v %t", indexerData.ID, indexerData.IDs, indexerData.IsDelete)
-				if indexerData.IsDelete {
-					if err := indexer.Delete(ctx, indexerData.IDs...); err != nil {
-						log.Error("Issue indexer handler: failed to from index: %v Error: %v", indexerData.IDs, err)
-						unhandled = append(unhandled, indexerData)
-					}
-					continue
-				}
-				toIndex = append(toIndex, indexerData)
-			}
-			if err := indexer.Index(ctx, toIndex); err != nil {
-				log.Error("Error whilst indexing: %v Error: %v", toIndex, err)
-				unhandled = append(unhandled, toIndex...)
-			}
-			return unhandled
-		}
-
-		issueIndexerQueue = queue.CreateSimpleQueue(ctx, "issue_indexer", handler)
-
-		if issueIndexerQueue == nil {
-			log.Fatal("Unable to create issue indexer queue")
-		}
-	default:
-		issueIndexerQueue = queue.CreateSimpleQueue[*internal.IndexerData](ctx, "issue_indexer", nil)
-	}
+	issueIndexerQueue = queue.CreateUniqueQueue(ctx, "issue_indexer", getIssueIndexerQueueHandler(ctx))

 	graceful.GetManager().RunAtTerminate(finished)
@@ -176,6 +160,44 @@ func InitIssueIndexer(syncReindex bool) {
 	}
 }

+func getIssueIndexerQueueHandler(ctx context.Context) func(items ...*IndexerMetadata) []*IndexerMetadata {
+	return func(items ...*IndexerMetadata) []*IndexerMetadata {
+		var unhandled []*IndexerMetadata
+
+		indexer := *globalIndexer.Load()
+		for _, item := range items {
+			log.Trace("IndexerMetadata Process: %d %v %t", item.ID, item.IDs, item.IsDelete)
+			if item.IsDelete {
+				if err := indexer.Delete(ctx, item.IDs...); err != nil {
+					log.Error("Issue indexer handler: failed to delete from index: %v Error: %v", item.IDs, err)
+					unhandled = append(unhandled, item)
+				}
+				continue
+			}
+			data, existed, err := getIssueIndexerData(ctx, item.ID)
+			if err != nil {
+				log.Error("Issue indexer handler: failed to get issue data of %d: %v", item.ID, err)
+				unhandled = append(unhandled, item)
+				continue
+			}
+			if !existed {
+				if err := indexer.Delete(ctx, item.ID); err != nil {
+					log.Error("Issue indexer handler: failed to delete issue %d from index: %v", item.ID, err)
+					unhandled = append(unhandled, item)
+				}
+				continue
+			}
+			if err := indexer.Index(ctx, data); err != nil {
+				log.Error("Issue indexer handler: failed to index issue %d: %v", item.ID, err)
+				unhandled = append(unhandled, item)
+				continue
+			}
+		}
+
+		return unhandled
+	}
+}
+
 // populateIssueIndexer populate the issue indexer with issue data
 func populateIssueIndexer(ctx context.Context) {
 	ctx, _, finished := process.GetManager().AddTypedContext(ctx, "Service: PopulateIssueIndexer", process.SystemProcessType, true)
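The handler contract used above is worth calling out: process what you can, return what failed, and the worker-pool queue re-delivers the returned items later. A generic, dependency-free sketch of that contract, under the assumption that the queue retries exactly the returned slice:

package main

import "fmt"

// handle wraps a per-item processor into the "return the unhandled items"
// shape that the queue handler above follows.
func handle[T any](process func(T) error) func(items ...T) (unhandled []T) {
	return func(items ...T) (unhandled []T) {
		for _, item := range items {
			if err := process(item); err != nil {
				unhandled = append(unhandled, item) // re-queued for a later attempt
			}
		}
		return unhandled
	}
}

func main() {
	h := handle(func(id int64) error {
		if id%2 == 0 {
			return fmt.Errorf("transient failure for %d", id)
		}
		return nil
	})
	fmt.Println(h(1, 2, 3, 4)) // [2 4] would be re-delivered
}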
@@ -203,101 +225,87 @@ func populateIssueIndexer(ctx context.Context) {
 	}

 	for _, repo := range repos {
+		for {
 			select {
 			case <-ctx.Done():
 				log.Info("Issue Indexer population shutdown before completion")
 				return
 			default:
 			}
-		UpdateRepoIndexer(ctx, repo)
+			if err := updateRepoIndexer(ctx, repo.ID); err != nil {
+				log.Warn("Retry to populate issue indexer for repo %d: %v", repo.ID, err)
+				continue
+			}
+			break
+		}
 	}
 }

 // UpdateRepoIndexer add/update all issues of the repositories
-func UpdateRepoIndexer(ctx context.Context, repo *repo_model.Repository) {
-	is, err := issues_model.Issues(ctx, &issues_model.IssuesOptions{
-		RepoIDs:  []int64{repo.ID},
-		IsClosed: util.OptionalBoolNone,
-		IsPull:   util.OptionalBoolNone,
-	})
-	if err != nil {
-		log.Error("Issues: %v", err)
-		return
-	}
-	if err = issues_model.IssueList(is).LoadDiscussComments(ctx); err != nil {
-		log.Error("LoadDiscussComments: %v", err)
-		return
-	}
-	for _, issue := range is {
-		UpdateIssueIndexer(issue)
+func UpdateRepoIndexer(ctx context.Context, repoID int64) {
+	if err := updateRepoIndexer(ctx, repoID); err != nil {
+		log.Error("Unable to push repo %d to issue indexer: %v", repoID, err)
 	}
 }

 // UpdateIssueIndexer add/update an issue to the issue indexer
-func UpdateIssueIndexer(issue *issues_model.Issue) {
-	var comments []string
-	for _, comment := range issue.Comments {
-		if comment.Type == issues_model.CommentTypeComment {
-			comments = append(comments, comment.Content)
-		}
-	}
-	issueType := "issue"
-	if issue.IsPull {
-		issueType = "pull"
-	}
-	indexerData := &internal.IndexerData{
-		ID:        issue.ID,
-		RepoID:    issue.RepoID,
-		State:     string(issue.State()),
-		IssueType: issueType,
-		Title:     issue.Title,
-		Content:   issue.Content,
-		Comments:  comments,
-	}
-	log.Debug("Adding to channel: %v", indexerData)
-	if err := issueIndexerQueue.Push(indexerData); err != nil {
-		log.Error("Unable to push to issue indexer: %v: Error: %v", indexerData, err)
+func UpdateIssueIndexer(issueID int64) {
+	if err := updateIssueIndexer(issueID); err != nil {
+		log.Error("Unable to push issue %d to issue indexer: %v", issueID, err)
 	}
 }

 // DeleteRepoIssueIndexer deletes repo's all issues indexes
-func DeleteRepoIssueIndexer(ctx context.Context, repo *repo_model.Repository) {
-	var ids []int64
-	ids, err := issues_model.GetIssueIDsByRepoID(ctx, repo.ID)
-	if err != nil {
-		log.Error("GetIssueIDsByRepoID failed: %v", err)
-		return
-	}
-
-	if len(ids) == 0 {
-		return
-	}
-	indexerData := &internal.IndexerData{
-		IDs:      ids,
-		IsDelete: true,
-	}
-	if err := issueIndexerQueue.Push(indexerData); err != nil {
-		log.Error("Unable to push to issue indexer: %v: Error: %v", indexerData, err)
-	}
-}
-
-// SearchIssuesByKeyword search issue ids by keywords and repo id
-// WARNNING: You have to ensure user have permission to visit repoIDs' issues
-func SearchIssuesByKeyword(ctx context.Context, repoIDs []int64, keyword, state string) ([]int64, error) {
-	var issueIDs []int64
-	indexer := *globalIndexer.Load()
-	res, err := indexer.Search(ctx, keyword, repoIDs, 50, 0, state)
-	if err != nil {
-		return nil, err
-	}
-	for _, r := range res.Hits {
-		issueIDs = append(issueIDs, r.ID)
-	}
-	return issueIDs, nil
+func DeleteRepoIssueIndexer(ctx context.Context, repoID int64) {
+	if err := deleteRepoIssueIndexer(ctx, repoID); err != nil {
+		log.Error("Unable to push deleted repo %d to issue indexer: %v", repoID, err)
+	}
 }

 // IsAvailable checks if issue indexer is available
 func IsAvailable(ctx context.Context) bool {
 	return (*globalIndexer.Load()).Ping(ctx) == nil
 }
+
+// SearchOptions indicates the options for searching issues
+type SearchOptions internal.SearchOptions
+
+const (
+	SortByCreatedDesc  = internal.SortByCreatedDesc
+	SortByUpdatedDesc  = internal.SortByUpdatedDesc
+	SortByCommentsDesc = internal.SortByCommentsDesc
+	SortByDeadlineDesc = internal.SortByDeadlineDesc
+	SortByCreatedAsc   = internal.SortByCreatedAsc
+	SortByUpdatedAsc   = internal.SortByUpdatedAsc
+	SortByCommentsAsc  = internal.SortByCommentsAsc
+	SortByDeadlineAsc  = internal.SortByDeadlineAsc
+)
+
+// SearchIssues search issues by options.
+// It returns the matching issue IDs and the total count of matching issues.
+func SearchIssues(ctx context.Context, opts *SearchOptions) ([]int64, int64, error) {
+	indexer := *globalIndexer.Load()
+
+	if opts.Keyword == "" {
+		// This is a conservative shortcut.
+		// If the keyword is empty, the database has better (at least not worse) performance for filtering issues.
+		// When the keyword is empty, the request tends to be a listing rather than a search of issues.
+		// So if the user creates an issue and lists issues immediately, the issue may not be listed because the indexer needs time to index the issue.
+		// Even worse, an external indexer like elasticsearch may be unavailable for a while,
+		// and the user may not be able to list issues completely until it is available again.
+		indexer = db.NewIndexer()
+	}
+
+	result, err := indexer.Search(ctx, (*internal.SearchOptions)(opts))
+	if err != nil {
+		return nil, 0, err
+	}
+
+	ret := make([]int64, 0, len(result.Hits))
+	for _, hit := range result.Hits {
+		ret = append(ret, hit.ID)
+	}
+
+	return ret, result.Total, nil
+}
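A sketch of a possible call site for the new entry point: keyword, filters, paging and sorting all travel in one SearchOptions value. The package alias issue_indexer and the enclosing function are illustrative assumptions, not part of the diff.

package example

import (
	"context"

	"code.gitea.io/gitea/models/db"
	issue_indexer "code.gitea.io/gitea/modules/indexer/issues"
	"code.gitea.io/gitea/modules/util"
)

// listOpenIssues shows how a caller might drive SearchIssues: it returns
// the first page of open issues in one repo matching a keyword, newest
// activity first, plus the total match count for pagination.
func listOpenIssues(ctx context.Context, repoID int64) ([]int64, int64, error) {
	return issue_indexer.SearchIssues(ctx, &issue_indexer.SearchOptions{
		Keyword:   "crash",
		RepoIDs:   []int64{repoID},
		IsClosed:  util.OptionalBoolFalse,
		SortBy:    issue_indexer.SortByUpdatedDesc,
		Paginator: &db.ListOptions{Page: 1, PageSize: 20},
	})
}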
@@ -50,21 +50,41 @@ func TestBleveSearchIssues(t *testing.T) {

 	time.Sleep(5 * time.Second)

-	ids, err := SearchIssuesByKeyword(context.TODO(), []int64{1}, "issue2", "")
-	assert.NoError(t, err)
-	assert.EqualValues(t, []int64{2}, ids)
+	t.Run("issue2", func(t *testing.T) {
+		ids, _, err := SearchIssues(context.TODO(), &SearchOptions{
+			Keyword: "issue2",
+			RepoIDs: []int64{1},
+		})
+		assert.NoError(t, err)
+		assert.EqualValues(t, []int64{2}, ids)
+	})

-	ids, err = SearchIssuesByKeyword(context.TODO(), []int64{1}, "first", "")
-	assert.NoError(t, err)
-	assert.EqualValues(t, []int64{1}, ids)
+	t.Run("first", func(t *testing.T) {
+		ids, _, err := SearchIssues(context.TODO(), &SearchOptions{
+			Keyword: "first",
+			RepoIDs: []int64{1},
+		})
+		assert.NoError(t, err)
+		assert.EqualValues(t, []int64{1}, ids)
+	})

-	ids, err = SearchIssuesByKeyword(context.TODO(), []int64{1}, "for", "")
-	assert.NoError(t, err)
-	assert.ElementsMatch(t, []int64{1, 2, 3, 5, 11}, ids)
+	t.Run("for", func(t *testing.T) {
+		ids, _, err := SearchIssues(context.TODO(), &SearchOptions{
+			Keyword: "for",
+			RepoIDs: []int64{1},
+		})
+		assert.NoError(t, err)
+		assert.ElementsMatch(t, []int64{1, 2, 3, 5, 11}, ids)
+	})

-	ids, err = SearchIssuesByKeyword(context.TODO(), []int64{1}, "good", "")
-	assert.NoError(t, err)
-	assert.EqualValues(t, []int64{1}, ids)
+	t.Run("good", func(t *testing.T) {
+		ids, _, err := SearchIssues(context.TODO(), &SearchOptions{
+			Keyword: "good",
+			RepoIDs: []int64{1},
+		})
+		assert.NoError(t, err)
+		assert.EqualValues(t, []int64{1}, ids)
+	})
 }

 func TestDBSearchIssues(t *testing.T) {
@@ -73,19 +93,39 @@ func TestDBSearchIssues(t *testing.T) {
 	setting.Indexer.IssueType = "db"
 	InitIssueIndexer(true)

-	ids, err := SearchIssuesByKeyword(context.TODO(), []int64{1}, "issue2", "")
-	assert.NoError(t, err)
-	assert.EqualValues(t, []int64{2}, ids)
+	t.Run("issue2", func(t *testing.T) {
+		ids, _, err := SearchIssues(context.TODO(), &SearchOptions{
+			Keyword: "issue2",
+			RepoIDs: []int64{1},
+		})
+		assert.NoError(t, err)
+		assert.EqualValues(t, []int64{2}, ids)
+	})

-	ids, err = SearchIssuesByKeyword(context.TODO(), []int64{1}, "first", "")
-	assert.NoError(t, err)
-	assert.EqualValues(t, []int64{1}, ids)
+	t.Run("first", func(t *testing.T) {
+		ids, _, err := SearchIssues(context.TODO(), &SearchOptions{
+			Keyword: "first",
+			RepoIDs: []int64{1},
+		})
+		assert.NoError(t, err)
+		assert.EqualValues(t, []int64{1}, ids)
+	})

-	ids, err = SearchIssuesByKeyword(context.TODO(), []int64{1}, "for", "")
-	assert.NoError(t, err)
-	assert.ElementsMatch(t, []int64{1, 2, 3, 5, 11}, ids)
+	t.Run("for", func(t *testing.T) {
+		ids, _, err := SearchIssues(context.TODO(), &SearchOptions{
+			Keyword: "for",
+			RepoIDs: []int64{1},
+		})
+		assert.NoError(t, err)
+		assert.ElementsMatch(t, []int64{1, 2, 3, 5, 11}, ids)
+	})

-	ids, err = SearchIssuesByKeyword(context.TODO(), []int64{1}, "good", "")
-	assert.NoError(t, err)
-	assert.EqualValues(t, []int64{1}, ids)
+	t.Run("good", func(t *testing.T) {
+		ids, _, err := SearchIssues(context.TODO(), &SearchOptions{
+			Keyword: "good",
+			RepoIDs: []int64{1},
+		})
+		assert.NoError(t, err)
+		assert.EqualValues(t, []int64{1}, ids)
+	})
 }
@@ -13,9 +13,9 @@ import (
 // Indexer defines an interface to index issue contents
 type Indexer interface {
 	internal.Indexer
-	Index(ctx context.Context, issue []*IndexerData) error
+	Index(ctx context.Context, issue ...*IndexerData) error
 	Delete(ctx context.Context, ids ...int64) error
-	Search(ctx context.Context, kw string, repoIDs []int64, limit, start int, state string) (*SearchResult, error)
+	Search(ctx context.Context, options *SearchOptions) (*SearchResult, error)
 }

 // NewDummyIndexer returns a dummy indexer
@@ -29,14 +29,14 @@ type dummyIndexer struct {
 	internal.Indexer
 }

-func (d *dummyIndexer) Index(ctx context.Context, issue []*IndexerData) error {
+func (d *dummyIndexer) Index(_ context.Context, _ ...*IndexerData) error {
 	return fmt.Errorf("indexer is not ready")
 }

-func (d *dummyIndexer) Delete(ctx context.Context, ids ...int64) error {
+func (d *dummyIndexer) Delete(_ context.Context, _ ...int64) error {
 	return fmt.Errorf("indexer is not ready")
 }

-func (d *dummyIndexer) Search(ctx context.Context, kw string, repoIDs []int64, limit, start int, state string) (*SearchResult, error) {
+func (d *dummyIndexer) Search(_ context.Context, _ *SearchOptions) (*SearchResult, error) {
 	return nil, fmt.Errorf("indexer is not ready")
 }
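The dummy indexer relies on a common Go pattern: embed the interface so the type satisfies it structurally, then override only the methods that must fail fast while the real indexer is still starting up. A minimal, self-contained sketch of the same pattern:

package main

import (
	"errors"
	"fmt"
)

type pinger interface {
	Ping() error
}

// dummyPinger embeds the interface so it satisfies pinger without
// implementing every method; the embedded value is never called here,
// it only fills in the method set.
type dummyPinger struct {
	pinger
}

func (dummyPinger) Ping() error { return errors.New("indexer is not ready") }

func main() {
	var p pinger = dummyPinger{}
	fmt.Println(p.Ping()) // indexer is not ready
}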
@@ -3,17 +3,45 @@
 package internal

+import (
+	"code.gitea.io/gitea/models/db"
+	"code.gitea.io/gitea/modules/timeutil"
+	"code.gitea.io/gitea/modules/util"
+)
+
 // IndexerData is the data stored in the issue indexer
 type IndexerData struct {
 	ID     int64 `json:"id"`
 	RepoID int64 `json:"repo_id"`
-	State     string `json:"state"` // open, closed, all
-	IssueType string `json:"type"`  // issue or pull
+	IsPublic bool `json:"is_public"` // If the repo is public
+
+	// Fields used for keyword searching
 	Title    string   `json:"title"`
 	Content  string   `json:"content"`
 	Comments []string `json:"comments"`
-	IsDelete bool    `json:"is_delete"`
-	IDs      []int64 `json:"ids"`
+
+	// Fields used for filtering
+	IsPull             bool               `json:"is_pull"`
+	IsClosed           bool               `json:"is_closed"`
+	LabelIDs           []int64            `json:"label_ids"`
+	NoLabel            bool               `json:"no_label"` // True if LabelIDs is empty
+	MilestoneID        int64              `json:"milestone_id"`
+	ProjectID          int64              `json:"project_id"`
+	ProjectBoardID     int64              `json:"project_board_id"`
+	PosterID           int64              `json:"poster_id"`
+	AssigneeID         int64              `json:"assignee_id"`
+	MentionIDs         []int64            `json:"mention_ids"`
+	ReviewedIDs        []int64            `json:"reviewed_ids"`
+	ReviewRequestedIDs []int64            `json:"review_requested_ids"`
+	SubscriberIDs      []int64            `json:"subscriber_ids"`
+	UpdatedUnix        timeutil.TimeStamp `json:"updated_unix"`
+
+	// Fields used for sorting
+	// UpdatedUnix is used for both filtering and sorting.
+	// ID is used for sorting too, to make the sorting stable.
+	CreatedUnix  timeutil.TimeStamp `json:"created_unix"`
+	DeadlineUnix timeutil.TimeStamp `json:"deadline_unix"`
+	CommentCount int64              `json:"comment_count"`
 }

 // Match represents one search result
@@ -27,3 +55,67 @@ type SearchResult struct {
 	Total int64
 	Hits  []Match
 }
+
+// SearchOptions represents search options
+type SearchOptions struct {
+	Keyword string // keyword to search
+
+	RepoIDs   []int64 // repository IDs which the issues belong to
+	AllPublic bool    // whether to include all public repositories
+
+	IsPull   util.OptionalBool // whether the issue is a pull request
+	IsClosed util.OptionalBool // whether the issue is closed
+
+	IncludedLabelIDs    []int64 // labels the issues have
+	ExcludedLabelIDs    []int64 // labels the issues don't have
+	IncludedAnyLabelIDs []int64 // labels the issues have at least one of. It will be ignored if IncludedLabelIDs is not empty. It's an uncommon filter, but it has been supported accidentally by issues.IssuesOptions.IncludedLabelNames.
+	NoLabelOnly         bool    // if true, the issues must have no label; IncludedLabelIDs, ExcludedLabelIDs and IncludedAnyLabelIDs will be ignored
+
+	MilestoneIDs []int64 // milestones the issues have
+
+	ProjectID      *int64 // project the issues belong to
+	ProjectBoardID *int64 // project board the issues belong to
+
+	PosterID *int64 // poster of the issues
+
+	AssigneeID *int64 // assignee of the issues, zero means no assignee
+
+	MentionID *int64 // mentioned user of the issues
+
+	ReviewedID        *int64 // reviewer of the issues
+	ReviewRequestedID *int64 // requested reviewer of the issues
+
+	SubscriberID *int64 // subscriber of the issues
+
+	UpdatedAfterUnix  *int64
+	UpdatedBeforeUnix *int64
+
+	db.Paginator
+
+	SortBy SortBy // sort by field
+}
+
+type SortBy string
+
+const (
+	SortByCreatedDesc  SortBy = "-created_unix"
+	SortByUpdatedDesc  SortBy = "-updated_unix"
+	SortByCommentsDesc SortBy = "-comment_count"
+	SortByDeadlineDesc SortBy = "-deadline_unix"
+	SortByCreatedAsc   SortBy = "created_unix"
+	SortByUpdatedAsc   SortBy = "updated_unix"
+	SortByCommentsAsc  SortBy = "comment_count"
+	SortByDeadlineAsc  SortBy = "deadline_unix"
+	// Unsupported sort types which are supported by issues.IssuesOptions.SortType:
+	//
+	//   - "priorityrepo":
+	//     It's impossible to support it in the indexer.
+	//     It is based on the repository specified in the request, so we cannot add a static field to the indexer.
+	//     If we queried the issues in the specified repository first and then appended the other issues,
+	//     it would break the pagination.
+	//
+	//   - "project-column-sorting":
+	//     Although it's possible to support it by adding project.ProjectIssue.Sorting to the indexer,
+	//     what happens if the issue belongs to multiple projects?
+	//     Since searching issues with a keyword is not supported on the project page, we don't need to support it.
+)
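The *int64 fields in SearchOptions encode three states that a plain int64 cannot: nil means "don't filter", a pointer to 0 means "none" (for example, no assignee), and a pointer to n filters by that exact ID. A small sketch of that convention:

package main

import "fmt"

// describeAssigneeFilter spells out the three states a *int64 filter carries.
func describeAssigneeFilter(assigneeID *int64) string {
	switch {
	case assigneeID == nil:
		return "any assignee (no filter)"
	case *assigneeID == 0:
		return "no assignee"
	default:
		return fmt.Sprintf("assignee %d", *assigneeID)
	}
}

func main() {
	zero, five := int64(0), int64(5)
	fmt.Println(describeAssigneeFilter(nil))
	fmt.Println(describeAssigneeFilter(&zero))
	fmt.Println(describeAssigneeFilter(&five))
}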
modules/indexer/issues/internal/tests/tests.go (new file, 804 lines)
@@ -0,0 +1,804 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

// This package contains tests for issues indexer modules.
// All the code in this package is only used for testing.
// Do not put any production code in this package to avoid it being included in the final binary.

package tests

import (
	"context"
	"fmt"
	"testing"
	"time"

	"code.gitea.io/gitea/models/db"
	"code.gitea.io/gitea/modules/indexer/issues/internal"
	"code.gitea.io/gitea/modules/timeutil"
	"code.gitea.io/gitea/modules/util"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestIndexer(t *testing.T, indexer internal.Indexer) {
	_, err := indexer.Init(context.Background())
	require.NoError(t, err)

	require.NoError(t, indexer.Ping(context.Background()))

	var (
		ids  []int64
		data = map[int64]*internal.IndexerData{}
	)
	{
		d := generateDefaultIndexerData()
		for _, v := range d {
			ids = append(ids, v.ID)
			data[v.ID] = v
		}
		require.NoError(t, indexer.Index(context.Background(), d...))
		require.NoError(t, waitData(indexer, int64(len(data))))
	}

	defer func() {
		require.NoError(t, indexer.Delete(context.Background(), ids...))
	}()

	for _, c := range cases {
		t.Run(c.Name, func(t *testing.T) {
			if len(c.ExtraData) > 0 {
				require.NoError(t, indexer.Index(context.Background(), c.ExtraData...))
				for _, v := range c.ExtraData {
					data[v.ID] = v
				}
				require.NoError(t, waitData(indexer, int64(len(data))))
				defer func() {
					for _, v := range c.ExtraData {
						require.NoError(t, indexer.Delete(context.Background(), v.ID))
						delete(data, v.ID)
					}
					require.NoError(t, waitData(indexer, int64(len(data))))
				}()
			}

			result, err := indexer.Search(context.Background(), c.SearchOptions)
			require.NoError(t, err)

			if c.Expected != nil {
				c.Expected(t, data, result)
			} else {
				ids := make([]int64, 0, len(result.Hits))
				for _, hit := range result.Hits {
					ids = append(ids, hit.ID)
				}
				assert.Equal(t, c.ExpectedIDs, ids)
				assert.Equal(t, c.ExpectedTotal, result.Total)
			}
		})
	}
}
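The testIndexerCase type itself is defined further down in tests.go, outside this excerpt. Judging from how TestIndexer and the cases below use it, it presumably looks like the following sketch, where a non-nil Expected callback takes precedence over the ExpectedIDs/ExpectedTotal assertions:

// Inferred from usage; the actual definition appears later in the file
// and may differ in detail.
type testIndexerCase struct {
	Name      string
	ExtraData []*internal.IndexerData // indexed before the case runs, deleted after

	SearchOptions *internal.SearchOptions

	Expected      func(t *testing.T, data map[int64]*internal.IndexerData, result *internal.SearchResult)
	ExpectedIDs   []int64
	ExpectedTotal int64
}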
var cases = []*testIndexerCase{
	{
		Name:          "default",
		SearchOptions: &internal.SearchOptions{},
		Expected: func(t *testing.T, data map[int64]*internal.IndexerData, result *internal.SearchResult) {
			assert.Equal(t, len(data), len(result.Hits))
			assert.Equal(t, len(data), int(result.Total))
		},
	},
	{
		Name: "empty",
		SearchOptions: &internal.SearchOptions{
			Keyword: "f1dfac73-fda6-4a6b-b8a4-2408fcb8ef69",
		},
		ExpectedIDs:   []int64{},
		ExpectedTotal: 0,
	},
	{
		Name: "with limit",
		SearchOptions: &internal.SearchOptions{
			Paginator: &db.ListOptions{
				PageSize: 5,
			},
		},
		Expected: func(t *testing.T, data map[int64]*internal.IndexerData, result *internal.SearchResult) {
			assert.Equal(t, 5, len(result.Hits))
			assert.Equal(t, len(data), int(result.Total))
		},
	},
	{
		Name: "Keyword",
		ExtraData: []*internal.IndexerData{
			{ID: 1000, Title: "hi hello world"},
			{ID: 1001, Content: "hi hello world"},
			{ID: 1002, Comments: []string{"hi", "hello world"}},
		},
		SearchOptions: &internal.SearchOptions{
			Keyword: "hello",
		},
		ExpectedIDs:   []int64{1002, 1001, 1000},
		ExpectedTotal: 3,
	},
	{
		Name: "RepoIDs",
		ExtraData: []*internal.IndexerData{
			{ID: 1001, Title: "hello world", RepoID: 1, IsPublic: false},
			{ID: 1002, Title: "hello world", RepoID: 1, IsPublic: false},
			{ID: 1003, Title: "hello world", RepoID: 2, IsPublic: true},
			{ID: 1004, Title: "hello world", RepoID: 2, IsPublic: true},
			{ID: 1005, Title: "hello world", RepoID: 3, IsPublic: true},
			{ID: 1006, Title: "hello world", RepoID: 4, IsPublic: false},
			{ID: 1007, Title: "hello world", RepoID: 5, IsPublic: false},
		},
		SearchOptions: &internal.SearchOptions{
			Keyword: "hello",
			RepoIDs: []int64{1, 4},
		},
		ExpectedIDs:   []int64{1006, 1002, 1001},
		ExpectedTotal: 3,
	},
	{
		Name: "RepoIDs and AllPublic",
		ExtraData: []*internal.IndexerData{
			{ID: 1001, Title: "hello world", RepoID: 1, IsPublic: false},
			{ID: 1002, Title: "hello world", RepoID: 1, IsPublic: false},
			{ID: 1003, Title: "hello world", RepoID: 2, IsPublic: true},
			{ID: 1004, Title: "hello world", RepoID: 2, IsPublic: true},
			{ID: 1005, Title: "hello world", RepoID: 3, IsPublic: true},
			{ID: 1006, Title: "hello world", RepoID: 4, IsPublic: false},
			{ID: 1007, Title: "hello world", RepoID: 5, IsPublic: false},
		},
		SearchOptions: &internal.SearchOptions{
			Keyword:   "hello",
			RepoIDs:   []int64{1, 4},
			AllPublic: true,
		},
		ExpectedIDs:   []int64{1006, 1005, 1004, 1003, 1002, 1001},
		ExpectedTotal: 6,
	},
	{
		Name: "issue only",
		SearchOptions: &internal.SearchOptions{
			Paginator: &db.ListOptions{
				PageSize: 5,
			},
			IsPull: util.OptionalBoolFalse,
		},
		Expected: func(t *testing.T, data map[int64]*internal.IndexerData, result *internal.SearchResult) {
			assert.Equal(t, 5, len(result.Hits))
			for _, v := range result.Hits {
				assert.False(t, data[v.ID].IsPull)
			}
			assert.Equal(t, countIndexerData(data, func(v *internal.IndexerData) bool { return !v.IsPull }), result.Total)
		},
	},
	{
		Name: "pull only",
		SearchOptions: &internal.SearchOptions{
			Paginator: &db.ListOptions{
				PageSize: 5,
			},
			IsPull: util.OptionalBoolTrue,
		},
		Expected: func(t *testing.T, data map[int64]*internal.IndexerData, result *internal.SearchResult) {
			assert.Equal(t, 5, len(result.Hits))
			for _, v := range result.Hits {
				assert.True(t, data[v.ID].IsPull)
			}
			assert.Equal(t, countIndexerData(data, func(v *internal.IndexerData) bool { return v.IsPull }), result.Total)
		},
	},
	{
		Name: "opened only",
		SearchOptions: &internal.SearchOptions{
			Paginator: &db.ListOptions{
				PageSize: 5,
			},
			IsClosed: util.OptionalBoolFalse,
		},
		Expected: func(t *testing.T, data map[int64]*internal.IndexerData, result *internal.SearchResult) {
			assert.Equal(t, 5, len(result.Hits))
			for _, v := range result.Hits {
				assert.False(t, data[v.ID].IsClosed)
			}
			assert.Equal(t, countIndexerData(data, func(v *internal.IndexerData) bool { return !v.IsClosed }), result.Total)
		},
	},
	{
		Name: "closed only",
		SearchOptions: &internal.SearchOptions{
			Paginator: &db.ListOptions{
				PageSize: 5,
			},
			IsClosed: util.OptionalBoolTrue,
		},
		Expected: func(t *testing.T, data map[int64]*internal.IndexerData, result *internal.SearchResult) {
			assert.Equal(t, 5, len(result.Hits))
			for _, v := range result.Hits {
				assert.True(t, data[v.ID].IsClosed)
			}
			assert.Equal(t, countIndexerData(data, func(v *internal.IndexerData) bool { return v.IsClosed }), result.Total)
		},
	},
	{
		Name: "labels",
		ExtraData: []*internal.IndexerData{
			{ID: 1000, Title: "hello a", LabelIDs: []int64{2000, 2001, 2002}},
			{ID: 1001, Title: "hello b", LabelIDs: []int64{2000, 2001}},
			{ID: 1002, Title: "hello c", LabelIDs: []int64{2000, 2001, 2003}},
			{ID: 1003, Title: "hello d", LabelIDs: []int64{2000}},
			{ID: 1004, Title: "hello e", LabelIDs: []int64{}},
		},
		SearchOptions: &internal.SearchOptions{
			Keyword:          "hello",
			IncludedLabelIDs: []int64{2000, 2001},
			ExcludedLabelIDs: []int64{2003},
		},
		ExpectedIDs:   []int64{1001, 1000},
		ExpectedTotal: 2,
	},
	{
		Name: "include any labels",
		ExtraData: []*internal.IndexerData{
			{ID: 1000, Title: "hello a", LabelIDs: []int64{2000, 2001, 2002}},
			{ID: 1001, Title: "hello b", LabelIDs: []int64{2001}},
			{ID: 1002, Title: "hello c", LabelIDs: []int64{2000, 2001, 2003}},
			{ID: 1003, Title: "hello d", LabelIDs: []int64{2002}},
			{ID: 1004, Title: "hello e", LabelIDs: []int64{}},
		},
		SearchOptions: &internal.SearchOptions{
			Keyword:             "hello",
			IncludedAnyLabelIDs: []int64{2001, 2002},
			ExcludedLabelIDs:    []int64{2003},
		},
		ExpectedIDs:   []int64{1003, 1001, 1000},
		ExpectedTotal: 3,
	},
	{
		Name: "MilestoneIDs",
		SearchOptions: &internal.SearchOptions{
			Paginator: &db.ListOptions{
				PageSize: 5,
			},
			MilestoneIDs: []int64{1, 2, 6},
		},
		Expected: func(t *testing.T, data map[int64]*internal.IndexerData, result *internal.SearchResult) {
			assert.Equal(t, 5, len(result.Hits))
			for _, v := range result.Hits {
				assert.Contains(t, []int64{1, 2, 6}, data[v.ID].MilestoneID)
			}
			assert.Equal(t, countIndexerData(data, func(v *internal.IndexerData) bool {
				return v.MilestoneID == 1 || v.MilestoneID == 2 || v.MilestoneID == 6
			}), result.Total)
		},
	},
	{
		Name: "no MilestoneIDs",
		SearchOptions: &internal.SearchOptions{
			Paginator: &db.ListOptions{
				PageSize: 5,
			},
			MilestoneIDs: []int64{0},
		},
		Expected: func(t *testing.T, data map[int64]*internal.IndexerData, result *internal.SearchResult) {
			assert.Equal(t, 5, len(result.Hits))
			for _, v := range result.Hits {
				assert.Equal(t, int64(0), data[v.ID].MilestoneID)
			}
			assert.Equal(t, countIndexerData(data, func(v *internal.IndexerData) bool {
				return v.MilestoneID == 0
			}), result.Total)
		},
	},
	{
		Name: "ProjectID",
		SearchOptions: &internal.SearchOptions{
			Paginator: &db.ListOptions{
				PageSize: 5,
			},
			ProjectID: func() *int64 {
				id := int64(1)
				return &id
			}(),
		},
		Expected: func(t *testing.T, data map[int64]*internal.IndexerData, result *internal.SearchResult) {
			assert.Equal(t, 5, len(result.Hits))
			for _, v := range result.Hits {
				assert.Equal(t, int64(1), data[v.ID].ProjectID)
			}
			assert.Equal(t, countIndexerData(data, func(v *internal.IndexerData) bool {
				return v.ProjectID == 1
			}), result.Total)
		},
	},
	{
		Name: "no ProjectID",
		SearchOptions: &internal.SearchOptions{
			Paginator: &db.ListOptions{
				PageSize: 5,
			},
			ProjectID: func() *int64 {
				id := int64(0)
				return &id
			}(),
		},
		Expected: func(t *testing.T, data map[int64]*internal.IndexerData, result *internal.SearchResult) {
			assert.Equal(t, 5, len(result.Hits))
			for _, v := range result.Hits {
				assert.Equal(t, int64(0), data[v.ID].ProjectID)
			}
			assert.Equal(t, countIndexerData(data, func(v *internal.IndexerData) bool {
				return v.ProjectID == 0
			}), result.Total)
		},
	},
	{
		Name: "ProjectBoardID",
		SearchOptions: &internal.SearchOptions{
			Paginator: &db.ListOptions{
				PageSize: 5,
			},
			ProjectBoardID: func() *int64 {
				id := int64(1)
				return &id
			}(),
		},
		Expected: func(t *testing.T, data map[int64]*internal.IndexerData, result *internal.SearchResult) {
			assert.Equal(t, 5, len(result.Hits))
			for _, v := range result.Hits {
				assert.Equal(t, int64(1), data[v.ID].ProjectBoardID)
			}
			assert.Equal(t, countIndexerData(data, func(v *internal.IndexerData) bool {
				return v.ProjectBoardID == 1
			}), result.Total)
		},
	},
	{
		Name: "no ProjectBoardID",
		SearchOptions: &internal.SearchOptions{
			Paginator: &db.ListOptions{
				PageSize: 5,
			},
			ProjectBoardID: func() *int64 {
				id := int64(0)
				return &id
			}(),
		},
		Expected: func(t *testing.T, data map[int64]*internal.IndexerData, result *internal.SearchResult) {
			assert.Equal(t, 5, len(result.Hits))
			for _, v := range result.Hits {
				assert.Equal(t, int64(0), data[v.ID].ProjectBoardID)
			}
			assert.Equal(t, countIndexerData(data, func(v *internal.IndexerData) bool {
				return v.ProjectBoardID == 0
			}), result.Total)
		},
	},
	{
		Name: "PosterID",
		SearchOptions: &internal.SearchOptions{
			Paginator: &db.ListOptions{
				PageSize: 5,
			},
			PosterID: func() *int64 {
				id := int64(1)
				return &id
			}(),
		},
		Expected: func(t *testing.T, data map[int64]*internal.IndexerData, result *internal.SearchResult) {
			assert.Equal(t, 5, len(result.Hits))
			for _, v := range result.Hits {
				assert.Equal(t, int64(1), data[v.ID].PosterID)
			}
			assert.Equal(t, countIndexerData(data, func(v *internal.IndexerData) bool {
				return v.PosterID == 1
			}), result.Total)
		},
	},
	{
		Name: "AssigneeID",
		SearchOptions: &internal.SearchOptions{
			Paginator: &db.ListOptions{
				PageSize: 5,
			},
			AssigneeID: func() *int64 {
				id := int64(1)
				return &id
			}(),
		},
		Expected: func(t *testing.T, data map[int64]*internal.IndexerData, result *internal.SearchResult) {
			assert.Equal(t, 5, len(result.Hits))
			for _, v := range result.Hits {
				assert.Equal(t, int64(1), data[v.ID].AssigneeID)
			}
			assert.Equal(t, countIndexerData(data, func(v *internal.IndexerData) bool {
				return v.AssigneeID == 1
			}), result.Total)
		},
	},
	{
		Name: "no AssigneeID",
		SearchOptions: &internal.SearchOptions{
			Paginator: &db.ListOptions{
				PageSize: 5,
			},
			AssigneeID: func() *int64 {
				id := int64(0)
				return &id
			}(),
		},
		Expected: func(t *testing.T, data map[int64]*internal.IndexerData, result *internal.SearchResult) {
			assert.Equal(t, 5, len(result.Hits))
			for _, v := range result.Hits {
				assert.Equal(t, int64(0), data[v.ID].AssigneeID)
			}
			assert.Equal(t, countIndexerData(data, func(v *internal.IndexerData) bool {
				return v.AssigneeID == 0
			}), result.Total)
		},
	},
	{
		Name: "MentionID",
		SearchOptions: &internal.SearchOptions{
			Paginator: &db.ListOptions{
				PageSize: 5,
			},
			MentionID: func() *int64 {
				id := int64(1)
				return &id
			}(),
		},
		Expected: func(t *testing.T, data map[int64]*internal.IndexerData, result *internal.SearchResult) {
			assert.Equal(t, 5, len(result.Hits))
			for _, v := range result.Hits {
				assert.Contains(t, data[v.ID].MentionIDs, int64(1))
			}
			assert.Equal(t, countIndexerData(data, func(v *internal.IndexerData) bool {
				return util.SliceContains(v.MentionIDs, 1)
			}), result.Total)
		},
	},
	{
		Name: "ReviewedID",
		SearchOptions: &internal.SearchOptions{
			Paginator: &db.ListOptions{
				PageSize: 5,
			},
			ReviewedID: func() *int64 {
				id := int64(1)
				return &id
			}(),
		},
		Expected: func(t *testing.T, data map[int64]*internal.IndexerData, result *internal.SearchResult) {
			assert.Equal(t, 5, len(result.Hits))
			for _, v := range result.Hits {
				assert.Contains(t, data[v.ID].ReviewedIDs, int64(1))
			}
			assert.Equal(t, countIndexerData(data, func(v *internal.IndexerData) bool {
				return util.SliceContains(v.ReviewedIDs, 1)
			}), result.Total)
		},
	},
	{
		Name: "ReviewRequestedID",
		SearchOptions: &internal.SearchOptions{
			Paginator: &db.ListOptions{
				PageSize: 5,
			},
			ReviewRequestedID: func() *int64 {
				id := int64(1)
				return &id
			}(),
		},
		Expected: func(t *testing.T, data map[int64]*internal.IndexerData, result *internal.SearchResult) {
			assert.Equal(t, 5, len(result.Hits))
			for _, v := range result.Hits {
				assert.Contains(t, data[v.ID].ReviewRequestedIDs, int64(1))
			}
			assert.Equal(t, countIndexerData(data, func(v *internal.IndexerData) bool {
				return util.SliceContains(v.ReviewRequestedIDs, 1)
			}), result.Total)
		},
	},
	{
		Name: "SubscriberID",
		SearchOptions: &internal.SearchOptions{
			Paginator: &db.ListOptions{
				PageSize: 5,
			},
			SubscriberID: func() *int64 {
				id := int64(1)
				return &id
			}(),
		},
		Expected: func(t *testing.T, data map[int64]*internal.IndexerData, result *internal.SearchResult) {
			assert.Equal(t, 5, len(result.Hits))
|
for _, v := range result.Hits {
|
||||||
|
assert.Contains(t, data[v.ID].SubscriberIDs, int64(1))
|
||||||
|
}
|
||||||
|
assert.Equal(t, countIndexerData(data, func(v *internal.IndexerData) bool {
|
||||||
|
return util.SliceContains(v.SubscriberIDs, 1)
|
||||||
|
}), result.Total)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "updated",
|
||||||
|
SearchOptions: &internal.SearchOptions{
|
||||||
|
Paginator: &db.ListOptions{
|
||||||
|
PageSize: 5,
|
||||||
|
},
|
||||||
|
UpdatedAfterUnix: func() *int64 {
|
||||||
|
var t int64 = 20
|
||||||
|
return &t
|
||||||
|
}(),
|
||||||
|
UpdatedBeforeUnix: func() *int64 {
|
||||||
|
var t int64 = 30
|
||||||
|
return &t
|
||||||
|
}(),
|
||||||
|
},
|
||||||
|
Expected: func(t *testing.T, data map[int64]*internal.IndexerData, result *internal.SearchResult) {
|
||||||
|
assert.Equal(t, 5, len(result.Hits))
|
||||||
|
for _, v := range result.Hits {
|
||||||
|
assert.GreaterOrEqual(t, data[v.ID].UpdatedUnix, int64(20))
|
||||||
|
assert.LessOrEqual(t, data[v.ID].UpdatedUnix, int64(30))
|
||||||
|
}
|
||||||
|
assert.Equal(t, countIndexerData(data, func(v *internal.IndexerData) bool {
|
||||||
|
return data[v.ID].UpdatedUnix >= 20 && data[v.ID].UpdatedUnix <= 30
|
||||||
|
}), result.Total)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "SortByCreatedDesc",
|
||||||
|
SearchOptions: &internal.SearchOptions{
|
||||||
|
Paginator: &db.ListOptions{
|
||||||
|
ListAll: true,
|
||||||
|
},
|
||||||
|
SortBy: internal.SortByCreatedDesc,
|
||||||
|
},
|
||||||
|
Expected: func(t *testing.T, data map[int64]*internal.IndexerData, result *internal.SearchResult) {
|
||||||
|
assert.Equal(t, len(data), len(result.Hits))
|
||||||
|
assert.Equal(t, len(data), int(result.Total))
|
||||||
|
for i, v := range result.Hits {
|
||||||
|
if i < len(result.Hits)-1 {
|
||||||
|
assert.GreaterOrEqual(t, data[v.ID].CreatedUnix, data[result.Hits[i+1].ID].CreatedUnix)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "SortByUpdatedDesc",
|
||||||
|
SearchOptions: &internal.SearchOptions{
|
||||||
|
Paginator: &db.ListOptions{
|
||||||
|
ListAll: true,
|
||||||
|
},
|
||||||
|
SortBy: internal.SortByUpdatedDesc,
|
||||||
|
},
|
||||||
|
Expected: func(t *testing.T, data map[int64]*internal.IndexerData, result *internal.SearchResult) {
|
||||||
|
assert.Equal(t, len(data), len(result.Hits))
|
||||||
|
assert.Equal(t, len(data), int(result.Total))
|
||||||
|
for i, v := range result.Hits {
|
||||||
|
if i < len(result.Hits)-1 {
|
||||||
|
assert.GreaterOrEqual(t, data[v.ID].UpdatedUnix, data[result.Hits[i+1].ID].UpdatedUnix)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "SortByCommentsDesc",
|
||||||
|
SearchOptions: &internal.SearchOptions{
|
||||||
|
Paginator: &db.ListOptions{
|
||||||
|
ListAll: true,
|
||||||
|
},
|
||||||
|
SortBy: internal.SortByCommentsDesc,
|
||||||
|
},
|
||||||
|
Expected: func(t *testing.T, data map[int64]*internal.IndexerData, result *internal.SearchResult) {
|
||||||
|
assert.Equal(t, len(data), len(result.Hits))
|
||||||
|
assert.Equal(t, len(data), int(result.Total))
|
||||||
|
for i, v := range result.Hits {
|
||||||
|
if i < len(result.Hits)-1 {
|
||||||
|
assert.GreaterOrEqual(t, data[v.ID].CommentCount, data[result.Hits[i+1].ID].CommentCount)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "SortByDeadlineDesc",
|
||||||
|
SearchOptions: &internal.SearchOptions{
|
||||||
|
Paginator: &db.ListOptions{
|
||||||
|
ListAll: true,
|
||||||
|
},
|
||||||
|
SortBy: internal.SortByDeadlineDesc,
|
||||||
|
},
|
||||||
|
Expected: func(t *testing.T, data map[int64]*internal.IndexerData, result *internal.SearchResult) {
|
||||||
|
assert.Equal(t, len(data), len(result.Hits))
|
||||||
|
assert.Equal(t, len(data), int(result.Total))
|
||||||
|
for i, v := range result.Hits {
|
||||||
|
if i < len(result.Hits)-1 {
|
||||||
|
assert.GreaterOrEqual(t, data[v.ID].DeadlineUnix, data[result.Hits[i+1].ID].DeadlineUnix)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "SortByCreatedAsc",
|
||||||
|
SearchOptions: &internal.SearchOptions{
|
||||||
|
Paginator: &db.ListOptions{
|
||||||
|
ListAll: true,
|
||||||
|
},
|
||||||
|
SortBy: internal.SortByCreatedAsc,
|
||||||
|
},
|
||||||
|
Expected: func(t *testing.T, data map[int64]*internal.IndexerData, result *internal.SearchResult) {
|
||||||
|
assert.Equal(t, len(data), len(result.Hits))
|
||||||
|
assert.Equal(t, len(data), int(result.Total))
|
||||||
|
for i, v := range result.Hits {
|
||||||
|
if i < len(result.Hits)-1 {
|
||||||
|
assert.LessOrEqual(t, data[v.ID].CreatedUnix, data[result.Hits[i+1].ID].CreatedUnix)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "SortByUpdatedAsc",
|
||||||
|
SearchOptions: &internal.SearchOptions{
|
||||||
|
Paginator: &db.ListOptions{
|
||||||
|
ListAll: true,
|
||||||
|
},
|
||||||
|
SortBy: internal.SortByUpdatedAsc,
|
||||||
|
},
|
||||||
|
Expected: func(t *testing.T, data map[int64]*internal.IndexerData, result *internal.SearchResult) {
|
||||||
|
assert.Equal(t, len(data), len(result.Hits))
|
||||||
|
assert.Equal(t, len(data), int(result.Total))
|
||||||
|
for i, v := range result.Hits {
|
||||||
|
if i < len(result.Hits)-1 {
|
||||||
|
assert.LessOrEqual(t, data[v.ID].UpdatedUnix, data[result.Hits[i+1].ID].UpdatedUnix)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "SortByCommentsAsc",
|
||||||
|
SearchOptions: &internal.SearchOptions{
|
||||||
|
Paginator: &db.ListOptions{
|
||||||
|
ListAll: true,
|
||||||
|
},
|
||||||
|
SortBy: internal.SortByCommentsAsc,
|
||||||
|
},
|
||||||
|
Expected: func(t *testing.T, data map[int64]*internal.IndexerData, result *internal.SearchResult) {
|
||||||
|
assert.Equal(t, len(data), len(result.Hits))
|
||||||
|
assert.Equal(t, len(data), int(result.Total))
|
||||||
|
for i, v := range result.Hits {
|
||||||
|
if i < len(result.Hits)-1 {
|
||||||
|
assert.LessOrEqual(t, data[v.ID].CommentCount, data[result.Hits[i+1].ID].CommentCount)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "SortByDeadlineAsc",
|
||||||
|
SearchOptions: &internal.SearchOptions{
|
||||||
|
Paginator: &db.ListOptions{
|
||||||
|
ListAll: true,
|
||||||
|
},
|
||||||
|
SortBy: internal.SortByDeadlineAsc,
|
||||||
|
},
|
||||||
|
Expected: func(t *testing.T, data map[int64]*internal.IndexerData, result *internal.SearchResult) {
|
||||||
|
assert.Equal(t, len(data), len(result.Hits))
|
||||||
|
assert.Equal(t, len(data), int(result.Total))
|
||||||
|
for i, v := range result.Hits {
|
||||||
|
if i < len(result.Hits)-1 {
|
||||||
|
assert.LessOrEqual(t, data[v.ID].DeadlineUnix, data[result.Hits[i+1].ID].DeadlineUnix)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
type testIndexerCase struct {
|
||||||
|
Name string
|
||||||
|
ExtraData []*internal.IndexerData
|
||||||
|
|
||||||
|
SearchOptions *internal.SearchOptions
|
||||||
|
|
||||||
|
Expected func(t *testing.T, data map[int64]*internal.IndexerData, result *internal.SearchResult) // if nil, use ExpectedIDs, ExpectedTotal
|
||||||
|
ExpectedIDs []int64
|
||||||
|
ExpectedTotal int64
|
||||||
|
}
|
||||||
|
|
||||||
|
func generateDefaultIndexerData() []*internal.IndexerData {
|
||||||
|
var id int64
|
||||||
|
var data []*internal.IndexerData
|
||||||
|
for repoID := int64(1); repoID <= 10; repoID++ {
|
||||||
|
for issueIndex := int64(1); issueIndex <= 20; issueIndex++ {
|
||||||
|
id++
|
||||||
|
|
||||||
|
comments := make([]string, id%4)
|
||||||
|
for i := range comments {
|
||||||
|
comments[i] = fmt.Sprintf("comment%d", i)
|
||||||
|
}
|
||||||
|
|
||||||
|
labelIDs := make([]int64, id%5)
|
||||||
|
for i := range labelIDs {
|
||||||
|
labelIDs[i] = int64(i) + 1 // LabelID should not be 0
|
||||||
|
}
|
||||||
|
mentionIDs := make([]int64, id%6)
|
||||||
|
for i := range mentionIDs {
|
||||||
|
mentionIDs[i] = int64(i) + 1 // MentionID should not be 0
|
||||||
|
}
|
||||||
|
reviewedIDs := make([]int64, id%7)
|
||||||
|
for i := range reviewedIDs {
|
||||||
|
reviewedIDs[i] = int64(i) + 1 // ReviewID should not be 0
|
||||||
|
}
|
||||||
|
reviewRequestedIDs := make([]int64, id%8)
|
||||||
|
for i := range reviewRequestedIDs {
|
||||||
|
reviewRequestedIDs[i] = int64(i) + 1 // ReviewRequestedID should not be 0
|
||||||
|
}
|
||||||
|
subscriberIDs := make([]int64, id%9)
|
||||||
|
for i := range subscriberIDs {
|
||||||
|
subscriberIDs[i] = int64(i) + 1 // SubscriberID should not be 0
|
||||||
|
}
|
||||||
|
|
||||||
|
data = append(data, &internal.IndexerData{
|
||||||
|
ID: id,
|
||||||
|
RepoID: repoID,
|
||||||
|
IsPublic: repoID%2 == 0,
|
||||||
|
Title: fmt.Sprintf("issue%d of repo%d", issueIndex, repoID),
|
||||||
|
Content: fmt.Sprintf("content%d", issueIndex),
|
||||||
|
Comments: comments,
|
||||||
|
IsPull: issueIndex%2 == 0,
|
||||||
|
IsClosed: issueIndex%3 == 0,
|
||||||
|
LabelIDs: labelIDs,
|
||||||
|
NoLabel: len(labelIDs) == 0,
|
||||||
|
MilestoneID: issueIndex % 4,
|
||||||
|
ProjectID: issueIndex % 5,
|
||||||
|
ProjectBoardID: issueIndex % 6,
|
||||||
|
PosterID: id%10 + 1, // PosterID should not be 0
|
||||||
|
AssigneeID: issueIndex % 10,
|
||||||
|
MentionIDs: mentionIDs,
|
||||||
|
ReviewedIDs: reviewedIDs,
|
||||||
|
ReviewRequestedIDs: reviewRequestedIDs,
|
||||||
|
SubscriberIDs: subscriberIDs,
|
||||||
|
UpdatedUnix: timeutil.TimeStamp(id + issueIndex),
|
||||||
|
CreatedUnix: timeutil.TimeStamp(id),
|
||||||
|
DeadlineUnix: timeutil.TimeStamp(id + issueIndex + repoID),
|
||||||
|
CommentCount: int64(len(comments)),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return data
|
||||||
|
}
|
||||||
|
|
||||||
|
func countIndexerData(data map[int64]*internal.IndexerData, f func(v *internal.IndexerData) bool) int64 {
|
||||||
|
var count int64
|
||||||
|
for _, v := range data {
|
||||||
|
if f(v) {
|
||||||
|
count++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return count
|
||||||
|
}
|
||||||
|
|
||||||
|
// waitData waits for the indexer to index all data.
|
||||||
|
// Some engines like Elasticsearch index data asynchronously, so we need to wait for a while.
|
||||||
|
func waitData(indexer internal.Indexer, total int64) error {
|
||||||
|
var actual int64
|
||||||
|
for i := 0; i < 100; i++ {
|
||||||
|
result, err := indexer.Search(context.Background(), &internal.SearchOptions{
|
||||||
|
Paginator: &db.ListOptions{
|
||||||
|
PageSize: 0,
|
||||||
|
},
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
actual = result.Total
|
||||||
|
if actual == total {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
time.Sleep(100 * time.Millisecond)
|
||||||
|
}
|
||||||
|
return fmt.Errorf("waitData: expected %d, actual %d", total, actual)
|
||||||
|
}
|
|
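The cases above are pure data; the TestIndexer driver that executes them against a concrete backend is defined earlier in this file and is not part of this excerpt. As a rough sketch of how such a table is typically consumed (the wiring below is an assumption for illustration, not the actual driver; it also ignores the ExtraData field for brevity):

// Hypothetical driver for the table above; the real entry point is
// tests.TestIndexer, which is not shown in this excerpt.
func runCases(t *testing.T, indexer internal.Indexer, data map[int64]*internal.IndexerData, cases []*testIndexerCase) {
	for _, c := range cases {
		t.Run(c.Name, func(t *testing.T) {
			result, err := indexer.Search(context.Background(), c.SearchOptions)
			if err != nil {
				t.Fatal(err)
			}
			if c.Expected != nil {
				c.Expected(t, data, result)
				return
			}
			// Fall back to the declarative expectations.
			ids := make([]int64, 0, len(result.Hits))
			for _, hit := range result.Hits {
				ids = append(ids, hit.ID)
			}
			assert.Equal(t, c.ExpectedIDs, ids)
			assert.Equal(t, c.ExpectedTotal, result.Total)
		})
	}
}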
@ -16,7 +16,10 @@ import (
 )
 
 const (
-	issueIndexerLatestVersion = 1
+	issueIndexerLatestVersion = 2
+
+	// TODO: make this configurable if necessary
+	maxTotalHits = 10000
 )
 
 var _ internal.Indexer = &Indexer{}
 
@ -29,7 +32,53 @@ type Indexer struct {
 
 // NewIndexer creates a new meilisearch indexer
 func NewIndexer(url, apiKey, indexerName string) *Indexer {
-	inner := inner_meilisearch.NewIndexer(url, apiKey, indexerName, issueIndexerLatestVersion)
+	settings := &meilisearch.Settings{
+		// The default ranking rules of meilisearch are: ["words", "typo", "proximity", "attribute", "sort", "exactness"]
+		// So even if we specify the sort order, it could not be respected because the priority of "sort" is so low.
+		// So we need to specify the ranking rules to make sure the sort order is respected.
+		// See https://www.meilisearch.com/docs/learn/core_concepts/relevancy
+		RankingRules: []string{"sort", // make sure "sort" has the highest priority
+			"words", "typo", "proximity", "attribute", "exactness"},
+
+		SearchableAttributes: []string{
+			"title",
+			"content",
+			"comments",
+		},
+		DisplayedAttributes: []string{
+			"id",
+		},
+		FilterableAttributes: []string{
+			"repo_id",
+			"is_public",
+			"is_pull",
+			"is_closed",
+			"label_ids",
+			"no_label",
+			"milestone_id",
+			"project_id",
+			"project_board_id",
+			"poster_id",
+			"assignee_id",
+			"mention_ids",
+			"reviewed_ids",
+			"review_requested_ids",
+			"subscriber_ids",
+			"updated_unix",
+		},
+		SortableAttributes: []string{
+			"updated_unix",
+			"created_unix",
+			"deadline_unix",
+			"comment_count",
+			"id",
+		},
+		Pagination: &meilisearch.Pagination{
+			MaxTotalHits: maxTotalHits,
+		},
+	}
+
+	inner := inner_meilisearch.NewIndexer(url, apiKey, indexerName, issueIndexerLatestVersion, settings)
 	indexer := &Indexer{
 		inner:   inner,
 		Indexer: inner,
@ -38,7 +87,7 @@ func NewIndexer(url, apiKey, indexerName string) *Indexer {
 }
 
 // Index will save the index data
-func (b *Indexer) Index(_ context.Context, issues []*internal.IndexerData) error {
+func (b *Indexer) Index(_ context.Context, issues ...*internal.IndexerData) error {
 	if len(issues) == 0 {
 		return nil
 	}
@ -70,23 +119,102 @@ func (b *Indexer) Delete(_ context.Context, ids ...int64) error {
 
 // Search searches for issues by given conditions.
 // Returns the matching issue IDs
-func (b *Indexer) Search(ctx context.Context, keyword string, repoIDs []int64, limit, start int, state string) (*internal.SearchResult, error) {
-	repoFilters := make([]string, 0, len(repoIDs))
-	for _, repoID := range repoIDs {
-		repoFilters = append(repoFilters, "repo_id = "+strconv.FormatInt(repoID, 10))
-	}
-	filter := strings.Join(repoFilters, " OR ")
-	if state == "open" || state == "closed" {
-		if filter != "" {
-			filter = "(" + filter + ") AND state = " + state
-		} else {
-			filter = "state = " + state
-		}
-	}
-	searchRes, err := b.inner.Client.Index(b.inner.VersionedIndexName()).Search(keyword, &meilisearch.SearchRequest{
-		Filter: filter,
+func (b *Indexer) Search(ctx context.Context, options *internal.SearchOptions) (*internal.SearchResult, error) {
+	query := inner_meilisearch.FilterAnd{}
+
+	if len(options.RepoIDs) > 0 {
+		q := &inner_meilisearch.FilterOr{}
+		q.Or(inner_meilisearch.NewFilterIn("repo_id", options.RepoIDs...))
+		if options.AllPublic {
+			q.Or(inner_meilisearch.NewFilterEq("is_public", true))
+		}
+		query.And(q)
+	}
+
+	if !options.IsPull.IsNone() {
+		query.And(inner_meilisearch.NewFilterEq("is_pull", options.IsPull.IsTrue()))
+	}
+	if !options.IsClosed.IsNone() {
+		query.And(inner_meilisearch.NewFilterEq("is_closed", options.IsClosed.IsTrue()))
+	}
+
+	if options.NoLabelOnly {
+		query.And(inner_meilisearch.NewFilterEq("no_label", true))
+	} else {
+		if len(options.IncludedLabelIDs) > 0 {
+			q := &inner_meilisearch.FilterAnd{}
+			for _, labelID := range options.IncludedLabelIDs {
+				q.And(inner_meilisearch.NewFilterEq("label_ids", labelID))
+			}
+			query.And(q)
+		} else if len(options.IncludedAnyLabelIDs) > 0 {
+			query.And(inner_meilisearch.NewFilterIn("label_ids", options.IncludedAnyLabelIDs...))
+		}
+		if len(options.ExcludedLabelIDs) > 0 {
+			q := &inner_meilisearch.FilterAnd{}
+			for _, labelID := range options.ExcludedLabelIDs {
+				q.And(inner_meilisearch.NewFilterNot(inner_meilisearch.NewFilterEq("label_ids", labelID)))
+			}
+			query.And(q)
+		}
+	}
+
+	if len(options.MilestoneIDs) > 0 {
+		query.And(inner_meilisearch.NewFilterIn("milestone_id", options.MilestoneIDs...))
+	}
+
+	if options.ProjectID != nil {
+		query.And(inner_meilisearch.NewFilterEq("project_id", *options.ProjectID))
+	}
+	if options.ProjectBoardID != nil {
+		query.And(inner_meilisearch.NewFilterEq("project_board_id", *options.ProjectBoardID))
+	}
+
+	if options.PosterID != nil {
+		query.And(inner_meilisearch.NewFilterEq("poster_id", *options.PosterID))
+	}
+
+	if options.AssigneeID != nil {
+		query.And(inner_meilisearch.NewFilterEq("assignee_id", *options.AssigneeID))
+	}
+
+	if options.MentionID != nil {
+		query.And(inner_meilisearch.NewFilterEq("mention_ids", *options.MentionID))
+	}
+
+	if options.ReviewedID != nil {
+		query.And(inner_meilisearch.NewFilterEq("reviewed_ids", *options.ReviewedID))
+	}
+	if options.ReviewRequestedID != nil {
+		query.And(inner_meilisearch.NewFilterEq("review_requested_ids", *options.ReviewRequestedID))
+	}
+
+	if options.SubscriberID != nil {
+		query.And(inner_meilisearch.NewFilterEq("subscriber_ids", *options.SubscriberID))
+	}
+
+	if options.UpdatedAfterUnix != nil {
+		query.And(inner_meilisearch.NewFilterGte("updated_unix", *options.UpdatedAfterUnix))
+	}
+	if options.UpdatedBeforeUnix != nil {
+		query.And(inner_meilisearch.NewFilterLte("updated_unix", *options.UpdatedBeforeUnix))
+	}
+
+	if options.SortBy == "" {
+		options.SortBy = internal.SortByCreatedAsc
+	}
+	sortBy := []string{
+		parseSortBy(options.SortBy),
+		"id:desc",
+	}
+
+	skip, limit := indexer_internal.ParsePaginator(options.Paginator, maxTotalHits)
+
+	searchRes, err := b.inner.Client.Index(b.inner.VersionedIndexName()).Search(options.Keyword, &meilisearch.SearchRequest{
+		Filter: query.Statement(),
 		Limit:  int64(limit),
-		Offset: int64(start),
+		Offset: int64(skip),
+		Sort:   sortBy,
 	})
 	if err != nil {
 		return nil, err
@ -98,8 +226,17 @@ func (b *Indexer) Search(ctx context.Context, keyword string, repoIDs []int64, l
 			ID: int64(hit.(map[string]any)["id"].(float64)),
 		})
 	}
 
 	return &internal.SearchResult{
-		Total: searchRes.TotalHits,
+		Total: searchRes.EstimatedTotalHits,
 		Hits:  hits,
 	}, nil
 }
+
+func parseSortBy(sortBy internal.SortBy) string {
+	field := strings.TrimPrefix(string(sortBy), "-")
+	if strings.HasPrefix(string(sortBy), "-") {
+		return field + ":desc"
+	}
+	return field + ":asc"
+}
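Two small details above are easy to miss. parseSortBy maps the internal sort constants onto Meilisearch's field:asc / field:desc clauses, with descending order encoded by a leading "-" on the constant, and the secondary "id:desc" keeps the ordering stable across ties. The other half of the paging story is indexer_internal.ParsePaginator, which turns a page-based paginator into the skip/limit window Meilisearch expects. A self-contained sketch of that math, assuming the window is capped at maxTotalHits (the real helper lives in modules/indexer/internal and may differ in detail):

package main

import "fmt"

// Hypothetical mirror of a ParsePaginator-style helper: a page/pageSize
// pair becomes a skip/limit window, clamped so skip+limit never exceeds
// the engine's configured maximum total hits.
func parsePaginator(page, pageSize, maxTotalHits int) (skip, limit int) {
	if page < 1 {
		page = 1
	}
	skip = (page - 1) * pageSize
	limit = pageSize
	if skip+limit > maxTotalHits {
		limit = maxTotalHits - skip
		if limit < 0 {
			limit = 0
		}
	}
	return skip, limit
}

func main() {
	fmt.Println(parsePaginator(3, 20, 10000))   // 40 20
	fmt.Println(parsePaginator(500, 20, 10000)) // 9980 20
	fmt.Println(parsePaginator(501, 20, 10000)) // 10000 0: past the cap, nothing left to page
}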
modules/indexer/issues/meilisearch/meilisearch_test.go (new file, 50 lines)
@ -0,0 +1,50 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package meilisearch

import (
	"fmt"
	"net/http"
	"os"
	"testing"
	"time"

	"code.gitea.io/gitea/modules/indexer/issues/internal/tests"
)

func TestMeilisearchIndexer(t *testing.T) {
	// The meilisearch instance started by pull-db-tests.yml > test-unit > services > meilisearch
	url := "http://meilisearch:7700"
	key := "" // auth has been disabled in test environment

	if os.Getenv("CI") == "" {
		// Make it possible to run tests against a local meilisearch instance
		url = os.Getenv("TEST_MEILISEARCH_URL")
		if url == "" {
			t.Skip("TEST_MEILISEARCH_URL not set and not running in CI")
			return
		}
		key = os.Getenv("TEST_MEILISEARCH_KEY")
	}

	ok := false
	for i := 0; i < 60; i++ {
		resp, err := http.Get(url)
		if err == nil && resp.StatusCode == http.StatusOK {
			ok = true
			break
		}
		t.Logf("Waiting for meilisearch to be up: %v", err)
		time.Sleep(time.Second)
	}
	if !ok {
		t.Fatalf("Failed to wait for meilisearch to be up")
		return
	}

	indexer := NewIndexer(url, key, fmt.Sprintf("test_meilisearch_indexer_%d", time.Now().Unix()))
	defer indexer.Close()

	tests.TestIndexer(t, indexer)
}
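Outside CI the suite skips itself unless TEST_MEILISEARCH_URL is set, so it can be pointed at any locally running instance, for example `TEST_MEILISEARCH_URL=http://localhost:7700 go test code.gitea.io/gitea/modules/indexer/issues/meilisearch` (the URL and port are assumptions about the local setup; set TEST_MEILISEARCH_KEY as well if that instance has auth enabled). The timestamp in the index name keeps repeated runs from colliding on a shared instance.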
modules/indexer/issues/util.go (new file, 173 lines)
@ -0,0 +1,173 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package issues

import (
	"context"
	"errors"
	"fmt"

	"code.gitea.io/gitea/models/db"
	issue_model "code.gitea.io/gitea/models/issues"
	"code.gitea.io/gitea/modules/container"
	"code.gitea.io/gitea/modules/indexer/issues/internal"
	"code.gitea.io/gitea/modules/log"
	"code.gitea.io/gitea/modules/queue"
)

// getIssueIndexerData returns the indexer data of an issue and a bool value indicating whether the issue exists.
func getIssueIndexerData(ctx context.Context, issueID int64) (*internal.IndexerData, bool, error) {
	issue, err := issue_model.GetIssueByID(ctx, issueID)
	if err != nil {
		if issue_model.IsErrIssueNotExist(err) {
			return nil, false, nil
		}
		return nil, false, err
	}

	// FIXME: what if users want to search for a review comment of a pull request?
	// The comment type is CommentTypeCode or CommentTypeReview.
	// But LoadDiscussComments only loads CommentTypeComment.
	if err := issue.LoadDiscussComments(ctx); err != nil {
		return nil, false, err
	}

	comments := make([]string, 0, len(issue.Comments))
	for _, comment := range issue.Comments {
		if comment.Content != "" {
			// whatever the comment type is, index the content if it is not empty.
			comments = append(comments, comment.Content)
		}
	}

	if err := issue.LoadAttributes(ctx); err != nil {
		return nil, false, err
	}

	labels := make([]int64, 0, len(issue.Labels))
	for _, label := range issue.Labels {
		labels = append(labels, label.ID)
	}

	mentionIDs, err := issue_model.GetIssueMentionIDs(ctx, issueID)
	if err != nil {
		return nil, false, err
	}

	var (
		reviewedIDs        []int64
		reviewRequestedIDs []int64
	)
	{
		reviews, err := issue_model.FindReviews(ctx, issue_model.FindReviewOptions{
			ListOptions: db.ListOptions{
				ListAll: true,
			},
			IssueID:      issueID,
			OfficialOnly: false,
		})
		if err != nil {
			return nil, false, err
		}

		reviewedIDsSet := make(container.Set[int64], len(reviews))
		reviewRequestedIDsSet := make(container.Set[int64], len(reviews))
		for _, review := range reviews {
			if review.Type == issue_model.ReviewTypeRequest {
				reviewRequestedIDsSet.Add(review.ReviewerID)
			} else {
				reviewedIDsSet.Add(review.ReviewerID)
			}
		}
		reviewedIDs = reviewedIDsSet.Values()
		reviewRequestedIDs = reviewRequestedIDsSet.Values()
	}

	subscriberIDs, err := issue_model.GetIssueWatchersIDs(ctx, issue.ID, true)
	if err != nil {
		return nil, false, err
	}

	var projectID int64
	if issue.Project != nil {
		projectID = issue.Project.ID
	}

	return &internal.IndexerData{
		ID:                 issue.ID,
		RepoID:             issue.RepoID,
		IsPublic:           !issue.Repo.IsPrivate,
		Title:              issue.Title,
		Content:            issue.Content,
		Comments:           comments,
		IsPull:             issue.IsPull,
		IsClosed:           issue.IsClosed,
		LabelIDs:           labels,
		NoLabel:            len(labels) == 0,
		MilestoneID:        issue.MilestoneID,
		ProjectID:          projectID,
		ProjectBoardID:     issue.ProjectBoardID(),
		PosterID:           issue.PosterID,
		AssigneeID:         issue.AssigneeID,
		MentionIDs:         mentionIDs,
		ReviewedIDs:        reviewedIDs,
		ReviewRequestedIDs: reviewRequestedIDs,
		SubscriberIDs:      subscriberIDs,
		UpdatedUnix:        issue.UpdatedUnix,
		CreatedUnix:        issue.CreatedUnix,
		DeadlineUnix:       issue.DeadlineUnix,
		CommentCount:       int64(len(issue.Comments)),
	}, true, nil
}

func updateRepoIndexer(ctx context.Context, repoID int64) error {
	ids, err := issue_model.GetIssueIDsByRepoID(ctx, repoID)
	if err != nil {
		return fmt.Errorf("issue_model.GetIssueIDsByRepoID: %w", err)
	}
	for _, id := range ids {
		if err := updateIssueIndexer(id); err != nil {
			return err
		}
	}
	return nil
}

func updateIssueIndexer(issueID int64) error {
	return pushIssueIndexerQueue(&IndexerMetadata{ID: issueID})
}

func deleteRepoIssueIndexer(ctx context.Context, repoID int64) error {
	var ids []int64
	ids, err := issue_model.GetIssueIDsByRepoID(ctx, repoID)
	if err != nil {
		return fmt.Errorf("issue_model.GetIssueIDsByRepoID: %w", err)
	}

	if len(ids) == 0 {
		return nil
	}
	return pushIssueIndexerQueue(&IndexerMetadata{
		IDs:      ids,
		IsDelete: true,
	})
}

func pushIssueIndexerQueue(data *IndexerMetadata) error {
	if issueIndexerQueue == nil {
		// Some unit tests will trigger indexing, but the queue is not initialized.
		// It's OK to ignore it, but log a warning message in case it's not a unit test.
		log.Warn("Trying to push %+v to issue indexer queue, but the queue is not initialized, it's OK if it's a unit test", data)
		return nil
	}

	err := issueIndexerQueue.Push(data)
	if errors.Is(err, queue.ErrAlreadyInQueue) {
		return nil
	}
	if errors.Is(err, context.DeadlineExceeded) {
		log.Warn("It seems that issue indexer is slow and the queue is full. Please check the issue indexer or increase the queue size.")
	}
	return err
}
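getIssueIndexerData is the read side of the new queue design: only IndexerMetadata travels through issueIndexerQueue, and the full document is rebuilt from the database at consume time, so a stale snapshot can never be indexed. The actual queue consumer lives elsewhere in this PR; the sketch below (as if it sat in the same issues package) is hypothetical wiring, shown only to make the flow concrete.

// Hypothetical consumer for the metadata queue, consistent with the
// helpers above; not the actual handler from this commit.
func handleIndexerMetadata(ctx context.Context, indexer internal.Indexer, items ...*IndexerMetadata) error {
	for _, item := range items {
		if item.IsDelete {
			if err := indexer.Delete(ctx, item.IDs...); err != nil {
				return err
			}
			continue
		}
		data, exists, err := getIssueIndexerData(ctx, item.ID)
		if err != nil {
			return err
		}
		if !exists {
			// The issue vanished after the metadata was queued; drop it
			// from the index instead of indexing stale data.
			if err := indexer.Delete(ctx, item.ID); err != nil {
				return err
			}
			continue
		}
		if err := indexer.Index(ctx, data); err != nil {
			return err
		}
	}
	return nil
}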
@ -46,40 +46,22 @@ func (r *indexerNotifier) NotifyCreateIssueComment(ctx context.Context, doer *us
 		issue.Comments = append(issue.Comments, comment)
 	}
 
-		issue_indexer.UpdateIssueIndexer(issue)
+		issue_indexer.UpdateIssueIndexer(issue.ID)
 	}
 }
 
 func (r *indexerNotifier) NotifyNewIssue(ctx context.Context, issue *issues_model.Issue, mentions []*user_model.User) {
-	issue_indexer.UpdateIssueIndexer(issue)
+	issue_indexer.UpdateIssueIndexer(issue.ID)
 }
 
 func (r *indexerNotifier) NotifyNewPullRequest(ctx context.Context, pr *issues_model.PullRequest, mentions []*user_model.User) {
-	issue_indexer.UpdateIssueIndexer(pr.Issue)
+	issue_indexer.UpdateIssueIndexer(pr.Issue.ID)
 }
 
 func (r *indexerNotifier) NotifyUpdateComment(ctx context.Context, doer *user_model.User, c *issues_model.Comment, oldContent string) {
-	if c.Type == issues_model.CommentTypeComment {
-		var found bool
-		if c.Issue.Comments != nil {
-			for i := 0; i < len(c.Issue.Comments); i++ {
-				if c.Issue.Comments[i].ID == c.ID {
-					c.Issue.Comments[i] = c
-					found = true
-					break
-				}
-			}
-		}
-
-		if !found {
-			if err := c.Issue.LoadDiscussComments(ctx); err != nil {
-				log.Error("LoadDiscussComments failed: %v", err)
-				return
-			}
-		}
-
-		issue_indexer.UpdateIssueIndexer(c.Issue)
-	}
+	// Whatever the comment type is, just update the issue indexer.
+	// So that the issue indexer will be updated when Status/Assignee/Label and so on changed.
+	issue_indexer.UpdateIssueIndexer(c.Issue.ID)
 }
 
 func (r *indexerNotifier) NotifyDeleteComment(ctx context.Context, doer *user_model.User, comment *issues_model.Comment) {
@ -107,19 +89,19 @@ func (r *indexerNotifier) NotifyDeleteComment(ctx context.Context, doer *user_mo
 		}
 	}
 		// reload comments to delete the old comment
-		issue_indexer.UpdateIssueIndexer(comment.Issue)
+		issue_indexer.UpdateIssueIndexer(comment.Issue.ID)
 	}
 }
 
 func (r *indexerNotifier) NotifyDeleteRepository(ctx context.Context, doer *user_model.User, repo *repo_model.Repository) {
-	issue_indexer.DeleteRepoIssueIndexer(ctx, repo)
+	issue_indexer.DeleteRepoIssueIndexer(ctx, repo.ID)
 	if setting.Indexer.RepoIndexerEnabled {
 		code_indexer.UpdateRepoIndexer(repo)
 	}
 }
 
 func (r *indexerNotifier) NotifyMigrateRepository(ctx context.Context, doer, u *user_model.User, repo *repo_model.Repository) {
-	issue_indexer.UpdateRepoIndexer(ctx, repo)
+	issue_indexer.UpdateRepoIndexer(ctx, repo.ID)
 	if setting.Indexer.RepoIndexerEnabled && !repo.IsEmpty {
 		code_indexer.UpdateRepoIndexer(repo)
 	}
@ -155,13 +137,13 @@ func (r *indexerNotifier) NotifySyncPushCommits(ctx context.Context, pusher *use
 }
 
 func (r *indexerNotifier) NotifyIssueChangeContent(ctx context.Context, doer *user_model.User, issue *issues_model.Issue, oldContent string) {
-	issue_indexer.UpdateIssueIndexer(issue)
+	issue_indexer.UpdateIssueIndexer(issue.ID)
 }
 
 func (r *indexerNotifier) NotifyIssueChangeTitle(ctx context.Context, doer *user_model.User, issue *issues_model.Issue, oldTitle string) {
-	issue_indexer.UpdateIssueIndexer(issue)
+	issue_indexer.UpdateIssueIndexer(issue.ID)
 }
 
 func (r *indexerNotifier) NotifyIssueChangeRef(ctx context.Context, doer *user_model.User, issue *issues_model.Issue, oldRef string) {
-	issue_indexer.UpdateIssueIndexer(issue)
+	issue_indexer.UpdateIssueIndexer(issue.ID)
 }
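Every notifier change above follows one pattern: pass an issue ID instead of an issue snapshot. Combined with the queue.ErrAlreadyInQueue handling in pushIssueIndexerQueue, repeated notifications for one issue collapse into a single pending job whose data is re-read at consume time, so the index cannot lag behind a snapshot taken before a later edit. A toy model of that collapsing behavior (generic illustration, not Gitea's actual queue implementation):

package main

import "fmt"

// Toy unique queue: a second push of the same ID is a no-op, mirroring
// how ErrAlreadyInQueue is treated as success above.
type uniqueQueue struct {
	seen  map[int64]bool
	items []int64
}

func (q *uniqueQueue) push(id int64) bool {
	if q.seen[id] {
		return false // already queued: nothing to do
	}
	q.seen[id] = true
	q.items = append(q.items, id)
	return true
}

func main() {
	q := &uniqueQueue{seen: map[int64]bool{}}
	fmt.Println(q.push(42)) // true: queued
	fmt.Println(q.push(42)) // false: collapsed, like queue.ErrAlreadyInQueue
	fmt.Println(q.items)    // [42]
}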
@ -23,6 +23,7 @@ import (
 	user_model "code.gitea.io/gitea/models/user"
 	"code.gitea.io/gitea/models/webhook"
 	"code.gitea.io/gitea/modules/git"
+	issue_indexer "code.gitea.io/gitea/modules/indexer/issues"
 	"code.gitea.io/gitea/modules/log"
 	"code.gitea.io/gitea/modules/setting"
 	api "code.gitea.io/gitea/modules/structs"
@ -418,6 +419,10 @@ func UpdateRepository(ctx context.Context, repo *repo_model.Repository, visibili
 			return fmt.Errorf("updateRepository[%d]: %w", forkRepos[i].ID, err)
 		}
 	}
+
+		// If visibility is changed, we need to update the issue indexer.
+		// Since the data in the issue indexer have field to indicate if the repo is public or not.
+		issue_indexer.UpdateRepoIndexer(ctx, repo.ID)
 	}
 
 	return nil
@ -132,6 +132,11 @@ func SearchIssues(ctx *context.APIContext) {
 		isClosed = util.OptionalBoolFalse
 	}
 
+	var (
+		repoIDs   []int64
+		allPublic bool
+	)
+	{
 	// find repos user can access (for issue search)
 	opts := &repo_model.SearchRepoOptions{
 		Private: false,
@ -179,27 +184,21 @@ func SearchIssues(ctx *context.APIContext) {
 		opts.TeamID = team.ID
 	}
 
-	repoCond := repo_model.SearchRepositoryCondition(opts)
-	repoIDs, _, err := repo_model.SearchRepositoryIDs(opts)
+	if opts.AllPublic {
+		allPublic = true
+		opts.AllPublic = false // set it false to avoid returning too many repos, we could filter by indexer
+	}
+	repoIDs, _, err = repo_model.SearchRepositoryIDs(opts)
 	if err != nil {
 		ctx.Error(http.StatusInternalServerError, "SearchRepositoryIDs", err)
 		return
 	}
+	}
 
-	var issues []*issues_model.Issue
-	var filteredCount int64
-
 	keyword := ctx.FormTrim("q")
 	if strings.IndexByte(keyword, 0) >= 0 {
 		keyword = ""
 	}
-	var issueIDs []int64
-	if len(keyword) > 0 && len(repoIDs) > 0 {
-		if issueIDs, err = issue_indexer.SearchIssuesByKeyword(ctx, repoIDs, keyword, ctx.FormString("state")); err != nil {
-			ctx.Error(http.StatusInternalServerError, "SearchIssuesByKeyword", err)
-			return
-		}
-	}
 
 	var isPull util.OptionalBool
 	switch ctx.FormString("type") {
@ -211,16 +210,33 @@ func SearchIssues(ctx *context.APIContext) {
 		isPull = util.OptionalBoolNone
 	}
 
+	var includedAnyLabels []int64
+	{
 	labels := ctx.FormTrim("labels")
 	var includedLabelNames []string
 	if len(labels) > 0 {
 		includedLabelNames = strings.Split(labels, ",")
 	}
+	includedAnyLabels, err = issues_model.GetLabelIDsByNames(ctx, includedLabelNames)
+	if err != nil {
+		ctx.Error(http.StatusInternalServerError, "GetLabelIDsByNames", err)
+		return
+	}
+	}
+
+	var includedMilestones []int64
+	{
 	milestones := ctx.FormTrim("milestones")
-	var includedMilestones []string
+	var includedMilestoneNames []string
 	if len(milestones) > 0 {
-		includedMilestones = strings.Split(milestones, ",")
+		includedMilestoneNames = strings.Split(milestones, ",")
+	}
+	includedMilestones, err = issues_model.GetMilestoneIDsByNames(ctx, includedMilestoneNames)
+	if err != nil {
+		ctx.Error(http.StatusInternalServerError, "GetMilestoneIDsByNames", err)
+		return
+	}
 	}
 
 	// this api is also used in UI,
@ -232,64 +248,64 @@ func SearchIssues(ctx *context.APIContext) {
 		limit = setting.API.MaxResponseItems
 	}
 
-	// Only fetch the issues if we either don't have a keyword or the search returned issues
-	// This would otherwise return all issues if no issues were found by the search.
-	if len(keyword) == 0 || len(issueIDs) > 0 || len(includedLabelNames) > 0 || len(includedMilestones) > 0 {
-		issuesOpt := &issues_model.IssuesOptions{
-			ListOptions: db.ListOptions{
-				Page:     ctx.FormInt("page"),
-				PageSize: limit,
-			},
-			RepoCond:           repoCond,
-			IsClosed:           isClosed,
-			IssueIDs:           issueIDs,
-			IncludedLabelNames: includedLabelNames,
-			IncludeMilestones:  includedMilestones,
-			SortType:           "priorityrepo",
-			PriorityRepoID:     ctx.FormInt64("priority_repo_id"),
-			IsPull:             isPull,
-			UpdatedBeforeUnix:  before,
-			UpdatedAfterUnix:   since,
-		}
+	searchOpt := &issue_indexer.SearchOptions{
+		Paginator: &db.ListOptions{
+			PageSize: limit,
+			Page:     ctx.FormInt("page"),
+		},
+		Keyword:             keyword,
+		RepoIDs:             repoIDs,
+		AllPublic:           allPublic,
+		IsPull:              isPull,
+		IsClosed:            isClosed,
+		IncludedAnyLabelIDs: includedAnyLabels,
+		MilestoneIDs:        includedMilestones,
+		SortBy:              issue_indexer.SortByCreatedDesc,
+	}
+
+	if since != 0 {
+		searchOpt.UpdatedAfterUnix = &since
+	}
+	if before != 0 {
+		searchOpt.UpdatedBeforeUnix = &before
+	}
 
-		ctxUserID := int64(0)
 	if ctx.IsSigned {
-		ctxUserID = ctx.Doer.ID
-	}
-
-	// Filter for: Created by User, Assigned to User, Mentioning User, Review of User Requested
+		ctxUserID := ctx.Doer.ID
+		// Filter for: Created by User, Assigned to User, Mentioning User, Review of User Requested
 		if ctx.FormBool("created") {
-			issuesOpt.PosterID = ctxUserID
+			searchOpt.PosterID = &ctxUserID
 		}
 		if ctx.FormBool("assigned") {
-			issuesOpt.AssigneeID = ctxUserID
+			searchOpt.AssigneeID = &ctxUserID
 		}
 		if ctx.FormBool("mentioned") {
-			issuesOpt.MentionedID = ctxUserID
+			searchOpt.MentionID = &ctxUserID
 		}
 		if ctx.FormBool("review_requested") {
-			issuesOpt.ReviewRequestedID = ctxUserID
+			searchOpt.ReviewRequestedID = &ctxUserID
 		}
 		if ctx.FormBool("reviewed") {
-			issuesOpt.ReviewedID = ctxUserID
+			searchOpt.ReviewedID = &ctxUserID
+		}
 	}
 
-		if issues, err = issues_model.Issues(ctx, issuesOpt); err != nil {
-			ctx.Error(http.StatusInternalServerError, "Issues", err)
+	// FIXME: It's unsupported to sort by priority repo when searching by indexer,
+	// it's indeed an regression, but I think it is worth to support filtering by indexer first.
+	_ = ctx.FormInt64("priority_repo_id")
+
+	ids, total, err := issue_indexer.SearchIssues(ctx, searchOpt)
+	if err != nil {
+		ctx.Error(http.StatusInternalServerError, "SearchIssues", err)
+		return
+	}
+	issues, err := issues_model.GetIssuesByIDs(ctx, ids, true)
+	if err != nil {
+		ctx.Error(http.StatusInternalServerError, "FindIssuesByIDs", err)
 		return
 	}
 
-		issuesOpt.ListOptions = db.ListOptions{
-			Page: -1,
-		}
-		if filteredCount, err = issues_model.CountIssues(ctx, issuesOpt); err != nil {
-			ctx.Error(http.StatusInternalServerError, "CountIssues", err)
-			return
-		}
-	}
-
-	ctx.SetLinkHeader(int(filteredCount), limit)
-	ctx.SetTotalCountHeader(filteredCount)
+	ctx.SetLinkHeader(int(total), limit)
+	ctx.SetTotalCountHeader(total)
 	ctx.JSON(http.StatusOK, convert.ToAPIIssueList(ctx, issues))
 }
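After this rewrite the handler no longer touches issues_model.IssuesOptions for searching; every query parameter funnels into one SearchOptions value and the database is only consulted afterwards to hydrate the returned IDs. To make the mapping concrete, here is roughly what a request such as GET /api/v1/repos/search/issues?q=crash&type=issues&state=open&assigned=true would produce (field names are taken from the diff above; the concrete values and surrounding handler wiring are illustrative assumptions):

package main

import (
	"code.gitea.io/gitea/models/db"
	issue_indexer "code.gitea.io/gitea/modules/indexer/issues"
	"code.gitea.io/gitea/modules/util"
)

// exampleSearchOptions sketches the SearchOptions a typical API request
// maps to under the new flow; illustration only.
func exampleSearchOptions(repoIDs []int64, viewerID int64) *issue_indexer.SearchOptions {
	opt := &issue_indexer.SearchOptions{
		Paginator: &db.ListOptions{Page: 1, PageSize: 20},
		Keyword:   "crash",   // ?q=crash
		RepoIDs:   repoIDs,   // repos the viewer can access
		IsPull:    util.OptionalBoolFalse, // ?type=issues
		IsClosed:  util.OptionalBoolFalse, // ?state=open
		SortBy:    issue_indexer.SortByCreatedDesc,
	}
	opt.AssigneeID = &viewerID // ?assigned=true
	return opt
}

func main() {
	_ = exampleSearchOptions([]int64{1, 2}, 3)
}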
@ -384,23 +400,12 @@ func ListIssues(ctx *context.APIContext) {
 		isClosed = util.OptionalBoolFalse
 	}
 
-	var issues []*issues_model.Issue
-	var filteredCount int64
-
 	keyword := ctx.FormTrim("q")
 	if strings.IndexByte(keyword, 0) >= 0 {
 		keyword = ""
 	}
-	var issueIDs []int64
-	var labelIDs []int64
-	if len(keyword) > 0 {
-		issueIDs, err = issue_indexer.SearchIssuesByKeyword(ctx, []int64{ctx.Repo.Repository.ID}, keyword, ctx.FormString("state"))
-		if err != nil {
-			ctx.Error(http.StatusInternalServerError, "SearchIssuesByKeyword", err)
-			return
-		}
-	}
 
+	var labelIDs []int64
 	if splitted := strings.Split(ctx.FormString("labels"), ","); len(splitted) > 0 {
 		labelIDs, err = issues_model.GetLabelIDsInRepoByNames(ctx.Repo.Repository.ID, splitted)
 		if err != nil {
@ -465,40 +470,61 @@ func ListIssues(ctx *context.APIContext) {
 		return
 	}
 
-	// Only fetch the issues if we either don't have a keyword or the search returned issues
-	// This would otherwise return all issues if no issues were found by the search.
-	if len(keyword) == 0 || len(issueIDs) > 0 || len(labelIDs) > 0 {
-		issuesOpt := &issues_model.IssuesOptions{
-			ListOptions:       listOptions,
-			RepoIDs:           []int64{ctx.Repo.Repository.ID},
-			IsClosed:          isClosed,
-			IssueIDs:          issueIDs,
-			LabelIDs:          labelIDs,
-			MilestoneIDs:      mileIDs,
-			IsPull:            isPull,
-			UpdatedBeforeUnix: before,
-			UpdatedAfterUnix:  since,
-			PosterID:          createdByID,
-			AssigneeID:        assignedByID,
-			MentionedID:       mentionedByID,
+	searchOpt := &issue_indexer.SearchOptions{
+		Paginator: &listOptions,
+		Keyword:   keyword,
+		RepoIDs:   []int64{ctx.Repo.Repository.ID},
+		IsPull:    isPull,
+		IsClosed:  isClosed,
+		SortBy:    issue_indexer.SortByCreatedDesc,
+	}
+	if since != 0 {
+		searchOpt.UpdatedAfterUnix = &since
+	}
+	if before != 0 {
+		searchOpt.UpdatedBeforeUnix = &before
+	}
+	if len(labelIDs) == 1 && labelIDs[0] == 0 {
+		searchOpt.NoLabelOnly = true
+	} else {
+		for _, labelID := range labelIDs {
+			if labelID > 0 {
+				searchOpt.IncludedLabelIDs = append(searchOpt.IncludedLabelIDs, labelID)
+			} else {
+				searchOpt.ExcludedLabelIDs = append(searchOpt.ExcludedLabelIDs, -labelID)
+			}
 		}
+	}
 
-		if issues, err = issues_model.Issues(ctx, issuesOpt); err != nil {
-			ctx.Error(http.StatusInternalServerError, "Issues", err)
+	if len(mileIDs) == 1 && mileIDs[0] == db.NoConditionID {
+		searchOpt.MilestoneIDs = []int64{0}
+	} else {
+		searchOpt.MilestoneIDs = mileIDs
+	}
+
+	if createdByID > 0 {
+		searchOpt.PosterID = &createdByID
+	}
+	if assignedByID > 0 {
+		searchOpt.AssigneeID = &assignedByID
+	}
+	if mentionedByID > 0 {
+		searchOpt.MentionID = &mentionedByID
+	}
+
+	ids, total, err := issue_indexer.SearchIssues(ctx, searchOpt)
+	if err != nil {
+		ctx.Error(http.StatusInternalServerError, "SearchIssues", err)
+		return
+	}
+	issues, err := issues_model.GetIssuesByIDs(ctx, ids, true)
+	if err != nil {
+		ctx.Error(http.StatusInternalServerError, "FindIssuesByIDs", err)
 		return
 	}
 
-		issuesOpt.ListOptions = db.ListOptions{
-			Page: -1,
-		}
-		if filteredCount, err = issues_model.CountIssues(ctx, issuesOpt); err != nil {
-			ctx.Error(http.StatusInternalServerError, "CountIssues", err)
-			return
-		}
-	}
-
-	ctx.SetLinkHeader(int(filteredCount), listOptions.PageSize)
-	ctx.SetTotalCountHeader(filteredCount)
+	ctx.SetLinkHeader(int(total), listOptions.PageSize)
+	ctx.SetTotalCountHeader(total)
 	ctx.JSON(http.StatusOK, convert.ToAPIIssueList(ctx, issues))
 }
@ -309,7 +309,7 @@ func prepareForReplaceOrAdd(ctx *context.APIContext, form api.IssueLabelsOption)
 		return nil, nil, err
 	}
 
-	labels, err := issues_model.GetLabelsByIDs(form.Labels)
+	labels, err := issues_model.GetLabelsByIDs(form.Labels, "id", "repo_id", "org_id")
 	if err != nil {
 		ctx.Error(http.StatusInternalServerError, "GetLabelsByIDs", err)
 		return nil, nil, err
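The extra arguments narrow the columns the query selects: this caller only needs enough of each label to validate which repo or org it belongs to, so fetching full rows is wasted work. A generic sketch of the variadic-columns pattern (toy SQL string builder, not Gitea's xorm-based implementation):

package main

import (
	"fmt"
	"strings"
)

// buildLabelQuery sketches why callers pass only the columns they need:
// the SELECT list shrinks to match. Illustration only.
func buildLabelQuery(ids []int64, cols ...string) string {
	if len(cols) == 0 {
		cols = []string{"*"}
	}
	idStrs := make([]string, len(ids))
	for i, id := range ids {
		idStrs[i] = fmt.Sprint(id)
	}
	return fmt.Sprintf("SELECT %s FROM label WHERE id IN (%s)",
		strings.Join(cols, ", "), strings.Join(idStrs, ", "))
}

func main() {
	fmt.Println(buildLabelQuery([]int64{1, 2}, "id", "repo_id", "org_id"))
	// SELECT id, repo_id, org_id FROM label WHERE id IN (1, 2)
}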
@ -150,7 +150,6 @@ func issues(ctx *context.Context, milestoneID, projectID int64, isPullOption uti
|
||||||
mentionedID int64
|
mentionedID int64
|
||||||
reviewRequestedID int64
|
reviewRequestedID int64
|
||||||
reviewedID int64
|
reviewedID int64
|
||||||
forceEmpty bool
|
|
||||||
)
|
)
|
||||||
|
|
||||||
if ctx.IsSigned {
|
if ctx.IsSigned {
|
||||||
|
@ -191,31 +190,14 @@ func issues(ctx *context.Context, milestoneID, projectID int64, isPullOption uti
|
||||||
keyword = ""
|
keyword = ""
|
||||||
}
|
}
|
||||||
|
|
||||||
var issueIDs []int64
|
|
||||||
if len(keyword) > 0 {
|
|
||||||
issueIDs, err = issue_indexer.SearchIssuesByKeyword(ctx, []int64{repo.ID}, keyword, ctx.FormString("state"))
|
|
||||||
if err != nil {
|
|
||||||
if issue_indexer.IsAvailable(ctx) {
|
|
||||||
ctx.ServerError("issueIndexer.Search", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
ctx.Data["IssueIndexerUnavailable"] = true
|
|
||||||
}
|
|
||||||
if len(issueIDs) == 0 {
|
|
||||||
forceEmpty = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var mileIDs []int64
|
var mileIDs []int64
|
||||||
if milestoneID > 0 || milestoneID == db.NoConditionID { // -1 to get those issues which have no any milestone assigned
|
if milestoneID > 0 || milestoneID == db.NoConditionID { // -1 to get those issues which have no any milestone assigned
|
||||||
mileIDs = []int64{milestoneID}
|
mileIDs = []int64{milestoneID}
|
||||||
}
|
}
|
||||||
|
|
||||||
var issueStats *issues_model.IssueStats
|
var issueStats *issues_model.IssueStats
|
||||||
if forceEmpty {
|
{
|
||||||
issueStats = &issues_model.IssueStats{}
|
statsOpts := &issues_model.IssuesOptions{
|
||||||
} else {
|
|
||||||
issueStats, err = issues_model.GetIssueStats(&issues_model.IssuesOptions{
|
|
||||||
RepoIDs: []int64{repo.ID},
|
RepoIDs: []int64{repo.ID},
|
||||||
LabelIDs: labelIDs,
|
LabelIDs: labelIDs,
|
||||||
MilestoneIDs: mileIDs,
|
MilestoneIDs: mileIDs,
|
||||||
|
@ -226,14 +208,36 @@ func issues(ctx *context.Context, milestoneID, projectID int64, isPullOption uti
|
||||||
ReviewRequestedID: reviewRequestedID,
|
ReviewRequestedID: reviewRequestedID,
|
||||||
ReviewedID: reviewedID,
|
ReviewedID: reviewedID,
|
||||||
IsPull: isPullOption,
|
IsPull: isPullOption,
|
||||||
IssueIDs: issueIDs,
|
IssueIDs: nil,
|
||||||
})
|
}
|
||||||
|
if keyword != "" {
|
||||||
|
allIssueIDs, err := issueIDsFromSearch(ctx, keyword, statsOpts)
|
||||||
|
if err != nil {
|
||||||
|
if issue_indexer.IsAvailable(ctx) {
|
||||||
|
ctx.ServerError("issueIDsFromSearch", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
ctx.Data["IssueIndexerUnavailable"] = true
|
||||||
|
return
|
||||||
|
}
|
||||||
|
statsOpts.IssueIDs = allIssueIDs
|
||||||
|
}
|
||||||
|
if keyword != "" && len(statsOpts.IssueIDs) == 0 {
|
||||||
|
// So it did search with the keyword, but no issue found.
|
||||||
|
// Just set issueStats to empty.
|
||||||
|
issueStats = &issues_model.IssueStats{}
|
||||||
|
} else {
|
||||||
|
// So it did search with the keyword, and found some issues. It needs to get issueStats of these issues.
|
||||||
|
// Or the keyword is empty, so it doesn't need issueIDs as filter, just get issueStats with statsOpts.
|
||||||
|
issueStats, err = issues_model.GetIssueStats(statsOpts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
ctx.ServerError("GetIssueStats", err)
|
ctx.ServerError("GetIssueStats", err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
}
|
||||||

 	isShowClosed := ctx.FormString("state") == "closed"
 	// if open issues are zero and close don't, use closed as default
 	if len(ctx.FormString("state")) == 0 && issueStats.OpenCount == 0 && issueStats.ClosedCount != 0 {
@@ -253,12 +257,10 @@ func issues(ctx *context.Context, milestoneID, projectID int64, isPullOption uti
 	}
 	pager := context.NewPagination(total, setting.UI.IssuePagingNum, page, 5)

-	var issues []*issues_model.Issue
-	if forceEmpty {
-		issues = []*issues_model.Issue{}
-	} else {
-		issues, err = issues_model.Issues(ctx, &issues_model.IssuesOptions{
-			ListOptions: db.ListOptions{
+	var issues issues_model.IssueList
+	{
+		ids, err := issueIDsFromSearch(ctx, keyword, &issues_model.IssuesOptions{
+			Paginator: &db.ListOptions{
 				Page:     pager.Paginater.Current(),
 				PageSize: setting.UI.IssuePagingNum,
 			},
@@ -274,16 +276,23 @@ func issues(ctx *context.Context, milestoneID, projectID int64, isPullOption uti
 			IsPull:   isPullOption,
 			LabelIDs: labelIDs,
 			SortType: sortType,
-			IssueIDs: issueIDs,
 		})
 		if err != nil {
-			ctx.ServerError("Issues", err)
+			if issue_indexer.IsAvailable(ctx) {
+				ctx.ServerError("issueIDsFromSearch", err)
+				return
+			}
+			ctx.Data["IssueIndexerUnavailable"] = true
+			return
+		}
+		issues, err = issues_model.GetIssuesByIDs(ctx, ids, true)
+		if err != nil {
+			ctx.ServerError("GetIssuesByIDs", err)
 			return
 		}
 	}
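One behavioral detail worth noting: a search failure is only fatal when the indexer is actually supposed to be reachable. A condensed sketch of that fallback (stand-in types; `IsAvailable` mirrors the call in the diff):

```go
package sketch

// ctxData is a stand-in for the handler's template data and error reporting.
type ctxData struct{ Data map[string]any }

func (c *ctxData) serverError(name string, err error) { /* renders a 500 page */ }

// indexerAvailable stands in for issue_indexer.IsAvailable(ctx).
func indexerAvailable() bool { return true }

// onSearchError: a failing-but-available indexer is a real 500; an unavailable
// one only sets a flag so the page renders with an "indexer unavailable" notice.
func onSearchError(c *ctxData, err error) {
	if indexerAvailable() {
		c.serverError("issueIDsFromSearch", err)
		return
	}
	c.Data["IssueIndexerUnavailable"] = true
}
```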

-	issueList := issues_model.IssueList(issues)
-	approvalCounts, err := issueList.GetApprovalCounts(ctx)
+	approvalCounts, err := issues.GetApprovalCounts(ctx)
 	if err != nil {
 		ctx.ServerError("ApprovalCounts", err)
 		return
@@ -306,6 +315,11 @@ func issues(ctx *context.Context, milestoneID, projectID int64, isPullOption uti
 		return
 	}

+	if err := issues.LoadAttributes(ctx); err != nil {
+		ctx.ServerError("issues.LoadAttributes", err)
+		return
+	}
+
 	ctx.Data["Issues"] = issues
 	ctx.Data["CommitLastStatus"] = lastStatus
 	ctx.Data["CommitStatuses"] = commitStatuses
@@ -429,6 +443,14 @@ func issues(ctx *context.Context, milestoneID, projectID int64, isPullOption uti
 	ctx.Data["Page"] = pager
 }

+func issueIDsFromSearch(ctx *context.Context, keyword string, opts *issues_model.IssuesOptions) ([]int64, error) {
+	ids, _, err := issue_indexer.SearchIssues(ctx, issue_indexer.ToSearchOptions(keyword, opts))
+	if err != nil {
+		return nil, fmt.Errorf("SearchIssues: %w", err)
+	}
+	return ids, nil
+}
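The new `issueIDsFromSearch` helper leans on `issue_indexer.ToSearchOptions` to translate database-oriented `IssuesOptions` into indexer `SearchOptions`. A rough sketch of what such a translation layer does (field names here are illustrative stand-ins, not the actual mapping):

```go
package sketch

// dbOptions / searchOptions are simplified stand-ins for issues_model.IssuesOptions
// and issue_indexer.SearchOptions; the real structs carry many more filters.
type dbOptions struct {
	RepoIDs      []int64
	LabelIDs     []int64
	MilestoneIDs []int64
}

type searchOptions struct {
	Keyword          string
	RepoIDs          []int64
	IncludedLabelIDs []int64
	MilestoneIDs     []int64
}

// toSearchOptions mirrors the idea behind issue_indexer.ToSearchOptions: copy
// each database filter onto the equivalent indexer field so one options value
// can drive keyword search, filtering, and paging in a single indexer query.
func toSearchOptions(keyword string, opts *dbOptions) *searchOptions {
	return &searchOptions{
		Keyword:          keyword,
		RepoIDs:          opts.RepoIDs,
		IncludedLabelIDs: opts.LabelIDs,
		MilestoneIDs:     opts.MilestoneIDs,
	}
}
```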

 // Issues render issues page
 func Issues(ctx *context.Context) {
 	isPullList := ctx.Params(":type") == "pulls"
@@ -2419,6 +2441,11 @@ func SearchIssues(ctx *context.Context) {
 		isClosed = util.OptionalBoolFalse
 	}

+	var (
+		repoIDs   []int64
+		allPublic bool
+	)
+	{
 	// find repos user can access (for issue search)
 	opts := &repo_model.SearchRepoOptions{
 		Private: false,
@@ -2466,27 +2493,21 @@ func SearchIssues(ctx *context.Context) {
 		opts.TeamID = team.ID
 	}

-	repoCond := repo_model.SearchRepositoryCondition(opts)
-	repoIDs, _, err := repo_model.SearchRepositoryIDs(opts)
+	if opts.AllPublic {
+		allPublic = true
+		opts.AllPublic = false // set it false to avoid returning too many repos, we could filter by indexer
+	}
+	repoIDs, _, err = repo_model.SearchRepositoryIDs(opts)
 	if err != nil {
 		ctx.Error(http.StatusInternalServerError, "SearchRepositoryIDs", err.Error())
 		return
 	}
+	}
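Note how `AllPublic` is handled: instead of expanding "all public repositories" into an enormous ID list for a SQL `IN` clause, the flag is cleared before the repo query and carried into the indexer options. A plausible reading (an assumption about the backend, not confirmed by this hunk) is that the indexer stores a per-issue visibility flag, so "repo is public" becomes one more filter clause. Conceptually:

```go
package sketch

import "fmt"

// repoFilter describes the repo-visibility condition handed to the indexer.
// Assumption for illustration: the indexer can evaluate
// "repo_id IN (...) OR is_public" natively, so the caller never has to
// enumerate every public repository up front.
type repoFilter struct {
	RepoIDs   []int64
	AllPublic bool
}

func (f repoFilter) String() string {
	if f.AllPublic {
		return fmt.Sprintf("repo_id IN %v OR is_public = true", f.RepoIDs)
	}
	return fmt.Sprintf("repo_id IN %v", f.RepoIDs)
}
```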
-	var issues []*issues_model.Issue
-	var filteredCount int64

 	keyword := ctx.FormTrim("q")
 	if strings.IndexByte(keyword, 0) >= 0 {
 		keyword = ""
 	}
-	var issueIDs []int64
-	if len(keyword) > 0 && len(repoIDs) > 0 {
-		if issueIDs, err = issue_indexer.SearchIssuesByKeyword(ctx, repoIDs, keyword, ctx.FormString("state")); err != nil {
-			ctx.Error(http.StatusInternalServerError, "SearchIssuesByKeyword", err.Error())
-			return
-		}
-	}

 	var isPull util.OptionalBool
 	switch ctx.FormString("type") {
@@ -2498,19 +2519,39 @@ func SearchIssues(ctx *context.Context) {
 		isPull = util.OptionalBoolNone
 	}

+	var includedAnyLabels []int64
+	{
 		labels := ctx.FormTrim("labels")
 		var includedLabelNames []string
 		if len(labels) > 0 {
 			includedLabelNames = strings.Split(labels, ",")
 		}
-	milestones := ctx.FormTrim("milestones")
-	var includedMilestones []string
-	if len(milestones) > 0 {
-		includedMilestones = strings.Split(milestones, ",")
-	}
+		includedAnyLabels, err = issues_model.GetLabelIDsByNames(ctx, includedLabelNames)
+		if err != nil {
+			ctx.Error(http.StatusInternalServerError, "GetLabelIDsByNames", err.Error())
+			return
+		}
 	}

-	projectID := ctx.FormInt64("project")
+	var includedMilestones []int64
+	{
+		milestones := ctx.FormTrim("milestones")
+		var includedMilestoneNames []string
+		if len(milestones) > 0 {
+			includedMilestoneNames = strings.Split(milestones, ",")
+		}
+		includedMilestones, err = issues_model.GetMilestoneIDsByNames(ctx, includedMilestoneNames)
+		if err != nil {
+			ctx.Error(http.StatusInternalServerError, "GetMilestoneIDsByNames", err.Error())
+			return
+		}
+	}

+	var projectID *int64
+	if v := ctx.FormInt64("project"); v > 0 {
+		projectID = &v
+	}

 	// this api is also used in UI,
 	// so the default limit is set to fit UI needs
@@ -2521,64 +2562,64 @@ func SearchIssues(ctx *context.Context) {
 		limit = setting.API.MaxResponseItems
 	}

-	// Only fetch the issues if we either don't have a keyword or the search returned issues
-	// This would otherwise return all issues if no issues were found by the search.
-	if len(keyword) == 0 || len(issueIDs) > 0 || len(includedLabelNames) > 0 || len(includedMilestones) > 0 {
-		issuesOpt := &issues_model.IssuesOptions{
-			ListOptions: db.ListOptions{
+	searchOpt := &issue_indexer.SearchOptions{
+		Paginator: &db.ListOptions{
 			Page:     ctx.FormInt("page"),
 			PageSize: limit,
 		},
-		RepoCond: repoCond,
-		IsClosed: isClosed,
-		IssueIDs: issueIDs,
+		Keyword:   keyword,
+		RepoIDs:   repoIDs,
+		AllPublic: allPublic,
-		IncludedLabelNames: includedLabelNames,
-		IncludeMilestones:  includedMilestones,
-		ProjectID:          projectID,
-		SortType:           "priorityrepo",
-		PriorityRepoID:     ctx.FormInt64("priority_repo_id"),
 		IsPull:   isPull,
-		UpdatedBeforeUnix: before,
-		UpdatedAfterUnix:  since,
+		IsClosed:            isClosed,
+		IncludedAnyLabelIDs: includedAnyLabels,
+		MilestoneIDs:        includedMilestones,
+		ProjectID:           projectID,
+		SortBy:              issue_indexer.SortByCreatedDesc,
+	}

+	if since != 0 {
+		searchOpt.UpdatedAfterUnix = &since
+	}
+	if before != 0 {
+		searchOpt.UpdatedBeforeUnix = &before
 	}
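`SearchOptions` models optional filters as pointers: a nil `PosterID` or `UpdatedAfterUnix` means "no constraint", while a non-nil pointer pins the value. That is why the handler takes the address of `since`/`before` only when they were actually supplied. A tiny illustration of the convention (stand-in struct, reduced field set):

```go
package sketch

// searchOptions shows the nil-means-unfiltered convention used by
// issue_indexer.SearchOptions (field set reduced for illustration).
type searchOptions struct {
	PosterID          *int64 // nil: any author; non-nil: exactly this author
	UpdatedAfterUnix  *int64 // nil: no lower bound on update time
	UpdatedBeforeUnix *int64 // nil: no upper bound on update time
}

// withSince sets a lower time bound only when the caller provided one,
// mirroring the `if since != 0 { searchOpt.UpdatedAfterUnix = &since }` hunk.
func withSince(o *searchOptions, since int64) {
	if since != 0 {
		o.UpdatedAfterUnix = &since
	}
}
```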

-	ctxUserID := int64(0)
 	if ctx.IsSigned {
-		ctxUserID = ctx.Doer.ID
-	}
+		ctxUserID := ctx.Doer.ID

-	// Filter for: Created by User, Assigned to User, Mentioning User, Review of User Requested
 		if ctx.FormBool("created") {
-			issuesOpt.PosterID = ctxUserID
+			searchOpt.PosterID = &ctxUserID
 		}
 		if ctx.FormBool("assigned") {
-			issuesOpt.AssigneeID = ctxUserID
+			searchOpt.AssigneeID = &ctxUserID
 		}
 		if ctx.FormBool("mentioned") {
-			issuesOpt.MentionedID = ctxUserID
+			searchOpt.MentionID = &ctxUserID
 		}
 		if ctx.FormBool("review_requested") {
-			issuesOpt.ReviewRequestedID = ctxUserID
+			searchOpt.ReviewRequestedID = &ctxUserID
 		}
 		if ctx.FormBool("reviewed") {
-			issuesOpt.ReviewedID = ctxUserID
+			searchOpt.ReviewedID = &ctxUserID
+		}
 	}

-	if issues, err = issues_model.Issues(ctx, issuesOpt); err != nil {
-		ctx.Error(http.StatusInternalServerError, "Issues", err.Error())
+	// FIXME: It's unsupported to sort by priority repo when searching by indexer,
+	// it's indeed an regression, but I think it is worth to support filtering by indexer first.
+	_ = ctx.FormInt64("priority_repo_id")

+	ids, total, err := issue_indexer.SearchIssues(ctx, searchOpt)
+	if err != nil {
+		ctx.Error(http.StatusInternalServerError, "SearchIssues", err.Error())
+		return
+	}
+	issues, err := issues_model.GetIssuesByIDs(ctx, ids, true)
+	if err != nil {
+		ctx.Error(http.StatusInternalServerError, "FindIssuesByIDs", err.Error())
 		return
 	}

-	issuesOpt.ListOptions = db.ListOptions{
-		Page: -1,
-	}
-	if filteredCount, err = issues_model.CountIssues(ctx, issuesOpt); err != nil {
-		ctx.Error(http.StatusInternalServerError, "CountIssues", err.Error())
-		return
-	}
-	}
-
-	ctx.SetTotalCountHeader(filteredCount)
+	ctx.SetTotalCountHeader(total)
 	ctx.JSON(http.StatusOK, convert.ToIssueList(ctx, issues))
 }
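`GetIssuesByIDs(ctx, ids, true)` takes a keep-order flag because a SQL `IN (...)` lookup returns rows in arbitrary order, while the indexer's ID list is already ranked and paged. A sketch of what preserving that order involves (stand-in `issue` type; the real implementation may differ):

```go
package sketch

import "sort"

type issue struct{ ID int64 }

// reorderByIDs re-sorts DB results to match the indexer's ranked ID list,
// since "SELECT ... WHERE id IN (...)" guarantees no particular order.
func reorderByIDs(issues []*issue, ids []int64) {
	pos := make(map[int64]int, len(ids))
	for i, id := range ids {
		pos[id] = i
	}
	sort.Slice(issues, func(a, b int) bool {
		return pos[issues[a].ID] < pos[issues[b].ID]
	})
}
```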

@@ -2620,23 +2661,12 @@ func ListIssues(ctx *context.Context) {
 		isClosed = util.OptionalBoolFalse
 	}

-	var issues []*issues_model.Issue
-	var filteredCount int64

 	keyword := ctx.FormTrim("q")
 	if strings.IndexByte(keyword, 0) >= 0 {
 		keyword = ""
 	}
-	var issueIDs []int64
-	var labelIDs []int64
-	if len(keyword) > 0 {
-		issueIDs, err = issue_indexer.SearchIssuesByKeyword(ctx, []int64{ctx.Repo.Repository.ID}, keyword, ctx.FormString("state"))
-		if err != nil {
-			ctx.Error(http.StatusInternalServerError, err.Error())
-			return
-		}
-	}

+	var labelIDs []int64
 	if splitted := strings.Split(ctx.FormString("labels"), ","); len(splitted) > 0 {
 		labelIDs, err = issues_model.GetLabelIDsInRepoByNames(ctx.Repo.Repository.ID, splitted)
 		if err != nil {
@@ -2675,11 +2705,9 @@ func ListIssues(ctx *context.Context) {
 		}
 	}

-	projectID := ctx.FormInt64("project")
-
-	listOptions := db.ListOptions{
-		Page:     ctx.FormInt("page"),
-		PageSize: convert.ToCorrectPageSize(ctx.FormInt("limit")),
+	var projectID *int64
+	if v := ctx.FormInt64("project"); v > 0 {
+		projectID = &v
 	}

 	var isPull util.OptionalBool
@@ -2706,40 +2734,64 @@ func ListIssues(ctx *context.Context) {
 		return
 	}

-	// Only fetch the issues if we either don't have a keyword or the search returned issues
-	// This would otherwise return all issues if no issues were found by the search.
-	if len(keyword) == 0 || len(issueIDs) > 0 || len(labelIDs) > 0 {
-		issuesOpt := &issues_model.IssuesOptions{
-			ListOptions: listOptions,
+	searchOpt := &issue_indexer.SearchOptions{
+		Paginator: &db.ListOptions{
+			Page:     ctx.FormInt("page"),
+			PageSize: convert.ToCorrectPageSize(ctx.FormInt("limit")),
+		},
+		Keyword: keyword,
 		RepoIDs: []int64{ctx.Repo.Repository.ID},
-		IsClosed:     isClosed,
-		IssueIDs:     issueIDs,
-		LabelIDs:     labelIDs,
-		MilestoneIDs: mileIDs,
-		ProjectID:    projectID,
 		IsPull:  isPull,
-		UpdatedBeforeUnix: before,
-		UpdatedAfterUnix:  since,
-		PosterID:          createdByID,
-		AssigneeID:        assignedByID,
-		MentionedID:       mentionedByID,
+		IsClosed:       isClosed,
+		ProjectBoardID: projectID,
+		SortBy:         issue_indexer.SortByCreatedDesc,
+	}
+	if since != 0 {
+		searchOpt.UpdatedAfterUnix = &since
+	}
+	if before != 0 {
+		searchOpt.UpdatedBeforeUnix = &before
+	}
+	if len(labelIDs) == 1 && labelIDs[0] == 0 {
+		searchOpt.NoLabelOnly = true
+	} else {
+		for _, labelID := range labelIDs {
+			if labelID > 0 {
+				searchOpt.IncludedLabelIDs = append(searchOpt.IncludedLabelIDs, labelID)
+			} else {
+				searchOpt.ExcludedLabelIDs = append(searchOpt.ExcludedLabelIDs, -labelID)
+			}
+		}
 	}

-	if issues, err = issues_model.Issues(ctx, issuesOpt); err != nil {
-		ctx.Error(http.StatusInternalServerError, err.Error())
+	if len(mileIDs) == 1 && mileIDs[0] == db.NoConditionID {
+		searchOpt.MilestoneIDs = []int64{0}
+	} else {
+		searchOpt.MilestoneIDs = mileIDs
+	}
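Two encodings in the hunk above deserve a note. In the decoded `labels` IDs, a lone `0` means "issues with no label at all", a positive ID includes a label, and (judging from this hunk) a negative ID marks an exclusion. Similarly, a lone `db.NoConditionID` for milestones is translated to milestone ID `0`, which the indexer evidently uses for issues without a milestone. A compact sketch of the label decoding rule (stand-in option struct, logic taken directly from the diff):

```go
package sketch

// labelFilter mirrors the NoLabelOnly / IncludedLabelIDs / ExcludedLabelIDs
// trio on issue_indexer.SearchOptions (names as in the diff above).
type labelFilter struct {
	NoLabelOnly      bool
	IncludedLabelIDs []int64
	ExcludedLabelIDs []int64
}

// decodeLabelIDs applies the convention from the hunk: [0] alone means
// "unlabeled only"; otherwise positive IDs include and negative IDs exclude.
func decodeLabelIDs(labelIDs []int64) (f labelFilter) {
	if len(labelIDs) == 1 && labelIDs[0] == 0 {
		f.NoLabelOnly = true
		return f
	}
	for _, id := range labelIDs {
		if id > 0 {
			f.IncludedLabelIDs = append(f.IncludedLabelIDs, id)
		} else {
			f.ExcludedLabelIDs = append(f.ExcludedLabelIDs, -id)
		}
	}
	return f
}
```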

+	if createdByID > 0 {
+		searchOpt.PosterID = &createdByID
+	}
+	if assignedByID > 0 {
+		searchOpt.AssigneeID = &assignedByID
+	}
+	if mentionedByID > 0 {
+		searchOpt.MentionID = &mentionedByID
+	}
+
+	ids, total, err := issue_indexer.SearchIssues(ctx, searchOpt)
+	if err != nil {
+		ctx.Error(http.StatusInternalServerError, "SearchIssues", err.Error())
+		return
+	}
+	issues, err := issues_model.GetIssuesByIDs(ctx, ids, true)
+	if err != nil {
+		ctx.Error(http.StatusInternalServerError, "FindIssuesByIDs", err.Error())
 		return
 	}

-	issuesOpt.ListOptions = db.ListOptions{
-		Page: -1,
-	}
-	if filteredCount, err = issues_model.CountIssues(ctx, issuesOpt); err != nil {
-		ctx.Error(http.StatusInternalServerError, err.Error())
-		return
-	}
-	}
-
-	ctx.SetTotalCountHeader(filteredCount)
+	ctx.SetTotalCountHeader(total)
 	ctx.JSON(http.StatusOK, convert.ToIssueList(ctx, issues))
 }

@@ -22,6 +22,7 @@ import (
"code.gitea.io/gitea/models/unit"
|
"code.gitea.io/gitea/models/unit"
|
||||||
user_model "code.gitea.io/gitea/models/user"
|
user_model "code.gitea.io/gitea/models/user"
|
||||||
"code.gitea.io/gitea/modules/base"
|
"code.gitea.io/gitea/modules/base"
|
||||||
|
"code.gitea.io/gitea/modules/container"
|
||||||
"code.gitea.io/gitea/modules/context"
|
"code.gitea.io/gitea/modules/context"
|
||||||
issue_indexer "code.gitea.io/gitea/modules/indexer/issues"
|
issue_indexer "code.gitea.io/gitea/modules/indexer/issues"
|
||||||
"code.gitea.io/gitea/modules/json"
|
"code.gitea.io/gitea/modules/json"
|
||||||
|
@ -466,21 +467,16 @@ func buildIssueOverview(ctx *context.Context, unitType unit.Type) {
|
||||||
keyword := strings.Trim(ctx.FormString("q"), " ")
|
keyword := strings.Trim(ctx.FormString("q"), " ")
|
||||||
ctx.Data["Keyword"] = keyword
|
ctx.Data["Keyword"] = keyword
|
||||||
|
|
||||||
// Execute keyword search for issues.
|
accessibleRepos := container.Set[int64]{}
|
||||||
// USING NON-FINAL STATE OF opts FOR A QUERY.
|
{
|
||||||
issueIDsFromSearch, err := issueIDsFromSearch(ctx, ctxUser, keyword, opts)
|
ids, err := issues_model.GetRepoIDsForIssuesOptions(opts, ctxUser)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
ctx.ServerError("issueIDsFromSearch", err)
|
ctx.ServerError("GetRepoIDsForIssuesOptions", err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
for _, id := range ids {
|
||||||
// Ensure no issues are returned if a keyword was provided that didn't match any issues.
|
accessibleRepos.Add(id)
|
||||||
var forceEmpty bool
|
}
|
||||||
|
|
||||||
if len(issueIDsFromSearch) > 0 {
|
|
||||||
opts.IssueIDs = issueIDsFromSearch
|
|
||||||
} else if len(keyword) > 0 {
|
|
||||||
forceEmpty = true
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Educated guess: Do or don't show closed issues.
|
// Educated guess: Do or don't show closed issues.
|
||||||
|
@ -490,12 +486,21 @@ func buildIssueOverview(ctx *context.Context, unitType unit.Type) {
|
||||||
// Filter repos and count issues in them. Count will be used later.
|
// Filter repos and count issues in them. Count will be used later.
|
||||||
// USING NON-FINAL STATE OF opts FOR A QUERY.
|
// USING NON-FINAL STATE OF opts FOR A QUERY.
|
||||||
var issueCountByRepo map[int64]int64
|
var issueCountByRepo map[int64]int64
|
||||||
if !forceEmpty {
|
{
|
||||||
|
issueIDs, err := issueIDsFromSearch(ctx, keyword, opts)
|
||||||
|
if err != nil {
|
||||||
|
ctx.ServerError("issueIDsFromSearch", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if len(issueIDs) > 0 { // else, no issues found, just leave issueCountByRepo empty
|
||||||
|
opts.IssueIDs = issueIDs
|
||||||
issueCountByRepo, err = issues_model.CountIssuesByRepo(ctx, opts)
|
issueCountByRepo, err = issues_model.CountIssuesByRepo(ctx, opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
ctx.ServerError("CountIssuesByRepo", err)
|
ctx.ServerError("CountIssuesByRepo", err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
opts.IssueIDs = nil // reset, the opts will be used later
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Make sure page number is at least 1. Will be posted to ctx.Data.
|
// Make sure page number is at least 1. Will be posted to ctx.Data.
|
||||||
|
@ -503,14 +508,17 @@ func buildIssueOverview(ctx *context.Context, unitType unit.Type) {
|
||||||
if page <= 1 {
|
if page <= 1 {
|
||||||
page = 1
|
page = 1
|
||||||
}
|
}
|
||||||
opts.Page = page
|
opts.Paginator = &db.ListOptions{
|
||||||
opts.PageSize = setting.UI.IssuePagingNum
|
Page: page,
|
||||||
|
PageSize: setting.UI.IssuePagingNum,
|
||||||
|
}
|
||||||
|
|
||||||
// Get IDs for labels (a filter option for issues/pulls).
|
// Get IDs for labels (a filter option for issues/pulls).
|
||||||
// Required for IssuesOptions.
|
// Required for IssuesOptions.
|
||||||
var labelIDs []int64
|
var labelIDs []int64
|
||||||
selectedLabels := ctx.FormString("labels")
|
selectedLabels := ctx.FormString("labels")
|
||||||
if len(selectedLabels) > 0 && selectedLabels != "0" {
|
if len(selectedLabels) > 0 && selectedLabels != "0" {
|
||||||
|
var err error
|
||||||
labelIDs, err = base.StringsToInt64s(strings.Split(selectedLabels, ","))
|
labelIDs, err = base.StringsToInt64s(strings.Split(selectedLabels, ","))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
ctx.ServerError("StringsToInt64s", err)
|
ctx.ServerError("StringsToInt64s", err)
|
||||||
|
@ -521,7 +529,16 @@ func buildIssueOverview(ctx *context.Context, unitType unit.Type) {
|
||||||
|
|
||||||
// Parse ctx.FormString("repos") and remember matched repo IDs for later.
|
// Parse ctx.FormString("repos") and remember matched repo IDs for later.
|
||||||
// Gets set when clicking filters on the issues overview page.
|
// Gets set when clicking filters on the issues overview page.
|
||||||
opts.RepoIDs = getRepoIDs(ctx.FormString("repos"))
|
repoIDs := getRepoIDs(ctx.FormString("repos"))
|
||||||
|
if len(repoIDs) == 0 {
|
||||||
|
repoIDs = accessibleRepos.Values()
|
||||||
|
} else {
|
||||||
|
// Remove repo IDs that are not accessible to the user.
|
||||||
|
repoIDs = util.SliceRemoveAllFunc(repoIDs, func(v int64) bool {
|
||||||
|
return !accessibleRepos.Contains(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
opts.RepoIDs = repoIDs
|
||||||
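`buildIssueOverview` now precomputes the set of repositories the user can access and intersects any user-supplied repo filter against it, so inaccessible IDs can never reach the indexer query. The intersection step in isolation (a sketch using a plain map; Gitea's `container.Set` provides the same `Add`/`Contains`/`Values` operations seen in the hunk):

```go
package sketch

// filterAccessible mirrors the hunk above: an empty request means "all
// accessible repos"; otherwise requested IDs are narrowed to the accessible
// set so the indexer never sees repos the user cannot read.
func filterAccessible(requested []int64, accessible map[int64]struct{}) []int64 {
	if len(requested) == 0 {
		out := make([]int64, 0, len(accessible))
		for id := range accessible {
			out = append(out, id)
		}
		return out
	}
	out := requested[:0] // filter in place, like util.SliceRemoveAllFunc
	for _, id := range requested {
		if _, ok := accessible[id]; ok {
			out = append(out, id)
		}
	}
	return out
}
```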

 	// ------------------------------
 	// Get issues as defined by opts.
@@ -529,15 +546,18 @@ func buildIssueOverview(ctx *context.Context, unitType unit.Type) {

 	// Slice of Issues that will be displayed on the overview page
 	// USING FINAL STATE OF opts FOR A QUERY.
-	var issues []*issues_model.Issue
-	if !forceEmpty {
-		issues, err = issues_model.Issues(ctx, opts)
+	var issues issues_model.IssueList
+	{
+		issueIDs, err := issueIDsFromSearch(ctx, keyword, opts)
 		if err != nil {
-			ctx.ServerError("Issues", err)
+			ctx.ServerError("issueIDsFromSearch", err)
+			return
+		}
+		issues, err = issues_model.GetIssuesByIDs(ctx, issueIDs, true)
+		if err != nil {
+			ctx.ServerError("GetIssuesByIDs", err)
 			return
 		}
-	} else {
-		issues = []*issues_model.Issue{}
 	}

 	// ----------------------------------
@@ -576,12 +596,12 @@ func buildIssueOverview(ctx *context.Context, unitType unit.Type) {
 	// Fill stats to post to ctx.Data.
 	// -------------------------------
 	var issueStats *issues_model.IssueStats
-	if !forceEmpty {
+	{
 		statsOpts := issues_model.IssuesOptions{
 			User:       ctx.Doer,
 			IsPull:     util.OptionalBoolOf(isPullList),
 			IsClosed:   util.OptionalBoolOf(isShowClosed),
-			IssueIDs:   issueIDsFromSearch,
+			IssueIDs:   nil,
 			IsArchived: util.OptionalBoolFalse,
 			LabelIDs:   opts.LabelIDs,
 			Org:        org,
@@ -589,13 +609,29 @@ func buildIssueOverview(ctx *context.Context, unitType unit.Type) {
 			RepoCond: opts.RepoCond,
 		}

-		issueStats, err = issues_model.GetUserIssueStats(filterMode, statsOpts)
+		if keyword != "" {
+			statsOpts.RepoIDs = opts.RepoIDs
+			allIssueIDs, err := issueIDsFromSearch(ctx, keyword, &statsOpts)
 			if err != nil {
-				ctx.ServerError("GetUserIssueStats Shown", err)
+				ctx.ServerError("issueIDsFromSearch", err)
 				return
 			}
-		} else {
+			statsOpts.IssueIDs = allIssueIDs
+		}

+		if keyword != "" && len(statsOpts.IssueIDs) == 0 {
+			// So it did search with the keyword, but no issue found.
+			// Just set issueStats to empty.
 			issueStats = &issues_model.IssueStats{}
+		} else {
+			// So it did search with the keyword, and found some issues. It needs to get issueStats of these issues.
+			// Or the keyword is empty, so it doesn't need issueIDs as filter, just get issueStats with statsOpts.
+			issueStats, err = issues_model.GetUserIssueStats(filterMode, statsOpts)
+			if err != nil {
+				ctx.ServerError("GetUserIssueStats", err)
+				return
+			}
+		}
 	}

 	// Will be posted to ctx.Data.
@@ -629,9 +665,13 @@ func buildIssueOverview(ctx *context.Context, unitType unit.Type) {

 	ctx.Data["IssueRefEndNames"], ctx.Data["IssueRefURLs"] = issue_service.GetRefEndNamesAndURLs(issues, ctx.FormString("RepoLink"))

+	if err := issues.LoadAttributes(ctx); err != nil {
+		ctx.ServerError("issues.LoadAttributes", err)
+		return
+	}
 	ctx.Data["Issues"] = issues

-	approvalCounts, err := issues_model.IssueList(issues).GetApprovalCounts(ctx)
+	approvalCounts, err := issues.GetApprovalCounts(ctx)
 	if err != nil {
 		ctx.ServerError("ApprovalCounts", err)
 		return
@@ -716,21 +756,12 @@ func getRepoIDs(reposQuery string) []int64 {
 	return repoIDs
 }

-func issueIDsFromSearch(ctx *context.Context, ctxUser *user_model.User, keyword string, opts *issues_model.IssuesOptions) ([]int64, error) {
-	if len(keyword) == 0 {
-		return []int64{}, nil
-	}
-
-	searchRepoIDs, err := issues_model.GetRepoIDsForIssuesOptions(opts, ctxUser)
+func issueIDsFromSearch(ctx *context.Context, keyword string, opts *issues_model.IssuesOptions) ([]int64, error) {
+	ids, _, err := issue_indexer.SearchIssues(ctx, issue_indexer.ToSearchOptions(keyword, opts))
 	if err != nil {
-		return nil, fmt.Errorf("GetRepoIDsForIssuesOptions: %w", err)
+		return nil, fmt.Errorf("SearchIssues: %w", err)
 	}
-	issueIDsFromSearch, err := issue_indexer.SearchIssuesByKeyword(ctx, searchRepoIDs, keyword, ctx.FormString("state"))
-	if err != nil {
-		return nil, fmt.Errorf("SearchIssuesByKeyword: %w", err)
-	}
-
-	return issueIDsFromSearch, nil
+	return ids, nil
 }

 func loadRepoByIDs(ctxUser *user_model.User, issueCountByRepo map[int64]int64, unitType unit.Type) (map[int64]*repo_model.Repository, error) {

@@ -263,7 +263,7 @@ func NotificationSubscriptions(ctx *context.Context) {
 		return
 	}
 	issues, err := issues_model.Issues(ctx, &issues_model.IssuesOptions{
-		ListOptions: db.ListOptions{
+		Paginator: &db.ListOptions{
 			PageSize: setting.UI.IssuePagingNum,
 			Page:     page,
 		},
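Across these hunks the concrete `ListOptions` field gives way to a `Paginator` field taking `*db.ListOptions`, which suggests an interface so callers can also express "no paging" or absolute limit/offset windows. A rough sketch of the shape such an abstraction might take (assumed method set; the real `db.Paginator` may differ):

```go
package sketch

// paginator abstracts "which slice of results to fetch", the way the
// Paginator field in these hunks does (assumed interface; illustrative only).
type paginator interface {
	GetSkipTake() (skip, take int)
}

// listOptions is the page/page-size flavor, like db.ListOptions.
type listOptions struct {
	Page     int // 1-based
	PageSize int
}

func (o listOptions) GetSkipTake() (int, int) {
	page := o.Page
	if page < 1 {
		page = 1
	}
	return (page - 1) * o.PageSize, o.PageSize
}

var _ paginator = listOptions{} // compile-time check that the shape fits
```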

@@ -99,7 +99,7 @@ func TestViewIssuesKeyword(t *testing.T) {
 		RepoID: repo.ID,
 		Index:  1,
 	})
-	issues.UpdateIssueIndexer(issue)
+	issues.UpdateIssueIndexer(issue.ID)
 	time.Sleep(time.Second * 1)
 	const keyword = "first"
 	req := NewRequestf(t, "GET", "%s/issues?q=%s", repo.Link(), keyword)
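The test now enqueues `issue.ID` instead of the loaded issue. A plausible reading of this signature change is that the queue carries only identifiers and the worker re-reads the row at processing time, so edits made after enqueueing still end up indexed in their latest state. A sketch of that consumer-side pattern (stand-in types, not the actual queue handler):

```go
package sketch

import "context"

type issue struct{ ID int64 }

// loadIssue stands in for fetching the current row from the database.
func loadIssue(ctx context.Context, id int64) (*issue, error) { return &issue{ID: id}, nil }

// index stands in for writing a document to bleve/elasticsearch/meilisearch.
func index(ctx context.Context, it *issue) error { return nil }

// handleQueued re-reads the issue when the job runs, so the document written
// to the indexer reflects edits made after the ID was enqueued.
func handleQueued(ctx context.Context, id int64) error {
	it, err := loadIssue(ctx, id)
	if err != nil {
		return err
	}
	return index(ctx, it)
}
```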