Refactor code indexer (#9313)
* Refactor code indexer
* fix test
* fix test
* refactor code indexer
* fix import
* improve code
* fix typo
* fix test and make code clean
* fix lint
parent 2f9564f993
commit 89b4e0477b
@@ -0,0 +1,147 @@
// Copyright 2019 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package code

import (
	"strconv"
	"strings"

	"code.gitea.io/gitea/models"
	"code.gitea.io/gitea/modules/git"
	"code.gitea.io/gitea/modules/log"
	"code.gitea.io/gitea/modules/setting"
)

type fileUpdate struct {
	Filename string
	BlobSha  string
}

// repoChanges changes (file additions/updates/removals) to a repo
type repoChanges struct {
	Updates          []fileUpdate
	RemovedFilenames []string
}

func getDefaultBranchSha(repo *models.Repository) (string, error) {
	stdout, err := git.NewCommand("show-ref", "-s", git.BranchPrefix+repo.DefaultBranch).RunInDir(repo.RepoPath())
	if err != nil {
		return "", err
	}
	return strings.TrimSpace(stdout), nil
}

// getRepoChanges returns changes to repo since last indexer update
func getRepoChanges(repo *models.Repository, revision string) (*repoChanges, error) {
	if err := repo.GetIndexerStatus(); err != nil {
		return nil, err
	}

	if len(repo.IndexerStatus.CommitSha) == 0 {
		return genesisChanges(repo, revision)
	}
	return nonGenesisChanges(repo, revision)
}

func isIndexable(entry *git.TreeEntry) bool {
	if !entry.IsRegular() && !entry.IsExecutable() {
		return false
	}
	name := strings.ToLower(entry.Name())
	for _, g := range setting.Indexer.ExcludePatterns {
		if g.Match(name) {
			return false
		}
	}
	for _, g := range setting.Indexer.IncludePatterns {
		if g.Match(name) {
			return true
		}
	}
	return len(setting.Indexer.IncludePatterns) == 0
}

// parseGitLsTreeOutput parses the output of a `git ls-tree -r --full-name` command
func parseGitLsTreeOutput(stdout []byte) ([]fileUpdate, error) {
	entries, err := git.ParseTreeEntries(stdout)
	if err != nil {
		return nil, err
	}
	var idxCount = 0
	updates := make([]fileUpdate, len(entries))
	for _, entry := range entries {
		if isIndexable(entry) {
			updates[idxCount] = fileUpdate{
				Filename: entry.Name(),
				BlobSha:  entry.ID.String(),
			}
			idxCount++
		}
	}
	return updates[:idxCount], nil
}

// genesisChanges get changes to add repo to the indexer for the first time
func genesisChanges(repo *models.Repository, revision string) (*repoChanges, error) {
	var changes repoChanges
	stdout, err := git.NewCommand("ls-tree", "--full-tree", "-r", revision).
		RunInDirBytes(repo.RepoPath())
	if err != nil {
		return nil, err
	}
	changes.Updates, err = parseGitLsTreeOutput(stdout)
	return &changes, err
}

// nonGenesisChanges get changes since the previous indexer update
func nonGenesisChanges(repo *models.Repository, revision string) (*repoChanges, error) {
	diffCmd := git.NewCommand("diff", "--name-status",
		repo.IndexerStatus.CommitSha, revision)
	stdout, err := diffCmd.RunInDir(repo.RepoPath())
	if err != nil {
		// previous commit sha may have been removed by a force push, so
		// try rebuilding from scratch
		log.Warn("git diff: %v", err)
		if err = indexer.Delete(repo.ID); err != nil {
			return nil, err
		}
		return genesisChanges(repo, revision)
	}
	var changes repoChanges
	updatedFilenames := make([]string, 0, 10)
	for _, line := range strings.Split(stdout, "\n") {
		line = strings.TrimSpace(line)
		if len(line) == 0 {
			continue
		}
		filename := strings.TrimSpace(line[1:])
		if len(filename) == 0 {
			continue
		} else if filename[0] == '"' {
			filename, err = strconv.Unquote(filename)
			if err != nil {
				return nil, err
			}
		}

		switch status := line[0]; status {
		case 'M', 'A':
			updatedFilenames = append(updatedFilenames, filename)
		case 'D':
			changes.RemovedFilenames = append(changes.RemovedFilenames, filename)
		default:
			log.Warn("Unrecognized status: %c (line=%s)", status, line)
		}
	}

	cmd := git.NewCommand("ls-tree", "--full-tree", revision, "--")
	cmd.AddArguments(updatedFilenames...)
	lsTreeStdout, err := cmd.RunInDirBytes(repo.RepoPath())
	if err != nil {
		return nil, err
	}
	changes.Updates, err = parseGitLsTreeOutput(lsTreeStdout)
	return &changes, err
}
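
Taken together, these helpers split indexing into a "genesis" walk of the full tree and an incremental `git diff --name-status` against the last indexed commit. A minimal sketch (not part of this diff) of how a backend might drive them; applyChanges is a hypothetical stand-in for the backend's batch add/delete logic:

// Illustrative only: compute what changed since the last indexed commit
// and hand it to a hypothetical backend writer.
func indexRepo(repo *models.Repository) error {
	sha, err := getDefaultBranchSha(repo)
	if err != nil {
		return err
	}
	changes, err := getRepoChanges(repo, sha)
	if err != nil {
		return err
	}
	// applyChanges (assumed) would index changes.Updates by blob SHA,
	// drop changes.RemovedFilenames, then record sha as the new status.
	return applyChanges(repo, changes, sha)
}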
@@ -0,0 +1,133 @@
// Copyright 2019 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package code

import (
	"os"

	"code.gitea.io/gitea/models"
	"code.gitea.io/gitea/modules/graceful"
	"code.gitea.io/gitea/modules/log"
	"code.gitea.io/gitea/modules/setting"
)

type repoIndexerOperation struct {
	repoID   int64
	deleted  bool
	watchers []chan<- error
}

var repoIndexerOperationQueue chan repoIndexerOperation

func processRepoIndexerOperationQueue(indexer Indexer) {
	defer indexer.Close()

	repoIndexerOperationQueue = make(chan repoIndexerOperation, setting.Indexer.UpdateQueueLength)
	for {
		select {
		case op := <-repoIndexerOperationQueue:
			var err error
			if op.deleted {
				if err = indexer.Delete(op.repoID); err != nil {
					log.Error("indexer.Delete: %v", err)
				}
			} else {
				if err = indexer.Index(op.repoID); err != nil {
					log.Error("indexer.Index: %v", err)
				}
			}
			for _, watcher := range op.watchers {
				watcher <- err
			}
		case <-graceful.GetManager().IsShutdown():
			log.Info("PID: %d Repository indexer queue processing stopped", os.Getpid())
			return
		}
	}
}

// DeleteRepoFromIndexer remove all of a repository's entries from the indexer
func DeleteRepoFromIndexer(repo *models.Repository, watchers ...chan<- error) {
	addOperationToQueue(repoIndexerOperation{repoID: repo.ID, deleted: true, watchers: watchers})
}

// UpdateRepoIndexer update a repository's entries in the indexer
func UpdateRepoIndexer(repo *models.Repository, watchers ...chan<- error) {
	addOperationToQueue(repoIndexerOperation{repoID: repo.ID, deleted: false, watchers: watchers})
}

func addOperationToQueue(op repoIndexerOperation) {
	if !setting.Indexer.RepoIndexerEnabled {
		return
	}
	select {
	case repoIndexerOperationQueue <- op:
		break
	default:
		go func() {
			repoIndexerOperationQueue <- op
		}()
	}
}

// populateRepoIndexer populate the repo indexer with pre-existing data. This
// should only be run when the indexer is created for the first time.
func populateRepoIndexer() {
	log.Info("Populating the repo indexer with existing repositories")

	isShutdown := graceful.GetManager().IsShutdown()

	exist, err := models.IsTableNotEmpty("repository")
	if err != nil {
		log.Fatal("System error: %v", err)
	} else if !exist {
		return
	}

	// if there is any existing repo indexer metadata in the DB, delete it
	// since we are starting afresh. Also, xorm requires deletes to have a
	// condition, and we want to delete everything, thus 1=1.
	if err := models.DeleteAllRecords("repo_indexer_status"); err != nil {
		log.Fatal("System error: %v", err)
	}

	var maxRepoID int64
	if maxRepoID, err = models.GetMaxID("repository"); err != nil {
		log.Fatal("System error: %v", err)
	}

	// start with the maximum existing repo ID and work backwards, so that we
	// don't include repos that are created after gitea starts; such repos will
	// already be added to the indexer, and we don't need to add them again.
	for maxRepoID > 0 {
		select {
		case <-isShutdown:
			log.Info("Repository Indexer population shutdown before completion")
			return
		default:
		}
		ids, err := models.GetUnindexedRepos(maxRepoID, 0, 50)
		if err != nil {
			log.Error("populateRepoIndexer: %v", err)
			return
		} else if len(ids) == 0 {
			break
		}
		for _, id := range ids {
			select {
			case <-isShutdown:
				log.Info("Repository Indexer population shutdown before completion")
				return
			default:
			}
			repoIndexerOperationQueue <- repoIndexerOperation{
				repoID:  id,
				deleted: false,
			}
			maxRepoID = id - 1
		}
	}
	log.Info("Done (re)populating the repo indexer with existing repositories")
}
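
The select/default pair in addOperationToQueue makes enqueueing non-blocking: when the buffered channel is full, the send is retried from a fresh goroutine so callers never stall. A minimal usage sketch (not part of this diff) of the watcher mechanism, which lets a caller block until its operation has been processed:

// Illustrative only: queue an index update and wait for the worker's result.
// The channel is buffered so the worker's send cannot block even if the
// caller has already given up waiting.
func indexAndWait(repo *models.Repository) error {
	done := make(chan error, 1)
	UpdateRepoIndexer(repo, done)
	return <-done
}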
@@ -1,290 +0,0 @@
// Copyright 2017 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package code

import (
	"context"
	"os"
	"strings"
	"sync"

	"code.gitea.io/gitea/models"
	"code.gitea.io/gitea/modules/graceful"
	"code.gitea.io/gitea/modules/log"
	"code.gitea.io/gitea/modules/setting"

	"github.com/blevesearch/bleve"
	"github.com/blevesearch/bleve/analysis/analyzer/custom"
	"github.com/blevesearch/bleve/analysis/token/lowercase"
	"github.com/blevesearch/bleve/analysis/tokenizer/unicode"
	"github.com/blevesearch/bleve/search/query"
	"github.com/ethantkoenig/rupture"
)

const (
	repoIndexerAnalyzer = "repoIndexerAnalyzer"
	repoIndexerDocType  = "repoIndexerDocType"

	repoIndexerLatestVersion = 4
)

type bleveIndexerHolder struct {
	index bleve.Index
	mutex sync.RWMutex
	cond  *sync.Cond
}

func newBleveIndexerHolder() *bleveIndexerHolder {
	b := &bleveIndexerHolder{}
	b.cond = sync.NewCond(b.mutex.RLocker())
	return b
}

func (r *bleveIndexerHolder) set(index bleve.Index) {
	r.mutex.Lock()
	defer r.mutex.Unlock()
	r.index = index
	r.cond.Broadcast()
}

func (r *bleveIndexerHolder) get() bleve.Index {
	r.mutex.RLock()
	defer r.mutex.RUnlock()
	if r.index == nil {
		r.cond.Wait()
	}
	return r.index
}

// repoIndexer (thread-safe) index for repository contents
var indexerHolder = newBleveIndexerHolder()

// RepoIndexerOp type of operation to perform on repo indexer
type RepoIndexerOp int

const (
	// RepoIndexerOpUpdate add/update a file's contents
	RepoIndexerOpUpdate = iota

	// RepoIndexerOpDelete delete a file
	RepoIndexerOpDelete
)

// RepoIndexerData data stored in the repo indexer
type RepoIndexerData struct {
	RepoID  int64
	Content string
}

// Type returns the document type, for bleve's mapping.Classifier interface.
func (d *RepoIndexerData) Type() string {
	return repoIndexerDocType
}

// RepoIndexerUpdate an update to the repo indexer
type RepoIndexerUpdate struct {
	Filepath string
	Op       RepoIndexerOp
	Data     *RepoIndexerData
}

// AddToFlushingBatch adds the update to the given flushing batch.
func (update RepoIndexerUpdate) AddToFlushingBatch(batch rupture.FlushingBatch) error {
	id := filenameIndexerID(update.Data.RepoID, update.Filepath)
	switch update.Op {
	case RepoIndexerOpUpdate:
		return batch.Index(id, update.Data)
	case RepoIndexerOpDelete:
		return batch.Delete(id)
	default:
		log.Error("Unrecognized repo indexer op: %d", update.Op)
	}
	return nil
}

// initRepoIndexer initialize repo indexer
func initRepoIndexer(populateIndexer func() error) {
	indexer, err := openIndexer(setting.Indexer.RepoPath, repoIndexerLatestVersion)
	if err != nil {
		log.Fatal("InitRepoIndexer %s: %v", setting.Indexer.RepoPath, err)
	}
	if indexer != nil {
		indexerHolder.set(indexer)
		closeAtTerminate()

		// Continue population from where left off
		if err = populateIndexer(); err != nil {
			log.Fatal("PopulateRepoIndex: %v", err)
		}
		return
	}

	if err = createRepoIndexer(setting.Indexer.RepoPath, repoIndexerLatestVersion); err != nil {
		log.Fatal("CreateRepoIndexer: %v", err)
	}
	closeAtTerminate()

	// if there is any existing repo indexer metadata in the DB, delete it
	// since we are starting afresh. Also, xorm requires deletes to have a
	// condition, and we want to delete everything, thus 1=1.
	if err := models.DeleteAllRecords("repo_indexer_status"); err != nil {
		log.Fatal("DeleteAllRepoIndexerStatus: %v", err)
	}

	if err = populateIndexer(); err != nil {
		log.Fatal("PopulateRepoIndex: %v", err)
	}
}

func closeAtTerminate() {
	graceful.GetManager().RunAtTerminate(context.Background(), func() {
		log.Debug("Closing repo indexer")
		indexer := indexerHolder.get()
		if indexer != nil {
			err := indexer.Close()
			if err != nil {
				log.Error("Error whilst closing the repository indexer: %v", err)
			}
		}
		log.Info("PID: %d Repository Indexer closed", os.Getpid())
	})
}

// createRepoIndexer create a repo indexer if one does not already exist
func createRepoIndexer(path string, latestVersion int) error {
	docMapping := bleve.NewDocumentMapping()
	numericFieldMapping := bleve.NewNumericFieldMapping()
	numericFieldMapping.IncludeInAll = false
	docMapping.AddFieldMappingsAt("RepoID", numericFieldMapping)

	textFieldMapping := bleve.NewTextFieldMapping()
	textFieldMapping.IncludeInAll = false
	docMapping.AddFieldMappingsAt("Content", textFieldMapping)

	mapping := bleve.NewIndexMapping()
	if err := addUnicodeNormalizeTokenFilter(mapping); err != nil {
		return err
	} else if err := mapping.AddCustomAnalyzer(repoIndexerAnalyzer, map[string]interface{}{
		"type":          custom.Name,
		"char_filters":  []string{},
		"tokenizer":     unicode.Name,
		"token_filters": []string{unicodeNormalizeName, lowercase.Name},
	}); err != nil {
		return err
	}
	mapping.DefaultAnalyzer = repoIndexerAnalyzer
	mapping.AddDocumentMapping(repoIndexerDocType, docMapping)
	mapping.AddDocumentMapping("_all", bleve.NewDocumentDisabledMapping())

	indexer, err := bleve.New(path, mapping)
	if err != nil {
		return err
	}
	indexerHolder.set(indexer)

	return rupture.WriteIndexMetadata(path, &rupture.IndexMetadata{
		Version: latestVersion,
	})
}

func filenameIndexerID(repoID int64, filename string) string {
	return indexerID(repoID) + "_" + filename
}

func filenameOfIndexerID(indexerID string) string {
	index := strings.IndexByte(indexerID, '_')
	if index == -1 {
		log.Error("Unexpected ID in repo indexer: %s", indexerID)
	}
	return indexerID[index+1:]
}

// RepoIndexerBatch batch to add updates to
func RepoIndexerBatch() rupture.FlushingBatch {
	return rupture.NewFlushingBatch(indexerHolder.get(), maxBatchSize)
}

// deleteRepoFromIndexer delete all of a repo's files from indexer
func deleteRepoFromIndexer(repoID int64) error {
	query := numericEqualityQuery(repoID, "RepoID")
	searchRequest := bleve.NewSearchRequestOptions(query, 2147483647, 0, false)
	result, err := indexerHolder.get().Search(searchRequest)
	if err != nil {
		return err
	}
	batch := RepoIndexerBatch()
	for _, hit := range result.Hits {
		if err = batch.Delete(hit.ID); err != nil {
			return err
		}
	}
	return batch.Flush()
}

// RepoSearchResult result of performing a search in a repo
type RepoSearchResult struct {
	RepoID     int64
	StartIndex int
	EndIndex   int
	Filename   string
	Content    string
}

// SearchRepoByKeyword searches for files in the specified repo.
// Returns the matching file-paths
func SearchRepoByKeyword(repoIDs []int64, keyword string, page, pageSize int) (int64, []*RepoSearchResult, error) {
	phraseQuery := bleve.NewMatchPhraseQuery(keyword)
	phraseQuery.FieldVal = "Content"
	phraseQuery.Analyzer = repoIndexerAnalyzer

	var indexerQuery query.Query
	if len(repoIDs) > 0 {
		var repoQueries = make([]query.Query, 0, len(repoIDs))
		for _, repoID := range repoIDs {
			repoQueries = append(repoQueries, numericEqualityQuery(repoID, "RepoID"))
		}

		indexerQuery = bleve.NewConjunctionQuery(
			bleve.NewDisjunctionQuery(repoQueries...),
			phraseQuery,
		)
	} else {
		indexerQuery = phraseQuery
	}

	from := (page - 1) * pageSize
	searchRequest := bleve.NewSearchRequestOptions(indexerQuery, pageSize, from, false)
	searchRequest.Fields = []string{"Content", "RepoID"}
	searchRequest.IncludeLocations = true

	result, err := indexerHolder.get().Search(searchRequest)
	if err != nil {
		return 0, nil, err
	}

	searchResults := make([]*RepoSearchResult, len(result.Hits))
	for i, hit := range result.Hits {
		var startIndex, endIndex int = -1, -1
		for _, locations := range hit.Locations["Content"] {
			location := locations[0]
			locationStart := int(location.Start)
			locationEnd := int(location.End)
			if startIndex < 0 || locationStart < startIndex {
				startIndex = locationStart
			}
			if endIndex < 0 || locationEnd > endIndex {
				endIndex = locationEnd
			}
		}
		searchResults[i] = &RepoSearchResult{
			RepoID:     int64(hit.Fields["RepoID"].(float64)),
			StartIndex: startIndex,
			EndIndex:   endIndex,
			Filename:   filenameOfIndexerID(hit.ID),
			Content:    hit.Fields["Content"].(string),
		}
	}
	return int64(result.Total), searchResults, nil
}
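
For reference, a sketch (not part of this diff) of how the removed SearchRepoByKeyword entry point was called; page is 1-based, and an empty repoIDs slice searches all repositories:

// Illustrative only: fetch the first page of ten matches for "parser"
// across two repositories and log where each match starts and ends.
func exampleSearch() {
	total, results, err := SearchRepoByKeyword([]int64{1, 2}, "parser", 1, 10)
	if err != nil {
		log.Error("SearchRepoByKeyword: %v", err)
		return
	}
	log.Info("%d total matches", total)
	for _, r := range results {
		log.Info("repo %d: %s [%d:%d]", r.RepoID, r.Filename, r.StartIndex, r.EndIndex)
	}
}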