# HG changeset patch
# User FUJIWARA Katsunori
# Date 1485105458 -32400
# Node ID caef0be399489fd71c9e2d744fe928a352f6b9ab
# Parent 2ff913970025ef152b436cd791290eb078de5681
search: make "repository:" condition work as expected

Before this revision, the "repository:foo" condition, when searching for
"File contents" or "File names", shows files in the repositories below:

- foo
- foo/bar
- foo-bar
- and so on ...

The Whoosh library, which is used to parse text for indexing and
searching, does the following:

- treat almost all non-alphanumeric characters as delimiters, both when
  indexing search items and when parsing search conditions
- index each field of a search item by multiple values

For example, files in the "foo/bar" repository are indexed by both "foo"
and "bar" in the "repository" field. This tokenization makes the
"repository:foo" search condition also match files in the "foo/bar"
repository.

In addition, using plain TEXT causes unintentional removal of "stop
words" from search conditions. For example, "this", "a", "you", and so
on are ignored at indexing and parsing time, because they are too
generic (from the point of view of generic "text search").

This issue can't be resolved by using ID instead of TEXT for
"repository" in SCHEMA, as previous revisions did for JOURNAL_SCHEMA,
because:

- highlighting file content requires SCHEMA to support the "positions"
  feature, but using ID instead of TEXT disables it
- using ID violates the current case-insensitive search policy, because
  ID preserves the case of the text

To make the "repository:" condition work as expected, this revision
explicitly specifies an "analyzer" which does the following:

- avoid tokenization
- match case-insensitively
- avoid removing "stop words" from text

This revision requires fully re-building the index tables, because the
indexing schema is changed.

BTW, the "repository:" condition, when searching for "Commit messages",
uses CHGSETS_SCHEMA instead of SCHEMA. The former uses ID for
"repository", which does the following:

- avoid the issues caused by tokenization and removal of "stop words"
- disable the "positions" feature of CHGSETS_SCHEMA

  But highlighting file content isn't needed when searching for
  "Commit messages", so this can be ignored.

- preserve the case of the text

  This violates the current case-insensitive search policy. This issue
  will be fixed by a subsequent revision, because fixing it isn't
  simple.
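For illustration only (not part of the patch): a minimal sketch of how the
analyzers described above tokenize a repository name, assuming Whoosh is
installed. StandardAnalyzer is Whoosh's default analyzer for plain TEXT
fields; the other names match those introduced by this patch, and the sample
strings are made up for the example.

    # Sketch only: compare how the word-splitting analyzer and the
    # ID-style analyzer tokenize a repository name.
    from whoosh.analysis import (RegexTokenizer, LowercaseFilter,
                                 IDTokenizer, StandardAnalyzer)

    name = u"Foo/bar"

    # Old behavior: plain TEXT-style word splitting breaks the name on "/",
    # so "repository:foo" also matches files in "foo/bar".
    ANALYZER = RegexTokenizer(expression=r"\w+") | LowercaseFilter()
    print([t.text for t in ANALYZER(name)])                          # ['foo', 'bar']

    # Default TEXT analyzer also drops "stop words" such as "this", "a":
    print([t.text for t in StandardAnalyzer()(u"this is a repo")])   # ['repo']

    # New behavior: keep the whole name as a single token, lowercased.
    ICASEIDANALYZER = IDTokenizer() | LowercaseFilter()
    print([t.text for t in ICASEIDANALYZER(name)])                   # ['foo/bar']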
diff -r 2ff913970025 -r caef0be39948 kallithea/lib/indexers/__init__.py
--- a/kallithea/lib/indexers/__init__.py	Mon Jan 23 02:17:38 2017 +0900
+++ b/kallithea/lib/indexers/__init__.py	Mon Jan 23 02:17:38 2017 +0900
@@ -33,7 +33,7 @@
 # Add location of top level folder to sys.path
 sys.path.append(dirname(dirname(dirname(os.path.realpath(__file__)))))
 
-from whoosh.analysis import RegexTokenizer, LowercaseFilter
+from whoosh.analysis import RegexTokenizer, LowercaseFilter, IDTokenizer
 from whoosh.fields import TEXT, ID, STORED, NUMERIC, BOOLEAN, Schema, FieldType, DATETIME
 from whoosh.formats import Characters
 from whoosh.highlight import highlight as whoosh_highlight, HtmlFormatter, ContextFragmenter
@@ -44,11 +44,20 @@
 # CUSTOM ANALYZER wordsplit + lowercase filter
 ANALYZER = RegexTokenizer(expression=r"\w+") | LowercaseFilter()
 
+# CUSTOM ANALYZER raw-string + lowercase filter
+#
+# This is useful to:
+# - avoid tokenization
+# - avoid removing "stop words" from text
+# - search case-insensitively
+#
+ICASEIDANALYZER = IDTokenizer() | LowercaseFilter()
+
 #INDEX SCHEMA DEFINITION
 SCHEMA = Schema(
     fileid=ID(unique=True),
     owner=TEXT(),
-    repository=TEXT(stored=True),
+    repository=TEXT(stored=True, analyzer=ICASEIDANALYZER),
     path=TEXT(stored=True),
     content=FieldType(format=Characters(), analyzer=ANALYZER,
                       scorable=True, stored=True),
diff -r 2ff913970025 -r caef0be39948 kallithea/tests/functional/test_search_indexing.py
--- a/kallithea/tests/functional/test_search_indexing.py	Mon Jan 23 02:17:38 2017 +0900
+++ b/kallithea/tests/functional/test_search_indexing.py	Mon Jan 23 02:17:38 2017 +0900
@@ -113,9 +113,9 @@
         (u'group/*'),
     ])
     @parametrize('searchtype,query,hit', [
-        #('content', 'this_should_be_unique_content', 1),
+        ('content', 'this_should_be_unique_content', 1),
         ('commit', 'this_should_be_unique_commit_log', 1),
-        #('path', 'this_should_be_unique_filename.txt', 1),
+        ('path', 'this_should_be_unique_filename.txt', 1),
     ])
     def test_repository_tokenization(self, reponame, searchtype, query, hit):
         self.log_user()
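For illustration only, not part of the patch: a minimal end-to-end sketch of
the fixed behavior, using Whoosh's in-memory storage and a cut-down schema
(only the "repository" field matters here; the document contents and field
values are invented for the example).

    # Sketch only: with the ID-style analyzer, "repository:foo" no longer
    # matches files indexed under "foo/bar".
    from whoosh.analysis import IDTokenizer, LowercaseFilter
    from whoosh.fields import Schema, TEXT, ID
    from whoosh.filedb.filestore import RamStorage
    from whoosh.qparser import QueryParser

    ICASEIDANALYZER = IDTokenizer() | LowercaseFilter()
    schema = Schema(fileid=ID(unique=True),
                    repository=TEXT(stored=True, analyzer=ICASEIDANALYZER))

    ix = RamStorage().create_index(schema)
    writer = ix.writer()
    writer.add_document(fileid=u'1', repository=u'foo')
    writer.add_document(fileid=u'2', repository=u'foo/bar')
    writer.commit()

    with ix.searcher() as searcher:
        query = QueryParser('repository', schema=ix.schema).parse(u'foo')
        print([hit['repository'] for hit in searcher.search(query)])
        # ['foo'] -- 'foo/bar' is indexed as a single token and is not matched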