Mercurial > kallithea
comparison rhodecode/lib/indexers/__init__.py @ 547:1e757ac98988
renamed project to rhodecode
author | Marcin Kuzminski <marcin@python-works.com> |
---|---|
date | Wed, 06 Oct 2010 03:18:16 +0200 |
parents | pylons_app/lib/indexers/__init__.py@fb0c3af6031b |
children | 65b2f150beb7 |
comparison
equal
deleted
inserted
replaced
546:7c2f5e4d7bbf | 547:1e757ac98988 |
---|---|
1 from os.path import dirname as dn, join as jn | |
2 from rhodecode.config.environment import load_environment | |
3 from rhodecode.model.hg_model import HgModel | |
4 from shutil import rmtree | |
5 from webhelpers.html.builder import escape | |
6 from vcs.utils.lazy import LazyProperty | |
7 | |
8 from whoosh.analysis import RegexTokenizer, LowercaseFilter, StopFilter | |
9 from whoosh.fields import TEXT, ID, STORED, Schema, FieldType | |
10 from whoosh.index import create_in, open_dir | |
11 from whoosh.formats import Characters | |
12 from whoosh.highlight import highlight, SimpleFragmenter, HtmlFormatter | |
13 | |
14 import os | |
15 import sys | |
16 import traceback | |
17 | |
# make the top-level ``rhodecode`` package importable when this module is
# executed outside the paster application (e.g. by the standalone index daemon)
sys.path.append(dn(dn(dn(os.path.realpath(__file__)))))


# LOCATION WE KEEP THE INDEX: <application root>/data/index
IDX_LOCATION = jn(dn(dn(dn(dn(os.path.abspath(__file__))))), 'data', 'index')

# EXTENSIONS WE WANT TO INDEX CONTENT OF (file suffixes, without the dot)
INDEX_EXTENSIONS = ['action', 'adp', 'ashx', 'asmx', 'aspx', 'asx', 'axd', 'c',
                    'cfg', 'cfm', 'cpp', 'cs', 'css', 'diff', 'do', 'el', 'erl',
                    'h', 'htm', 'html', 'ini', 'java', 'js', 'jsp', 'jspx', 'lisp',
                    'lua', 'm', 'mako', 'ml', 'pas', 'patch', 'php', 'php3',
                    'php4', 'phtml', 'pm', 'py', 'rb', 'rst', 's', 'sh', 'sql',
                    'tpl', 'txt', 'vim', 'wss', 'xhtml', 'xml', 'xsl', 'xslt',
                    'yaws']

# CUSTOM ANALYZER: wordsplit + lowercase filter
ANALYZER = RegexTokenizer(expression=r"\w+") | LowercaseFilter()


# INDEX SCHEMA DEFINITION
SCHEMA = Schema(owner=TEXT(),
                repository=TEXT(stored=True),
                path=ID(stored=True, unique=True),
                # Characters format keeps per-term character offsets in the
                # index so result excerpts can be sliced straight out of the
                # stored content (see ResultWrapper.get_chunks)
                content=FieldType(format=Characters(ANALYZER),
                                  scorable=True, stored=True),
                modtime=STORED(), extension=TEXT(stored=True))


IDX_NAME = 'HG_INDEX'
# wraps matched terms in <span>; fragments are joined with a "break" marker
FORMATTER = HtmlFormatter('span', between='\n<span class="break">...</span>\n')
# fixed-size (200 character) fragmenter used when highlighting excerpts
FRAGMENTER = SimpleFragmenter(200)
50 | |
class ResultWrapper(object):
    """Lazy wrapper around a whoosh search matcher.

    Iterating (or slicing) yields one dict per hit: the document's stored
    fields plus ``content_short`` (a plain-text excerpt assembled from the
    match offsets), ``content_short_hl`` (the HTML-highlighted excerpt) and
    ``f_path`` (the file path relative to the repository).
    """

    def __init__(self, searcher, matcher, highlight_items):
        """
        :param searcher: whoosh searcher, used to fetch stored fields
        :param matcher: whoosh matcher positioned at the first hit
        :param highlight_items: terms to highlight in the excerpts
        """
        self.searcher = searcher
        self.matcher = matcher
        self.highlight_items = highlight_items
        # half of the module-level SimpleFragmenter(200) fragment size;
        # excerpts extend this many characters on each side of a match
        self.fragment_size = 200 / 2

    @LazyProperty
    def doc_ids(self):
        """Return ``[docnum, chunks]`` pairs for every hit.

        NOTE: this exhausts ``self.matcher``, so it can only run once —
        LazyProperty caches the result on first access.
        """
        docs_id = []
        while self.matcher.is_active():
            docnum = self.matcher.id()
            # materialize the chunk generator while the matcher is on this doc
            chunks = list(self.get_chunks())
            docs_id.append([docnum, chunks])
            self.matcher.next()
        return docs_id

    def __str__(self):
        return '<%s at %s>' % (self.__class__.__name__, len(self.doc_ids))

    def __repr__(self):
        return self.__str__()

    def __len__(self):
        return len(self.doc_ids)

    def __iter__(self):
        """
        Allows iteration over results, and lazily generates content.

        *Requires* implementation of ``__getitem__`` method.
        """
        for docid in self.doc_ids:
            yield self.get_full_content(docid)

    def __getslice__(self, i, j):
        """Slicing of ResultWrapper (Python 2 slice protocol)."""
        # named with a trailing underscore to avoid shadowing builtin ``slice``
        slice_ = []
        for docid in self.doc_ids[i:j]:
            slice_.append(self.get_full_content(docid))
        return slice_

    def get_full_content(self, docid):
        """Build the full result dict for one ``[docnum, chunks]`` pair."""
        res = self.searcher.stored_fields(docid[0])
        # strip everything up to and including the repository name so f_path
        # is relative to the repository root
        f_path = res['path'][res['path'].find(res['repository']) \
            + len(res['repository']):].lstrip('/')

        content_short = self.get_short_content(res, docid[1])
        res.update({'content_short': content_short,
                    'content_short_hl': self.highlight(content_short),
                    'f_path': f_path})

        return res

    def get_short_content(self, res, chunks):
        """Join the ``(start, end)`` chunk slices of the stored content."""
        return ''.join([res['content'][chunk[0]:chunk[1]] for chunk in chunks])

    def get_chunks(self):
        """
        Smart function that implements chunking the content
        but does not overlap chunks so it doesn't highlight the same
        close occurrences twice.

        Yields ``(start, end)`` character offsets into the stored content,
        each match span widened by ``fragment_size`` on both sides and
        clipped so consecutive chunks never overlap.
        """
        memory = [(0, 0)]
        for span in self.matcher.spans():
            start = span.startchar or 0
            end = span.endchar or 0
            start_offseted = max(0, start - self.fragment_size)
            end_offseted = end + self.fragment_size

            # clip the start to the previous chunk's end to avoid overlap
            if start_offseted < memory[-1][1]:
                start_offseted = memory[-1][1]
            memory.append((start_offseted, end_offseted,))
            yield (start_offseted, end_offseted,)

    def highlight(self, content, top=5):
        """Return *content* with the ``top`` best matches HTML-highlighted."""
        hl = highlight(escape(content),
                       self.highlight_items,
                       analyzer=ANALYZER,
                       fragmenter=FRAGMENTER,
                       formatter=FORMATTER,
                       top=top)
        return hl
139 return hl |