
[enh][mod] result handling refactor

Several changes have been made:
 - Parallel result merge
 - Scoring algorithm slightly changed (see result_score())
 - Proper thread locking on global data manipulation
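
For reference, the changed scoring reduces to the formula in result_score() (new searx/results.py below): each merged result scores the sum, over the positions at which it appeared, of occurrences * weight / position, where occurrences is how many times the result showed up across the engines' result lists and weight is the product of those engines' configured weights (default 1.0). The snippet is a standalone sketch with made-up numbers, not code from this commit:

    # Standalone sketch of the new scoring formula in result_score()
    def sketch_score(positions, weight=1.0):
        occurrences = len(positions)
        return sum((occurrences * weight) / position for position in positions)

    # hypothetical merged result: rank 1 at one engine, rank 3 at another
    print(sketch_score([1, 3]))  # 2*1.0/1 + 2*1.0/3 = 2.67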
Adam Tauber, 9 years ago
commit b6c3cb0bdd
6 changed files with 321 additions and 292 deletions:

  1. searx/results.py              +239    -0
  2. searx/search.py                +16  -257
  3. searx/tests/test_results.py    +41    -0
  4. searx/tests/test_search.py      +4   -19
  5. searx/tests/test_webapp.py      +7    -1
  6. searx/webapp.py                +14   -15

searx/results.py  (+239, -0)

@@ -0,0 +1,239 @@
+import re
+from collections import defaultdict
+from operator import itemgetter
+from threading import RLock
+from urlparse import urlparse, unquote
+from searx.engines import engines
+
+CONTENT_LEN_IGNORED_CHARS_REGEX = re.compile('[,;:!?\./\\\\ ()-_]', re.M | re.U)
+WHITESPACE_REGEX = re.compile('( |\t|\n)+', re.M | re.U)
+
+
+# return the meaningful length of the content for a result
+def result_content_len(content):
+    if isinstance(content, basestring):
+        return len(CONTENT_LEN_IGNORED_CHARS_REGEX.sub('', content))
+    else:
+        return 0
+
+
+def compare_urls(url_a, url_b):
+    if url_a.netloc != url_b.netloc or url_a.query != url_b.query:
+        return False
+
+    # remove / from the end of the url if required
+    path_a = url_a.path[:-1]\
+        if url_a.path.endswith('/')\
+        else url_a.path
+    path_b = url_b.path[:-1]\
+        if url_b.path.endswith('/')\
+        else url_b.path
+
+    return unquote(path_a) == unquote(path_b)
+
+
+def merge_two_infoboxes(infobox1, infobox2):
+    if 'urls' in infobox2:
+        urls1 = infobox1.get('urls', None)
+        if urls1 is None:
+            urls1 = []
+            infobox1.set('urls', urls1)
+
+        urlSet = set()
+        for url in infobox1.get('urls', []):
+            urlSet.add(url.get('url', None))
+
+        for url in infobox2.get('urls', []):
+            if url.get('url', None) not in urlSet:
+                urls1.append(url)
+
+    if 'attributes' in infobox2:
+        attributes1 = infobox1.get('attributes', None)
+        if attributes1 is None:
+            attributes1 = []
+            infobox1.set('attributes', attributes1)
+
+        attributeSet = set()
+        for attribute in infobox1.get('attributes', []):
+            if attribute.get('label', None) not in attributeSet:
+                attributeSet.add(attribute.get('label', None))
+
+        for attribute in infobox2.get('attributes', []):
+            attributes1.append(attribute)
+
+    if 'content' in infobox2:
+        content1 = infobox1.get('content', None)
+        content2 = infobox2.get('content', '')
+        if content1 is not None:
+            if result_content_len(content2) > result_content_len(content1):
+                infobox1['content'] = content2
+        else:
+            infobox1.set('content', content2)
+
+
+def result_score(result):
+    weight = 1.0
+
+    for result_engine in result['engines']:
+        if hasattr(engines[result_engine], 'weight'):
+            weight *= float(engines[result_engine].weight)
+
+    occurrences = len(result['positions'])
+
+    return sum((occurrences * weight) / position for position in result['positions'])
+
+
+class ResultContainer(object):
+    """Merge, deduplicate and score the results of a single query"""
+    def __init__(self):
+        super(ResultContainer, self).__init__()
+        self.results = defaultdict(list)
+        self._merged_results = []
+        self.infoboxes = []
+        self._infobox_ids = {}
+        self.suggestions = set()
+        self.answers = set()
+
+    def extend(self, engine_name, results):
+        for result in list(results):
+            if 'suggestion' in result:
+                self.suggestions.add(result['suggestion'])
+                results.remove(result)
+            elif 'answer' in result:
+                self.answers.add(result['answer'])
+                results.remove(result)
+            elif 'infobox' in result:
+                self._merge_infobox(result)
+                results.remove(result)
+
+        with RLock():
+            engines[engine_name].stats['search_count'] += 1
+            engines[engine_name].stats['result_count'] += len(results)
+
+        if not results:
+            return
+
+        self.results[engine_name].extend(results)
+
+        for i, result in enumerate(results):
+            position = i + 1
+            self._merge_result(result, position)
+
+    def _merge_infobox(self, infobox):
+        add_infobox = True
+        infobox_id = infobox.get('id', None)
+        if infobox_id is not None:
+            existingIndex = self._infobox_ids.get(infobox_id, None)
+            if existingIndex is not None:
+                merge_two_infoboxes(self.infoboxes[existingIndex], infobox)
+                add_infobox = False
+
+        if add_infobox:
+            self.infoboxes.append(infobox)
+            self._infobox_ids[infobox_id] = len(self.infoboxes) - 1
+
+    def _merge_result(self, result, position):
+        result['parsed_url'] = urlparse(result['url'])
+
+        # if the result has no scheme, use http as default
+        if not result['parsed_url'].scheme:
+            result['parsed_url'] = result['parsed_url']._replace(scheme="http")
+
+        result['host'] = result['parsed_url'].netloc
+
+        if result['host'].startswith('www.'):
+            result['host'] = result['host'].replace('www.', '', 1)
+
+        result['engines'] = [result['engine']]
+
+        # strip multiple spaces and carriage returns from content
+        if result.get('content'):
+            result['content'] = WHITESPACE_REGEX.sub(' ', result['content'])
+
+        # check for duplicates
+        duplicated = False
+        for merged_result in self._merged_results:
+            if compare_urls(result['parsed_url'], merged_result['parsed_url'])\
+               and result.get('template') == merged_result.get('template'):
+                duplicated = merged_result
+                break
+
+        # merge duplicates together
+        if duplicated:
+            # using content with more text
+            if result_content_len(result.get('content', '')) >\
+                    result_content_len(duplicated.get('content', '')):
+                duplicated['content'] = result['content']
+
+            # add the new position
+            duplicated['positions'].append(position)
+
+            # add engine to list of result-engines
+            duplicated['engines'].append(result['engine'])
+
+            # using https if possible
+            if duplicated['parsed_url'].scheme != 'https' and result['parsed_url'].scheme == 'https':
+                duplicated['url'] = result['parsed_url'].geturl()
+                duplicated['parsed_url'] = result['parsed_url']
+
+        # if there is no duplicate found, append result
+        else:
+            result['positions'] = [position]
+            with RLock():
+                self._merged_results.append(result)
+
+    def get_ordered_results(self):
+        for result in self._merged_results:
+            score = result_score(result)
+            result['score'] = score
+            with RLock():
+                for result_engine in result['engines']:
+                    engines[result_engine].stats['score_count'] += score
+
+        results = sorted(self._merged_results, key=itemgetter('score'), reverse=True)
+
+        # pass 2 : group results by category and template
+        gresults = []
+        categoryPositions = {}
+
+        for i, res in enumerate(results):
+            # FIXME : handle more than one category per engine
+            category = engines[res['engine']].categories[0] + ':' + ''\
+                if 'template' not in res\
+                else res['template']
+
+            current = None if category not in categoryPositions\
+                else categoryPositions[category]
+
+            # group with previous results using the same category
+            # if the group can accept more result and is not too far
+            # from the current position
+            if current is not None and (current['count'] > 0)\
+                    and (len(gresults) - current['index'] < 20):
+                # group with the previous results using
+                # the same category with this one
+                index = current['index']
+                gresults.insert(index, res)
+
+                # update every index after the current one
+                # (including the current one)
+                for k in categoryPositions:
+                    v = categoryPositions[k]['index']
+                    if v >= index:
+                        categoryPositions[k]['index'] = v + 1
+
+                # update this category
+                current['count'] -= 1
+
+            else:
+                # same category
+                gresults.append(res)
+
+                # update categoryIndex
+                categoryPositions[category] = {'index': len(gresults), 'count': 8}
+
+        # return gresults
+        return gresults
+
+    def results_length(self):
+        return len(self._merged_results)
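
As a quick illustration of the deduplication rule _merge_result() relies on: two results are considered the same page when host, query string and unquoted path (ignoring a trailing slash) all match; scheme and fragment are not compared. A minimal sketch, assuming Python 2 and a searx checkout where searx.results is importable:

    from urlparse import urlparse
    from searx.results import compare_urls

    # same host, query and normalised path -> treated as duplicates
    # (http vs https and the trailing slash make no difference)
    print(compare_urls(urlparse('https://example.com/a/?q=1'),
                       urlparse('http://example.com/a?q=1')))   # True

    # different query string -> kept as two separate results
    print(compare_urls(urlparse('https://example.com/a?q=1'),
                       urlparse('https://example.com/a?q=2')))  # False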

searx/search.py  (+16, -257)

@@ -16,13 +16,8 @@ along with searx. If not, see < http://www.gnu.org/licenses/ >.
 '''
 
 import threading
-import re
 import searx.poolrequests as requests_lib
-from itertools import izip_longest, chain
-from operator import itemgetter
-from Queue import Queue
 from time import time
-from urlparse import urlparse, unquote
 from searx import settings
 from searx.engines import (
     categories, engines
@@ -30,6 +25,7 @@ from searx.engines import (
 from searx.languages import language_codes
 from searx.utils import gen_useragent, get_blocked_engines
 from searx.query import Query
+from searx.results import ResultContainer
 from searx import logger
 
 logger = logger.getChild('search')
@@ -42,7 +38,8 @@ def search_request_wrapper(fn, url, engine_name, **kwargs):
         return fn(url, **kwargs)
     except:
         # increase errors stats
-        engines[engine_name].stats['errors'] += 1
+        with threading.RLock():
+            engines[engine_name].stats['errors'] += 1
 
         # print engine name and specific error message
         logger.exception('engine crash: {0}'.format(engine_name))
@@ -84,7 +81,7 @@ def default_request_params():
 
 
 # create a callback wrapper for the search engine results
-def make_callback(engine_name, results_queue, callback, params):
+def make_callback(engine_name, callback, params, result_container):
 
     # creating a callback wrapper for the search engine results
     def process_callback(response, **kwargs):
@@ -96,12 +93,17 @@ def make_callback(engine_name, results_queue, callback, params):
 
         response.search_params = params
 
-        timeout_overhead = 0.2  # seconds
         search_duration = time() - params['started']
+        # update stats with current page-load-time
+        with threading.RLock():
+            engines[engine_name].stats['page_load_time'] += search_duration
+
+        timeout_overhead = 0.2  # seconds
         timeout_limit = engines[engine_name].timeout + timeout_overhead
+
         if search_duration > timeout_limit:
-            engines[engine_name].stats['page_load_time'] += timeout_limit
-            engines[engine_name].stats['errors'] += 1
+            with threading.RLock():
+                engines[engine_name].stats['errors'] += 1
             return
 
         # callback
@@ -111,212 +113,11 @@ def make_callback(engine_name, results_queue, callback, params):
         for result in search_results:
             result['engine'] = engine_name
 
-        results_queue.put_nowait((engine_name, search_results))
-
-        # update stats with current page-load-time
-        engines[engine_name].stats['page_load_time'] += search_duration
+        result_container.extend(engine_name, search_results)
 
     return process_callback
 
 
-# return the meaningful length of the content for a result
-def content_result_len(content):
-    if isinstance(content, basestring):
-        content = re.sub('[,;:!?\./\\\\ ()-_]', '', content)
-        return len(content)
-    else:
-        return 0
-
-
-# score results and remove duplications
-def score_results(results):
-    # calculate scoring parameters
-    flat_res = filter(
-        None, chain.from_iterable(izip_longest(*results.values())))
-    flat_len = len(flat_res)
-    engines_len = len(results)
-
-    results = []
-
-    # pass 1: deduplication + scoring
-    for i, res in enumerate(flat_res):
-
-        res['parsed_url'] = urlparse(res['url'])
-
-        # if the result has no scheme, use http as default
-        if not res['parsed_url'].scheme:
-            res['parsed_url'] = res['parsed_url']._replace(scheme="http")
-
-        res['host'] = res['parsed_url'].netloc
-
-        if res['host'].startswith('www.'):
-            res['host'] = res['host'].replace('www.', '', 1)
-
-        res['engines'] = [res['engine']]
-
-        weight = 1.0
-
-        # strip multiple spaces and cariage returns from content
-        if res.get('content'):
-            res['content'] = re.sub(' +', ' ',
-                                    res['content'].strip().replace('\n', ''))
-
-        # get weight of this engine if possible
-        if hasattr(engines[res['engine']], 'weight'):
-            weight = float(engines[res['engine']].weight)
-
-        # calculate score for that engine
-        score = int((flat_len - i) / engines_len) * weight + 1
-
-        # check for duplicates
-        duplicated = False
-        for new_res in results:
-            # remove / from the end of the url if required
-            p1 = res['parsed_url'].path[:-1]\
-                if res['parsed_url'].path.endswith('/')\
-                else res['parsed_url'].path
-            p2 = new_res['parsed_url'].path[:-1]\
-                if new_res['parsed_url'].path.endswith('/')\
-                else new_res['parsed_url'].path
-
-            # check if that result is a duplicate
-            if res['host'] == new_res['host'] and\
-               unquote(p1) == unquote(p2) and\
-               res['parsed_url'].query == new_res['parsed_url'].query and\
-               res.get('template') == new_res.get('template'):
-                duplicated = new_res
-                break
-
-        # merge duplicates together
-        if duplicated:
-            # using content with more text
-            if content_result_len(res.get('content', '')) >\
-                    content_result_len(duplicated.get('content', '')):
-                duplicated['content'] = res['content']
-
-            # increase result-score
-            duplicated['score'] += score
-
-            # add engine to list of result-engines
-            duplicated['engines'].append(res['engine'])
-
-            # using https if possible
-            if duplicated['parsed_url'].scheme == 'https':
-                continue
-            elif res['parsed_url'].scheme == 'https':
-                duplicated['url'] = res['parsed_url'].geturl()
-                duplicated['parsed_url'] = res['parsed_url']
-
-        # if there is no duplicate found, append result
-        else:
-            res['score'] = score
-
-            results.append(res)
-
-    results = sorted(results, key=itemgetter('score'), reverse=True)
-
-    # pass 2 : group results by category and template
-    gresults = []
-    categoryPositions = {}
-
-    for i, res in enumerate(results):
-        # FIXME : handle more than one category per engine
-        category = engines[res['engine']].categories[0] + ':' + ''\
-            if 'template' not in res\
-            else res['template']
-
-        current = None if category not in categoryPositions\
-            else categoryPositions[category]
-
-        # group with previous results using the same category
-        # if the group can accept more result and is not too far
-        # from the current position
-        if current is not None and (current['count'] > 0)\
-                and (len(gresults) - current['index'] < 20):
-            # group with the previous results using
-            # the same category with this one
-            index = current['index']
-            gresults.insert(index, res)
-
-            # update every index after the current one
-            # (including the current one)
-            for k in categoryPositions:
-                v = categoryPositions[k]['index']
-                if v >= index:
-                    categoryPositions[k]['index'] = v + 1
-
-            # update this category
-            current['count'] -= 1
-
-        else:
-            # same category
-            gresults.append(res)
-
-            # update categoryIndex
-            categoryPositions[category] = {'index': len(gresults), 'count': 8}
-
-    # return gresults
-    return gresults
-
-
-def merge_two_infoboxes(infobox1, infobox2):
-    if 'urls' in infobox2:
-        urls1 = infobox1.get('urls', None)
-        if urls1 is None:
-            urls1 = []
-            infobox1.set('urls', urls1)
-
-        urlSet = set()
-        for url in infobox1.get('urls', []):
-            urlSet.add(url.get('url', None))
-
-        for url in infobox2.get('urls', []):
-            if url.get('url', None) not in urlSet:
-                urls1.append(url)
-
-    if 'attributes' in infobox2:
-        attributes1 = infobox1.get('attributes', None)
-        if attributes1 is None:
-            attributes1 = []
-            infobox1.set('attributes', attributes1)
-
-        attributeSet = set()
-        for attribute in infobox1.get('attributes', []):
-            if attribute.get('label', None) not in attributeSet:
-                attributeSet.add(attribute.get('label', None))
-
-        for attribute in infobox2.get('attributes', []):
-            attributes1.append(attribute)
-
-    if 'content' in infobox2:
-        content1 = infobox1.get('content', None)
-        content2 = infobox2.get('content', '')
-        if content1 is not None:
-            if content_result_len(content2) > content_result_len(content1):
-                infobox1['content'] = content2
-        else:
-            infobox1.set('content', content2)
-
-
-def merge_infoboxes(infoboxes):
-    results = []
-    infoboxes_id = {}
-    for infobox in infoboxes:
-        add_infobox = True
-        infobox_id = infobox.get('id', None)
-        if infobox_id is not None:
-            existingIndex = infoboxes_id.get(infobox_id, None)
-            if existingIndex is not None:
-                merge_two_infoboxes(results[existingIndex], infobox)
-                add_infobox = False
-
-        if add_infobox:
-            results.append(infobox)
-            infoboxes_id[infobox_id] = len(results) - 1
-
-    return results
-
-
 class Search(object):
 
     """Search information container"""
@@ -334,10 +135,7 @@ class Search(object):
         # set blocked engines
         self.blocked_engines = get_blocked_engines(engines, request.cookies)
 
-        self.results = []
-        self.suggestions = set()
-        self.answers = set()
-        self.infoboxes = []
+        self.result_container = ResultContainer()
         self.request_data = {}
 
         # set specific language if set
@@ -449,8 +247,6 @@ class Search(object):
 
         # init vars
        requests = []
-        results_queue = Queue()
-        results = {}
 
         # increase number of searches
         number_of_searches += 1
@@ -504,9 +300,9 @@ class Search(object):
             # create a callback wrapper for the search engine results
             callback = make_callback(
                 selected_engine['name'],
-                results_queue,
                 engine.response,
-                request_params)
+                request_params,
+                self.result_container)
 
             # create dictionary which contain all
             # informations about the request
@@ -539,42 +335,5 @@ class Search(object):
         # send all search-request
         threaded_requests(requests)
 
-        while not results_queue.empty():
-            engine_name, engine_results = results_queue.get_nowait()
-
-            # TODO type checks
-            [self.suggestions.add(x['suggestion'])
-             for x in list(engine_results)
-             if 'suggestion' in x
-             and engine_results.remove(x) is None]
-
-            [self.answers.add(x['answer'])
-             for x in list(engine_results)
-             if 'answer' in x
-             and engine_results.remove(x) is None]
-
-            self.infoboxes.extend(x for x in list(engine_results)
-                                  if 'infobox' in x
-                                  and engine_results.remove(x) is None)
-
-            results[engine_name] = engine_results
-
-        # update engine-specific stats
-        for engine_name, engine_results in results.items():
-            engines[engine_name].stats['search_count'] += 1
-            engines[engine_name].stats['result_count'] += len(engine_results)
-
-        # score results and remove duplications
-        self.results = score_results(results)
-
-        # merge infoboxes according to their ids
-        self.infoboxes = merge_infoboxes(self.infoboxes)
-
-        # update engine stats, using calculated score
-        for result in self.results:
-            for res_engine in result['engines']:
-                engines[result['engine']]\
-                    .stats['score_count'] += result['score']
-
         # return results, suggestions, answers and infoboxes
         return self
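
The thread-locking bullet point concerns the per-engine stats dictionaries, which several response callbacks can now update concurrently. As a standalone sketch of that idea (illustrative names only, not searx code), shared counters are mutated only while a lock is held, and a single shared lock object is what serialises the writers:

    import threading

    stats_lock = threading.RLock()                 # one lock shared by all writer threads
    stats = {'errors': 0, 'page_load_time': 0.0}   # shared counters

    def record_engine_error(duration):
        # mutate the shared counters only while holding the shared lock
        with stats_lock:
            stats['errors'] += 1
            stats['page_load_time'] += duration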

searx/tests/test_results.py  (+41, -0)

@@ -0,0 +1,41 @@
+# -*- coding: utf-8 -*-
+
+from searx.results import ResultContainer
+from searx.testing import SearxTestCase
+
+
+def fake_result(url='https://aa.bb/cc?dd=ee#ff',
+                title='aaa',
+                content='bbb',
+                engine='wikipedia', **kwargs):
+    result = {'url': url,
+              'title': title,
+              'content': content,
+              'engine': engine}
+    result.update(kwargs)
+    return result
+
+
+#  TODO
+class ResultContainerTestCase(SearxTestCase):
+
+    def test_empty(self):
+        c = ResultContainer()
+        self.assertEqual(c.get_ordered_results(), [])
+
+    def test_one_result(self):
+        c = ResultContainer()
+        c.extend('wikipedia', [fake_result()])
+        self.assertEqual(c.results_length(), 1)
+
+    def test_one_suggestion(self):
+        c = ResultContainer()
+        c.extend('wikipedia', [fake_result(suggestion=True)])
+        self.assertEqual(len(c.suggestions), 1)
+        self.assertEqual(c.results_length(), 0)
+
+    def test_result_merge(self):
+        c = ResultContainer()
+        c.extend('wikipedia', [fake_result()])
+        c.extend('wikidata', [fake_result(), fake_result(url='https://example.com/')])
+        self.assertEqual(c.results_length(), 2)

searx/tests/test_search.py  (+4, -19)

@@ -1,25 +1,10 @@
 # -*- coding: utf-8 -*-
 
-from searx.search import score_results
 from searx.testing import SearxTestCase
 
 
-def fake_result(url='https://aa.bb/cc?dd=ee#ff',
-                title='aaa',
-                content='bbb',
-                engine='wikipedia'):
-    return {'url': url,
-            'title': title,
-            'content': content,
-            'engine': engine}
+#  TODO
+class SearchTestCase(SearxTestCase):
 
-
-class ScoreResultsTestCase(SearxTestCase):
-
-    def test_empty(self):
-        self.assertEqual(score_results(dict()), [])
-
-    def test_urlparse(self):
-        results = score_results(dict(a=[fake_result(url='https://aa.bb/cc?dd=ee#ff')]))
-        parsed_url = results[0]['parsed_url']
-        self.assertEqual(parsed_url.query, 'dd=ee')
+    def test_(self):
+        pass

searx/tests/test_webapp.py  (+7, -1)

@@ -1,6 +1,7 @@
 # -*- coding: utf-8 -*-
 
 import json
+from mock import Mock
 from urlparse import ParseResult
 from searx import webapp
 from searx.testing import SearxTestCase
@@ -33,7 +34,12 @@ class ViewsTestCase(SearxTestCase):
         ]
 
         def search_mock(search_self, *args):
-            search_self.results = self.test_results
+            search_self.result_container = Mock(get_ordered_results=lambda: self.test_results,
+                                                answers=set(),
+                                                suggestions=set(),
+                                                infoboxes=[],
+                                                results=self.test_results,
+                                                results_length=lambda: len(self.test_results))
 
         webapp.Search.search = search_mock
 

searx/webapp.py  (+14, -15)

@@ -383,7 +383,7 @@ def index():
 
     plugins.call('post_search', request, locals())
 
-    for result in search.results:
+    for result in search.result_container.get_ordered_results():
 
         plugins.call('on_result', request, locals())
         if not search.paging and engines[result['engine']].paging:
@@ -411,7 +411,7 @@ def index():
                 minutes = int((timedifference.seconds / 60) % 60)
                 hours = int(timedifference.seconds / 60 / 60)
                 if hours == 0:
-                    result['publishedDate'] = gettext(u'{minutes} minute(s) ago').format(minutes=minutes)  # noqa
+                    result['publishedDate'] = gettext(u'{minutes} minute(s) ago').format(minutes=minutes)
                 else:
                     result['publishedDate'] = gettext(u'{hours} hour(s), {minutes} minute(s) ago').format(hours=hours, minutes=minutes)  # noqa
             else:
@@ -419,17 +419,16 @@ def index():
 
     if search.request_data.get('format') == 'json':
         return Response(json.dumps({'query': search.query,
-                                    'results': search.results}),
+                                    'results': search.result_container.get_ordered_results()}),
                         mimetype='application/json')
     elif search.request_data.get('format') == 'csv':
         csv = UnicodeWriter(cStringIO.StringIO())
         keys = ('title', 'url', 'content', 'host', 'engine', 'score')
-        if search.results:
-            csv.writerow(keys)
-            for row in search.results:
-                row['host'] = row['parsed_url'].netloc
-                csv.writerow([row.get(key, '') for key in keys])
-            csv.stream.seek(0)
+        csv.writerow(keys)
+        for row in search.result_container.get_ordered_results():
+            row['host'] = row['parsed_url'].netloc
+            csv.writerow([row.get(key, '') for key in keys])
+        csv.stream.seek(0)
         response = Response(csv.stream.read(), mimetype='application/csv')
         cont_disp = 'attachment;Filename=searx_-_{0}.csv'.format(search.query)
         response.headers.add('Content-Disposition', cont_disp)
@@ -437,24 +436,24 @@ def index():
     elif search.request_data.get('format') == 'rss':
         response_rss = render(
             'opensearch_response_rss.xml',
-            results=search.results,
+            results=search.result_container.get_ordered_results(),
             q=search.request_data['q'],
-            number_of_results=len(search.results),
+            number_of_results=search.result_container.results_length(),
             base_url=get_base_url()
         )
         return Response(response_rss, mimetype='text/xml')
 
     return render(
         'results.html',
-        results=search.results,
+        results=search.result_container.get_ordered_results(),
         q=search.request_data['q'],
         selected_categories=search.categories,
         paging=search.paging,
        pageno=search.pageno,
        base_url=get_base_url(),
-        suggestions=search.suggestions,
-        answers=search.answers,
-        infoboxes=search.infoboxes,
+        suggestions=search.result_container.suggestions,
+        answers=search.result_container.answers,
+        infoboxes=search.result_container.infoboxes,
        theme=get_current_theme_name(),
        favicons=global_favicons[themes.index(get_current_theme_name())]
    )
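
Taken together, webapp.py now reads everything it renders from the container. A short restatement of the new access points used above, with search being the Search instance built in index() (a sketch of the call sites, not new API):

    ordered = search.result_container.get_ordered_results()   # scored, deduplicated, grouped
    suggestions = search.result_container.suggestions          # set of suggestion strings
    answers = search.result_container.answers                  # set of answer strings
    infoboxes = search.result_container.infoboxes              # list of merged infobox dicts
    total = search.result_container.results_length()           # number of merged results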