
[enh][mod] search refactor

Adam Tauber, 10 years ago
commit b0ba367a1a
4 changed files with 170 additions and 174 deletions
  1. searx/engines/__init__.py  (+1, -162)
  2. searx/search.py  (+163, -0)
  3. searx/tests/test_webapp.py  (+4, -4)
  4. searx/webapp.py  (+2, -8)

searx/engines/__init__.py  (+1, -162)

@@ -19,19 +19,12 @@ along with searx. If not, see < http://www.gnu.org/licenses/ >.
 from os.path import realpath, dirname, splitext, join
 import sys
 from imp import load_source
-from itertools import izip_longest, chain
-from operator import itemgetter
-from urlparse import urlparse, unquote
-from datetime import datetime
-import grequests
 from flask.ext.babel import gettext
+from operator import itemgetter
 from searx import settings
-from searx.utils import gen_useragent

 engine_dir = dirname(realpath(__file__))

-number_of_searches = 0
-
 engines = {}

 categories = {'general': []}
@@ -114,160 +107,6 @@ for engine_data in settings['engines']:
         engine_shortcuts[engine.shortcut] = engine.name


-def default_request_params():
-    return {
-        'method': 'GET', 'headers': {}, 'data': {}, 'url': '', 'cookies': {}}
-
-
-def make_callback(engine_name, results, suggestions, callback, params):
-    # creating a callback wrapper for the search engine results
-    def process_callback(response, **kwargs):
-        cb_res = []
-        response.search_params = params
-        engines[engine_name].stats['page_load_time'] += \
-            (datetime.now() - params['started']).total_seconds()
-        try:
-            search_results = callback(response)
-        except Exception, e:
-            engines[engine_name].stats['errors'] += 1
-            results[engine_name] = cb_res
-            print '[E] Error with engine "{0}":\n\t{1}'.format(
-                engine_name, str(e))
-            return
-        for result in search_results:
-            result['engine'] = engine_name
-            if 'suggestion' in result:
-                # TODO type checks
-                suggestions.add(result['suggestion'])
-                continue
-            cb_res.append(result)
-        results[engine_name] = cb_res
-    return process_callback
-
-
-def score_results(results):
-    flat_res = filter(
-        None, chain.from_iterable(izip_longest(*results.values())))
-    flat_len = len(flat_res)
-    engines_len = len(results)
-    results = []
-    # deduplication + scoring
-    for i, res in enumerate(flat_res):
-
-        res['parsed_url'] = urlparse(res['url'])
-
-        res['host'] = res['parsed_url'].netloc
-
-        if res['host'].startswith('www.'):
-            res['host'] = res['host'].replace('www.', '', 1)
-
-        res['engines'] = [res['engine']]
-        weight = 1.0
-
-        if hasattr(engines[res['engine']], 'weight'):
-            weight = float(engines[res['engine']].weight)
-
-        score = int((flat_len - i) / engines_len) * weight + 1
-        duplicated = False
-
-        for new_res in results:
-            p1 = res['parsed_url'].path[:-1] if res['parsed_url'].path.endswith('/') else res['parsed_url'].path  # noqa
-            p2 = new_res['parsed_url'].path[:-1] if new_res['parsed_url'].path.endswith('/') else new_res['parsed_url'].path  # noqa
-            if res['host'] == new_res['host'] and\
-               unquote(p1) == unquote(p2) and\
-               res['parsed_url'].query == new_res['parsed_url'].query and\
-               res.get('template') == new_res.get('template'):
-                duplicated = new_res
-                break
-        if duplicated:
-            if res.get('content') > duplicated.get('content'):
-                duplicated['content'] = res['content']
-            duplicated['score'] += score
-            duplicated['engines'].append(res['engine'])
-            if duplicated['parsed_url'].scheme == 'https':
-                continue
-            elif res['parsed_url'].scheme == 'https':
-                duplicated['url'] = res['parsed_url'].geturl()
-                duplicated['parsed_url'] = res['parsed_url']
-        else:
-            res['score'] = score
-            results.append(res)
-    return sorted(results, key=itemgetter('score'), reverse=True)
-
-
-def search(query, request, selected_engines, pageno=1, lang='all'):
-    global engines, categories, number_of_searches
-    requests = []
-    results = {}
-    suggestions = set()
-    number_of_searches += 1
-    #user_agent = request.headers.get('User-Agent', '')
-    user_agent = gen_useragent()
-
-    for selected_engine in selected_engines:
-        if selected_engine['name'] not in engines:
-            continue
-
-        engine = engines[selected_engine['name']]
-
-        if pageno > 1 and not engine.paging:
-            continue
-
-        if lang != 'all' and not engine.language_support:
-            continue
-
-        request_params = default_request_params()
-        request_params['headers']['User-Agent'] = user_agent
-        request_params['category'] = selected_engine['category']
-        request_params['started'] = datetime.now()
-        request_params['pageno'] = pageno
-        request_params['language'] = lang
-        request_params = engine.request(query.encode('utf-8'), request_params)
-
-        if request_params['url'] is None:
-            # TODO add support of offline engines
-            pass
-
-        callback = make_callback(
-            selected_engine['name'],
-            results,
-            suggestions,
-            engine.response,
-            request_params
-        )
-
-        request_args = dict(
-            headers=request_params['headers'],
-            hooks=dict(response=callback),
-            cookies=request_params['cookies'],
-            timeout=engine.timeout
-        )
-
-        if request_params['method'] == 'GET':
-            req = grequests.get
-        else:
-            req = grequests.post
-            request_args['data'] = request_params['data']
-
-        # ignoring empty urls
-        if not request_params['url']:
-            continue
-
-        requests.append(req(request_params['url'], **request_args))
-    grequests.map(requests)
-    for engine_name, engine_results in results.items():
-        engines[engine_name].stats['search_count'] += 1
-        engines[engine_name].stats['result_count'] += len(engine_results)
-
-    results = score_results(results)
-
-    for result in results:
-        for res_engine in result['engines']:
-            engines[result['engine']].stats['score_count'] += result['score']
-
-    return results, suggestions
-
-
 def get_engines_stats():
     # TODO refactor
     pageloads = []
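
Everything removed here is the request/dispatch machinery: make_callback wraps each engine's response parser in a grequests response hook, so parsing runs as each HTTP response arrives while grequests.map waits for the whole batch. A minimal standalone sketch of that hook pattern (hypothetical names, not repo code):

import grequests  # gevent-based wrapper around requests, as used above

def make_hook(engine_name, results):
    # each hook closes over its engine name and the shared results dict,
    # mirroring make_callback/process_callback above
    def hook(response, **kwargs):
        results[engine_name] = response.status_code  # searx parses results here
    return hook

results = {}
reqs = [grequests.get('https://example.com',
                      hooks=dict(response=make_hook('example', results)),
                      timeout=2.0)]
grequests.map(reqs)  # sends concurrently; hooks fire as responses arrive
print(results)       # {'example': 200} if the request succeeded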

searx/search.py  (+163, -0)

@@ -1,7 +1,96 @@
+import grequests
+from itertools import izip_longest, chain
+from datetime import datetime
+from operator import itemgetter
+from urlparse import urlparse, unquote
 from searx.engines import (
     categories, engines, engine_shortcuts
 )
 from searx.languages import language_codes
+from searx.utils import gen_useragent
+
+number_of_searches = 0
+
+
+def default_request_params():
+    return {
+        'method': 'GET', 'headers': {}, 'data': {}, 'url': '', 'cookies': {}}
+
+
+def make_callback(engine_name, results, suggestions, callback, params):
+    # creating a callback wrapper for the search engine results
+    def process_callback(response, **kwargs):
+        cb_res = []
+        response.search_params = params
+        engines[engine_name].stats['page_load_time'] += \
+            (datetime.now() - params['started']).total_seconds()
+        try:
+            search_results = callback(response)
+        except Exception, e:
+            engines[engine_name].stats['errors'] += 1
+            results[engine_name] = cb_res
+            print '[E] Error with engine "{0}":\n\t{1}'.format(
+                engine_name, str(e))
+            return
+        for result in search_results:
+            result['engine'] = engine_name
+            if 'suggestion' in result:
+                # TODO type checks
+                suggestions.add(result['suggestion'])
+                continue
+            cb_res.append(result)
+        results[engine_name] = cb_res
+    return process_callback
+
+
+def score_results(results):
+    flat_res = filter(
+        None, chain.from_iterable(izip_longest(*results.values())))
+    flat_len = len(flat_res)
+    engines_len = len(results)
+    results = []
+    # deduplication + scoring
+    for i, res in enumerate(flat_res):
+
+        res['parsed_url'] = urlparse(res['url'])
+
+        res['host'] = res['parsed_url'].netloc
+
+        if res['host'].startswith('www.'):
+            res['host'] = res['host'].replace('www.', '', 1)
+
+        res['engines'] = [res['engine']]
+        weight = 1.0
+
+        if hasattr(engines[res['engine']], 'weight'):
+            weight = float(engines[res['engine']].weight)
+
+        score = int((flat_len - i) / engines_len) * weight + 1
+        duplicated = False
+
+        for new_res in results:
+            p1 = res['parsed_url'].path[:-1] if res['parsed_url'].path.endswith('/') else res['parsed_url'].path  # noqa
+            p2 = new_res['parsed_url'].path[:-1] if new_res['parsed_url'].path.endswith('/') else new_res['parsed_url'].path  # noqa
+            if res['host'] == new_res['host'] and\
+               unquote(p1) == unquote(p2) and\
+               res['parsed_url'].query == new_res['parsed_url'].query and\
+               res.get('template') == new_res.get('template'):
+                duplicated = new_res
+                break
+        if duplicated:
+            if res.get('content') > duplicated.get('content'):
+                duplicated['content'] = res['content']
+            duplicated['score'] += score
+            duplicated['engines'].append(res['engine'])
+            if duplicated['parsed_url'].scheme == 'https':
+                continue
+            elif res['parsed_url'].scheme == 'https':
+                duplicated['url'] = res['parsed_url'].geturl()
+                duplicated['parsed_url'] = res['parsed_url']
+        else:
+            res['score'] = score
+            results.append(res)
+    return sorted(results, key=itemgetter('score'), reverse=True)


 class Search(object):
@@ -112,3 +201,77 @@ class Search(object):
         if modified:
             self.query = self.query.replace(query_parts[0], '', 1).strip()
             self.parse_query()
+
+    def search(self, request):
+        global number_of_searches
+        requests = []
+        results = {}
+        suggestions = set()
+        number_of_searches += 1
+        #user_agent = request.headers.get('User-Agent', '')
+        user_agent = gen_useragent()
+
+        for selected_engine in self.engines:
+            if selected_engine['name'] not in engines:
+                continue
+
+            engine = engines[selected_engine['name']]
+
+            if self.pageno > 1 and not engine.paging:
+                continue
+
+            if self.lang != 'all' and not engine.language_support:
+                continue
+
+            request_params = default_request_params()
+            request_params['headers']['User-Agent'] = user_agent
+            request_params['category'] = selected_engine['category']
+            request_params['started'] = datetime.now()
+            request_params['pageno'] = self.pageno
+            request_params['language'] = self.lang
+            request_params = engine.request(self.query.encode('utf-8'),
+                                            request_params)
+
+            if request_params['url'] is None:
+                # TODO add support of offline engines
+                pass
+
+            callback = make_callback(
+                selected_engine['name'],
+                results,
+                suggestions,
+                engine.response,
+                request_params
+            )
+
+            request_args = dict(
+                headers=request_params['headers'],
+                hooks=dict(response=callback),
+                cookies=request_params['cookies'],
+                timeout=engine.timeout
+            )
+
+            if request_params['method'] == 'GET':
+                req = grequests.get
+            else:
+                req = grequests.post
+                request_args['data'] = request_params['data']
+
+            # ignoring empty urls
+            if not request_params['url']:
+                continue
+
+            requests.append(req(request_params['url'], **request_args))
+        grequests.map(requests)
+        for engine_name, engine_results in results.items():
+            engines[engine_name].stats['search_count'] += 1
+            engines[engine_name].stats['result_count'] += len(engine_results)
+
+        results = score_results(results)
+
+        for result in results:
+            for res_engine in result['engines']:
+                engines[result['engine']]\
+                    .stats['score_count'] += result['score']
+
+        return results, suggestions
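
The helper functions arrive here unchanged from engines/__init__.py; only the old module-level search() becomes a method that reads query, pageno, lang, and engines from the Search instance instead of taking them as arguments. (Note that in both versions the final loop increments score_count on result['engine'] rather than on res_engine.) For the ranking step in score_results, a standalone Python 3 sketch with made-up data (zip_longest replaces the Python 2 izip_longest):

from itertools import chain, zip_longest  # izip_longest in the Python 2 original

def rank(per_engine, weights):
    # interleave per-engine result lists round-robin, dropping the padding
    # Nones, exactly as score_results does above
    flat = [r for r in chain.from_iterable(zip_longest(*per_engine.values())) if r]
    flat_len, engines_len = len(flat), len(per_engine)
    scored = []
    for i, res in enumerate(flat):
        weight = weights.get(res['engine'], 1.0)
        # same shape as the original formula: earlier slots score higher
        score = int((flat_len - i) / engines_len) * weight + 1
        scored.append((score, res['url']))
    return sorted(scored, reverse=True)

per_engine = {
    'engine_a': [{'engine': 'engine_a', 'url': 'https://a.example/1'},
                 {'engine': 'engine_a', 'url': 'https://a.example/2'}],
    'engine_b': [{'engine': 'engine_b', 'url': 'https://b.example/1'}],
}
# engine_b's weight of 2.0 doubles its base score, so b.example/1 wins:
# [(3.0, 'https://b.example/1'), (2, 'https://a.example/1'), (1, 'https://a.example/2')]
print(rank(per_engine, {'engine_b': 2.0}))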

searx/tests/test_webapp.py  (+4, -4)

@@ -39,7 +39,7 @@ class ViewsTestCase(SearxTestCase):
         self.assertEqual(result.status_code, 200)
         self.assertIn('<div class="title"><h1>searx</h1></div>', result.data)

-    @patch('searx.webapp.do_search')
+    @patch('searx.search.Search.search')
     def test_index_html(self, search):
         search.return_value = (
             self.test_results,
@@ -55,7 +55,7 @@ class ViewsTestCase(SearxTestCase):
             result.data
         )

-    @patch('searx.webapp.do_search')
+    @patch('searx.search.Search.search')
     def test_index_json(self, search):
         search.return_value = (
             self.test_results,
@@ -71,7 +71,7 @@ class ViewsTestCase(SearxTestCase):
         self.assertEqual(
             result_dict['results'][0]['url'], 'http://first.test.xyz')

-    @patch('searx.webapp.do_search')
+    @patch('searx.search.Search.search')
     def test_index_csv(self, search):
         search.return_value = (
             self.test_results,
@@ -86,7 +86,7 @@ class ViewsTestCase(SearxTestCase):
             result.data
         )

-    @patch('searx.webapp.do_search')
+    @patch('searx.search.Search.search')
     def test_index_rss(self, search):
         search.return_value = (
             self.test_results,
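
The only test change is the patch target: webapp.py no longer imports do_search, so patching searx.webapp.do_search would intercept nothing. mock replaces a name at its lookup site, and the lookup is now a method on the Search class. A minimal sketch of the idea (assumes searx is importable):

from mock import patch  # unittest.mock on Python 3

with patch('searx.search.Search.search') as mocked_search:
    # every Search instance now returns the stub, regardless of which
    # module created the instance or where the call happens
    mocked_search.return_value = ([], set())  # (results, suggestions)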

searx/webapp.py  (+2, -8)

@@ -39,8 +39,7 @@ from flask import (
 from flask.ext.babel import Babel, gettext, format_date
 from searx import settings, searx_dir
 from searx.engines import (
-    search as do_search, categories, engines, get_engines_stats,
-    engine_shortcuts
+    categories, engines, get_engines_stats, engine_shortcuts
 )
 from searx.utils import (
     UnicodeWriter, highlight_content, html_to_text, get_themes
@@ -191,12 +190,7 @@ def index():
             'index.html',
         )

-    # TODO moar refactor - do_search integration into Search class
-    search.results, search.suggestions = do_search(search.query,
-                                                   request,
-                                                   search.engines,
-                                                   search.pageno,
-                                                   search.lang)
+    search.results, search.suggestions = search.search(request)

     for result in search.results:
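
With the import and the five-argument call gone, the view drives everything through the Search object. A condensed sketch of the resulting flow (the Search constructor is not part of this diff, so Search(request) is an assumption):

from searx.search import Search

def index(request):
    # hypothetical condensed view, not the real webapp.index(); the real
    # Search constructor and its parsing of query/pageno/lang/engines are
    # not shown in this commit
    search = Search(request)
    # the single line that replaced the five-argument do_search() call:
    search.results, search.suggestions = search.search(request)
    return search.results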