
Merge 38b733f92d55e76aded7ce6eccb967221704b1e7 into f82ead3e303d75ba63a370dc038311e172e1330d

Moritan committed 6 years ago
Parent commit: 349bce03fa (no account linked to the committer's email)
4 changed files with 776 additions and 0 deletions
1. searx/plugins/__init__.py   +15  −0
2. searx/plugins/self_info.py  +15  −0
3. searx/rest-server.py       +194  −0
4. searx/searchAPI.py         +552  −0

searx/plugins/__init__.py  (+15 −0)

@@ -76,6 +76,21 @@ class PluginStore():

         return ret

+    def callAPI(self, plugin_type, user_plugins, *args, **kwargs):
+        ret = True
+        for plugin in user_plugins:
+            if hasattr(plugin, plugin_type + 'API'):
+                ret = getattr(plugin, plugin_type + 'API')(*args, **kwargs)
+                if not ret:
+                    break
+            elif hasattr(plugin, plugin_type):
+                try:
+                    ret = getattr(plugin, plugin_type)('', *args, **kwargs)
+                    if not ret:
+                        break
+                except Exception:
+                    ret = True
+        return ret

 plugins = PluginStore()
 plugins.register(oa_doi_rewrite)
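
For reference, callAPI's dispatch order can be exercised in isolation: it prefers a plugin's `<hook>API` variant and falls back to the classic hook, passing an empty string where the Flask request object would normally go. A minimal, self-contained sketch (the two plugin classes are hypothetical stand-ins for plugin modules, not part of this commit):

# standalone sketch of the dispatch implemented by PluginStore.callAPI
def call_api(plugin_type, user_plugins, *args, **kwargs):
    ret = True
    for plugin in user_plugins:
        if hasattr(plugin, plugin_type + 'API'):
            # API-aware hook: gets the args as-is
            ret = getattr(plugin, plugin_type + 'API')(*args, **kwargs)
        elif hasattr(plugin, plugin_type):
            # classic hook: '' stands in for the Flask request object
            ret = getattr(plugin, plugin_type)('', *args, **kwargs)
        if not ret:
            break
    return ret


class ApiAware(object):
    @staticmethod
    def post_searchAPI(ctx):
        print('API hook saw query: %s' % ctx['query'])
        return True


class Legacy(object):
    @staticmethod
    def post_search(request, search):
        print('classic hook, placeholder request: %r' % request)
        return True


if __name__ == '__main__':
    # calls ApiAware.post_searchAPI(ctx), then Legacy.post_search('', ctx);
    # a falsy return value from either would stop the chain
    call_api('post_search', [ApiAware, Legacy], {'query': 'ip'})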

searx/plugins/self_info.py  (+15 −0)

@@ -44,3 +44,18 @@ def post_search(request, search):
         search.result_container.answers.clear()
         search.result_container.answers.add(ua)
     return True
+
+
+# attach callback to the post search hook of the REST API
+#  ctx: the whole local context of the post search hook;
+#  ctx['user_data'] carries the client ip and user agent
+def post_searchAPI(ctx):
+    if ctx['search'].query == 'ip':
+        ip = ctx['user_data']['ip']
+        del ctx['search'].answers[:]
+        ctx['search'].answers.append(ip)
+    elif p.match(ctx['search'].query):
+        ua = ctx['user_data']['ua']
+        del ctx['search'].answers[:]
+        ctx['search'].answers.append(ua)
+    return True
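
Judging from rest-server.py below, the ctx handed to this hook is the view function's locals(), so post_searchAPI only relies on ctx['search'] and ctx['user_data']. A hypothetical minimal stub for exercising the hook outside Flask:

# hypothetical stand-in for searchAPI.Search, just enough for the hook
class FakeSearch(object):
    def __init__(self, query):
        self.query = query
        self.answers = []  # searchAPI.Search.answers is a plain list

ctx = {
    'search': FakeSearch('ip'),
    'user_data': {'method': 'GET', 'ip': '203.0.113.7', 'ua': 'curl/7.47.0'},
}

# post_searchAPI(ctx) would leave ctx['search'].answers == ['203.0.113.7']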

searx/rest-server.py  (+194 −0, new file)

@@ -0,0 +1,194 @@
+#!flask/bin/python
+from flask import Flask, jsonify, request, make_response
+
+from searx import settings
+from searx.engines import categories, engines
+
+from searx.searchAPI import Search
+
+from searx.version import VERSION_STRING
+from searx.languages import language_codes
+from searx.plugins import plugins
+
+app = Flask(__name__, static_url_path="/static/themes/ember")
+
+
+@app.errorhandler(400)
+def handle400(error):
+    return make_response(jsonify({'name': error.name, 'description': error.description}), 400)
+
+
+@app.errorhandler(404)
+def handle404(error):
+    return make_response(jsonify({'name': error.name, 'description': error.description}), 404)
+
+
+@app.route('/api/v1.0/search', methods=['GET', 'POST'])
+def search_task():
+    task = dict(query='', selected_categories=['general'], pageno=1, settings=get_default_settings())
+
+    # take the client ip from X-Forwarded-For if behind a proxy
+    x_forwarded_for = request.headers.getlist("X-Forwarded-For")
+    if x_forwarded_for:
+        ip = x_forwarded_for[0]
+    else:
+        ip = request.remote_addr
+
+    user_data = {
+        'method': request.method,
+        'ip': ip,
+        'ua': str(request.user_agent)
+    }
+    task['user_data'] = user_data
+
+    if 'query' in request.values:
+        task['query'] = request.values['query']
+    if 'selected_categories' in request.values:
+        task['selected_categories'].append(request.values['selected_categories'])
+    if 'selected_categories[]' in request.values:
+        task['selected_categories'] = request.values.getlist('selected_categories[]')
+    if 'pageno' in request.values:
+        task['pageno'] = request.values['pageno']
+    if 'settings' in request.values:
+        task['settings'] = request.values['settings']
+
+    if not task['query']:
+        return make_response(jsonify({'error': 'query empty'}), 500)
+
+    if not task['pageno'] or int(task['pageno']) < 1:
+        return make_response(jsonify({'error': 'wrong pageno'}), 500)
+
+    try:
+        search = Search(task)
+    except Exception:
+        return make_response(jsonify(dict(error='invalid task')), 500)
+
+    if plugins.callAPI('pre_search', task, locals()):
+        search.search(task)
+
+    plugins.callAPI('post_search', task, locals())
+
+    return jsonify({'results': search.results,
+                    'suggestions': search.suggestions,
+                    'answers': search.answers,
+                    'infoboxes': search.infoboxes
+                    })
+
+
+@app.route('/api/v1.0/settings', methods=['GET'])
+def get_settings():
+    return jsonify(get_default_settings())
+
+
+def get_default_settings():
+    engs = []
+    langs = []
+    plugs = []
+
+    for engine in engines.values():
+        eng = {
+            'name': engine.name,
+            'paging': engine.paging,
+            'categories': engine.categories,
+            'language_support': engine.language_support,
+            'safesearch': engine.safesearch,
+            'timeout': engine.timeout,
+            'shortcut': engine.shortcut,
+            'disabled': engine.disabled
+        }
+        engs.append(eng)
+
+    for plugin in plugins:
+        plug = {
+            'name': plugin.name,
+            'allow': plugin.default_on,
+            'description': plugin.description
+        }
+        plugs.append(plug)
+
+    for lang_id, lang_name, country_name in language_codes:
+        lang = {
+            'id': lang_id,
+            'name': lang_name,
+            'country_name': country_name
+        }
+        langs.append(lang)
+
+    setting = {'engines': engs,
+               'default_locale': get_locale(),
+               'locales': settings['locales'],
+               'all_categories': sorted(categories.keys()),
+               'search': settings['search'],
+               'image_proxy': settings['server'].get('image_proxy'),
+               'plugins': plugs,
+               'languages': langs,
+               'version': VERSION_STRING}
+    return setting
+
+
+def get_locale():
+    locale = request.accept_languages.best_match(settings['locales'].keys())
+
+    if settings['ui'].get('default_locale'):
+        locale = settings['ui']['default_locale']
+
+    if request.cookies.get('locale', '') in settings['locales']:
+        locale = request.cookies.get('locale', '')
+
+    if 'locale' in request.args \
+            and request.args['locale'] in settings['locales']:
+        locale = request.args['locale']
+
+    if 'locale' in request.form \
+            and request.form['locale'] in settings['locales']:
+        locale = request.form['locale']
+
+    return locale
+
+
+@app.before_request
+def option_autoreply():
+    """ Always reply 200 on OPTIONS requests """
+
+    if request.method == 'OPTIONS':
+        resp = app.make_default_options_response()
+
+        headers = None
+        if 'ACCESS_CONTROL_REQUEST_HEADERS' in request.headers:
+            headers = request.headers['ACCESS_CONTROL_REQUEST_HEADERS']
+
+        h = resp.headers
+
+        # Allow the origin which made the XHR
+        h['Access-Control-Allow-Origin'] = request.headers['Origin']
+        # Allow the actual method
+        h['Access-Control-Allow-Methods'] = request.headers['Access-Control-Request-Method']
+        # Allow for 10 seconds
+        h['Access-Control-Max-Age'] = "10"
+
+        # We also keep current headers
+        if headers is not None:
+            h['Access-Control-Allow-Headers'] = headers
+
+        return resp
+
+
+@app.after_request
+def set_allow_origin(resp):
+    """ Set origin for GET, POST, PUT, DELETE requests """
+
+    h = resp.headers
+
+    # Allow cross-domain requests for the other HTTP verbs
+    if request.method != 'OPTIONS' and 'Origin' in request.headers:
+        h['Access-Control-Allow-Origin'] = request.headers['Origin']
+
+    return resp
+
+if __name__ == '__main__':
+    app.run(
+        debug=settings['general']['debug'],
+        use_debugger=settings['general']['debug'],
+        port=settings['server']['port'],
+        host=settings['server']['bind_address']
+    )
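
For a quick smoke test, both endpoints can be called from a small client. A sketch, assuming the server runs locally on searx's default port 8888 (the real value comes from settings['server']['port']):

# client-side sketch for /settings and /search (Python 2, like the codebase)
import json
import urllib2

base = 'http://127.0.0.1:8888/api/v1.0'

# the settings document the search task is built from by default
conf = json.load(urllib2.urlopen(base + '/settings'))
print('searx version: %s' % conf['version'])

# a GET search; query is required, pageno defaults to 1 server-side
resp = json.load(urllib2.urlopen(base + '/search?query=test&pageno=1'))
print('%d results; suggestions: %r; answers: %r' % (
    len(resp['results']), resp['suggestions'], resp['answers']))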

searx/searchAPI.py  (+552 −0, new file)

@@ -0,0 +1,552 @@
+"""
+searx is free software: you can redistribute it and/or modify
+it under the terms of the GNU Affero General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+searx is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU Affero General Public License for more details.
+
+You should have received a copy of the GNU Affero General Public License
+along with searx. If not, see < http://www.gnu.org/licenses/ >.
+
+(C) 2013- by Adam Tauber, <asciimoo@gmail.com>
+"""
+
+import threading
+import re
+import searx.poolrequests as requests_lib
+from itertools import izip_longest, chain
+from operator import itemgetter
+from Queue import Queue
+from time import time
+from urlparse import urlparse, unquote
+from searx import settings
+from searx.engines import engines
+
+from searx.utils import gen_useragent, prettify_url, highlight_content, html_to_text
+from searx.plugins import plugins
+from searx.query import Query
+from searx import logger
+
+logger = logger.getChild('search')
+
+number_of_searches = 0
+
+
+def search_request_wrapper(fn, url, engine_name, **kwargs):
+    try:
+        return fn(url, **kwargs)
+    except Exception:
+        # increase error stats
+        engines[engine_name].stats['errors'] += 1
+
+        # log engine name and specific error message
+        logger.exception('engine crash: {0}'.format(engine_name))
+        return
+
+
+def threaded_requests(requests):
+    timeout_limit = max(r[2]['timeout'] for r in requests)
+    search_start = time()
+    for fn, url, request_args, engine_name in requests:
+        request_args['timeout'] = timeout_limit
+        th = threading.Thread(
+            target=search_request_wrapper,
+            args=(fn, url, engine_name),
+            kwargs=request_args,
+            name='search_request',
+        )
+        th._engine_name = engine_name
+        th.start()
+
+    for th in threading.enumerate():
+        if th.name == 'search_request':
+            remaining_time = max(0.0, timeout_limit - (time() - search_start))
+            th.join(remaining_time)
+            if th.isAlive():
+                logger.warning('engine timeout: {0}'.format(th._engine_name))
+
+
+# get default request parameters
+def default_request_params():
+    return {
+        'method': 'GET',
+        'headers': {},
+        'data': {},
+        'url': '',
+        'cookies': {},
+        'verify': True
+    }
+
+
+# create a callback wrapper for the search engine results
+def make_callback(engine_name, results_queue, callback, params):
+    def process_callback(response, **kwargs):
+        # compare the redirect flag to True explicitly, because resp can be
+        # a Mock object, and any attribute access returns something truthy
+        if response.is_redirect is True:
+            logger.debug('{0} redirect on: {1}'.format(engine_name, response))
+            return
+
+        response.search_params = params
+
+        timeout_overhead = 0.2  # seconds
+        search_duration = time() - params['started']
+        timeout_limit = engines[engine_name].timeout + timeout_overhead
+        if search_duration > timeout_limit:
+            engines[engine_name].stats['page_load_time'] += timeout_limit
+            engines[engine_name].stats['errors'] += 1
+            return
+
+        # callback
+        search_results = callback(response)
+
+        # add results
+        for result in search_results:
+            result['engine'] = engine_name
+
+        results_queue.put_nowait((engine_name, search_results))
+
+        # update stats with current page-load-time
+        engines[engine_name].stats['page_load_time'] += search_duration
+
+    return process_callback
+
+
+# return the meaningful length of the content for a result
+def content_result_len(content):
+    if isinstance(content, basestring):
+        # strip punctuation and whitespace before counting
+        content = re.sub('[,;:!?\./\\\\ ()_-]', '', content)
+        return len(content)
+    else:
+        return 0
+
+
+# score results and remove duplicates
+def score_results(results):
+    # calculate scoring parameters
+    flat_res = filter(
+        None, chain.from_iterable(izip_longest(*results.values())))
+    flat_len = len(flat_res)
+    engines_len = len(results)
+
+    results = []
+
+    # pass 1: deduplication + scoring
+    for i, res in enumerate(flat_res):
+
+        res['parsed_url'] = urlparse(res['url'])
+
+        res['host'] = res['parsed_url'].netloc
+
+        if res['host'].startswith('www.'):
+            res['host'] = res['host'].replace('www.', '', 1)
+
+        res['engines'] = [res['engine']]
+
+        weight = 1.0
+
+        # strip multiple spaces and carriage returns from content
+        if res.get('content'):
+            res['content'] = re.sub(' +', ' ',
+                                    res['content'].strip().replace('\n', ''))
+
+        # get weight of this engine if possible
+        if hasattr(engines[res['engine']], 'weight'):
+            weight = float(engines[res['engine']].weight)
+
+        # calculate score for that engine
+        score = int((flat_len - i) / engines_len) * weight + 1
+
+        # check for duplicates
+        duplicated = False
+        for new_res in results:
+            # remove / from the end of the url if required
+            p1 = res['parsed_url'].path[:-1] \
+                if res['parsed_url'].path.endswith('/') \
+                else res['parsed_url'].path
+            p2 = new_res['parsed_url'].path[:-1] \
+                if new_res['parsed_url'].path.endswith('/') \
+                else new_res['parsed_url'].path
+
+            # check if that result is a duplicate
+            if res['host'] == new_res['host'] and unquote(p1) == unquote(p2) \
+                    and res['parsed_url'].query == new_res['parsed_url'].query \
+                    and res.get('template') == new_res.get('template'):
+                duplicated = new_res
+                break
+
+        # merge duplicates together
+        if duplicated:
+            # use the content with more text
+            if content_result_len(res.get('content', '')) > \
+                    content_result_len(duplicated.get('content', '')):
+                duplicated['content'] = res['content']
+
+            # increase result score
+            duplicated['score'] += score
+
+            # add engine to list of result-engines
+            duplicated['engines'].append(res['engine'])
+
+            # use https if possible
+            if duplicated['parsed_url'].scheme == 'https':
+                continue
+            elif res['parsed_url'].scheme == 'https':
+                duplicated['url'] = res['parsed_url'].geturl()
+                duplicated['parsed_url'] = res['parsed_url']
+
+        # if no duplicate is found, append the result
+        else:
+            res['score'] = score
+            # if the result has no scheme, use http as default
+            if res['parsed_url'].scheme == '':
+                res['parsed_url'] = res['parsed_url']._replace(scheme="http")
+
+            results.append(res)
+
+    results = sorted(results, key=itemgetter('score'), reverse=True)
+
+    # pass 2: group results by category and template
+    gresults = []
+    categoryPositions = {}
+
+    for i, res in enumerate(results):
+        # FIXME: handle more than one category per engine
+        category = engines[res['engine']].categories[0] + ':' + '' \
+            if 'template' not in res \
+            else res['template']
+
+        current = None if category not in categoryPositions \
+            else categoryPositions[category]
+
+        # group with previous results using the same category
+        # if the group can accept more results and is not too far
+        # from the current position
+        if current is not None and (current['count'] > 0) \
+                and (len(gresults) - current['index'] < 20):
+            # group with the previous results using
+            # the same category as this one
+            index = current['index']
+            gresults.insert(index, res)
+
+            # update every index after the current one
+            # (including the current one)
+            for k in categoryPositions:
+                v = categoryPositions[k]['index']
+                if v >= index:
+                    categoryPositions[k]['index'] = v + 1
+
+            # update this category
+            current['count'] -= 1
+
+        else:
+            # start a new group for this category
+            gresults.append(res)
+
+            # update categoryIndex
+            categoryPositions[category] = {'index': len(gresults), 'count': 8}
+
+    return gresults
+
+
+def merge_two_infoboxes(infobox1, infobox2):
+    if 'urls' in infobox2:
+        urls1 = infobox1.get('urls', None)
+        if urls1 is None:
+            urls1 = []
+            infobox1['urls'] = urls1
+
+        urlSet = set()
+        for url in infobox1.get('urls', []):
+            urlSet.add(url.get('url', None))
+
+        for url in infobox2.get('urls', []):
+            if url.get('url', None) not in urlSet:
+                urls1.append(url)
+
+    if 'attributes' in infobox2:
+        attributes1 = infobox1.get('attributes', None)
+        if attributes1 is None:
+            attributes1 = []
+            infobox1['attributes'] = attributes1
+
+        attributeSet = set()
+        for attribute in infobox1.get('attributes', []):
+            if attribute.get('label', None) not in attributeSet:
+                attributeSet.add(attribute.get('label', None))
+
+        # only add attributes whose label is not present yet
+        for attribute in infobox2.get('attributes', []):
+            if attribute.get('label', None) not in attributeSet:
+                attributes1.append(attribute)
+
+    if 'content' in infobox2:
+        content1 = infobox1.get('content', None)
+        content2 = infobox2.get('content', '')
+        if content1 is not None:
+            if content_result_len(content2) > content_result_len(content1):
+                infobox1['content'] = content2
+        else:
+            infobox1['content'] = content2
+
+
+def merge_infoboxes(infoboxes):
+    results = []
+    infoboxes_id = {}
+    for infobox in infoboxes:
+        add_infobox = True
+        infobox_id = infobox.get('id', None)
+        if infobox_id is not None:
+            existing_index = infoboxes_id.get(infobox_id, None)
+            if existing_index is not None:
+                merge_two_infoboxes(results[existing_index], infobox)
+                add_infobox = False
+
+        if add_infobox:
+            results.append(infobox)
+            infoboxes_id[infobox_id] = len(results) - 1
+
+    return results
+
+
+class Search(object):
+    """Search information container"""
+
+    def __init__(self, task):
+        # init vars
+        self.query = None
+        self.engines = []
+        self.plugins = []
+        self.categories = []
+        self.paging = False
+        self.pageno = 1
+        self.lang = 'all'
+
+        # set blocked engines
+        self.blocked_engines = []  # get_blocked_engines(engines, request.cookies)
+
+        self.results = []
+        self.suggestions = list()
+        self.answers = list()
+        self.infoboxes = []
+        self.request_data = {}
+
+        # set specific language if set
+        if 'language' in task['settings']:
+            self.lang = task['settings']['language']
+
+        if 'plugins' in task['settings']:
+            for plugin in task['settings']['plugins']:
+                if plugin['allow']:
+                    self.plugins.append(plugin)
+
+        if task['pageno']:
+            self.pageno = int(task['pageno'])
+
+        # parse the query, if tags are set, which change
+        # the search engine or search language
+        query_obj = Query(str(task['query']), self.blocked_engines)
+        query_obj.parse_query()
+
+        # set query
+        self.query = query_obj.getSearchQuery()
+
+        # get last selected language in query, if possible
+        # TODO support searches with multiple languages
+        if len(query_obj.languages):
+            self.lang = query_obj.languages[-1]
+
+        self.engines = query_obj.engines
+
+        self.categories = []
+
+        # if engines are calculated from the query,
+        # set categories by using that information
+        if self.engines and query_obj.specific:
+            self.categories = list(set(engine['category']
+                                       for engine in self.engines))
+
+        # otherwise, use the defined categories to
+        # calculate which engines should be used
+        else:
+            if 'selected_categories' in task and task['selected_categories']:
+                self.categories = task['selected_categories']
+
+            # if still no category is specified, use general
+            # as the default category
+            if not self.categories:
+                self.categories = ['general']
+
+            # set categories/engines
+            for engine in task['settings']['engines']:
+                if not engine['disabled']:
+                    for categ in engine['categories']:
+                        if categ in self.categories:
+                            self.engines.append({'category': categ,
+                                                 'name': engine['name']})
+
+    # do the search request
+    def search(self, task):
+        global number_of_searches
+
+        # init vars
+        requests = []
+        results_queue = Queue()
+        results = {}
+
+        # increase number of searches
+        number_of_searches += 1
+
+        # set default useragent
+        user_agent = gen_useragent()
+
+        # start a search request for all selected engines
+        for selected_engine in self.engines:
+            if selected_engine['name'] not in engines:
+                continue
+
+            engine = engines[selected_engine['name']]
+
+            # if paging is not supported, skip
+            if self.pageno > 1 and not engine.paging:
+                continue
+
+            # if a search language is set and the engine does not
+            # provide language support, skip
+            if self.lang != 'all' and not engine.language_support:
+                continue
+
+            # set default request parameters
+            request_params = default_request_params()
+            request_params['headers']['User-Agent'] = user_agent
+            request_params['category'] = selected_engine['category']
+            request_params['started'] = time()
+            request_params['pageno'] = self.pageno
+
+            if hasattr(engine, 'language') and engine.language:
+                request_params['language'] = engine.language
+            else:
+                request_params['language'] = self.lang
+
+            # 0 = None, 1 = Moderate, 2 = Strict
+            request_params['safesearch'] = settings['search']['safe_search']
+
+            # update request parameters dependent on
+            # the search engine (contained in the engines folder)
+            engine.request(task['query'].encode('utf-8'), request_params)
+
+            # engines without a request url cannot be queried over http
+            if request_params['url'] is None:
+                # TODO add support for offline engines
+                pass
+
+            # create a callback wrapper for the search engine results
+            callback = make_callback(
+                selected_engine['name'],
+                results_queue,
+                engine.response,
+                request_params)
+
+            # create a dictionary which contains all
+            # information about the request
+            request_args = dict(
+                headers=request_params['headers'],
+                hooks=dict(response=callback),
+                cookies=request_params['cookies'],
+                timeout=engine.timeout,
+                verify=request_params['verify']
+            )
+
+            # specific type of request (GET or POST)
+            if request_params['method'] == 'GET':
+                req = requests_lib.get
+            else:
+                req = requests_lib.post
+                request_args['data'] = request_params['data']
+
+            # ignore empty urls
+            if not request_params['url']:
+                continue
+
+            # append request to list
+            requests.append((req, request_params['url'],
+                             request_args,
+                             selected_engine['name']))
+
+        if not requests:
+            return self
+        # send all search requests
+        threaded_requests(requests)
+
+        while not results_queue.empty():
+            engine_name, engine_results = results_queue.get_nowait()
+
+            # TODO type checks
+            [self.suggestions.append(x['suggestion'])
+             for x in list(engine_results)
+             if 'suggestion' in x
+             and engine_results.remove(x) is None]
+
+            [self.answers.append(x['answer'])
+             for x in list(engine_results)
+             if 'answer' in x
+             and engine_results.remove(x) is None]
+
+            self.infoboxes.extend(x for x in list(engine_results)
+                                  if 'infobox' in x
+                                  and engine_results.remove(x) is None)
+
+            results[engine_name] = engine_results
+
+        # update engine-specific stats
+        for engine_name, engine_results in results.items():
+            engines[engine_name].stats['search_count'] += 1
+            engines[engine_name].stats['result_count'] += len(engine_results)
+
+        # score results and remove duplicates
+        self.results = score_results(results)
+
+        # merge infoboxes according to their ids
+        self.infoboxes = merge_infoboxes(self.infoboxes)
+
+        # update engine stats, using the calculated score
+        for result in self.results:
+            plugins.callAPI('on_result', self.plugins, locals())
+
+            for res_engine in result['engines']:
+                engines[res_engine].stats['score_count'] += result['score']
+
+            result['pretty_url'] = prettify_url(result['url'])
+
+            # TODO check if the timezone is calculated right
+            if 'publishedDate' in result:
+                result['pubdate'] = result['publishedDate'].strftime('%Y-%m-%d %H:%M:%S%z')
+
+            if not self.paging and engines[result['engine']].paging:
+                self.paging = True
+
+            if 'content' in result:
+                result['content_html'] = highlight_content(result['content'],
+                                                           self.query.encode('utf-8'))  # noqa
+            result['title_html'] = highlight_content(result['title'],
+                                                     self.query.encode('utf-8'))
+
+            if result.get('content'):
+                result['content'] = html_to_text(result['content']).strip()
+            # remove html content and duplicated whitespace
+            result['title'] = ' '.join(html_to_text(result['title']).strip().split())
+
+        # return results, suggestions, answers and infoboxes
+        return self
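
Putting both new modules together, Search can also be driven without the Flask layer. A sketch under stated assumptions: a configured searx checkout (engines initialized from settings.yml on import), and a stripped-down, hypothetical settings payload in place of rest-server.get_default_settings(); the task dict mirrors the one built in search_task():

# driving searchAPI.Search directly, bypassing rest-server.py
from searx.searchAPI import Search

task = {
    'query': 'free software',
    'selected_categories': ['general'],
    'pageno': 1,
    'settings': {
        # hypothetical minimal stand-in for get_default_settings()
        'engines': [{'name': 'wikipedia', 'disabled': False,
                     'categories': ['general']}],
        'plugins': [],
    },
    'user_data': {'method': 'GET', 'ip': '127.0.0.1', 'ua': 'example'},
}

search = Search(task)
search.search(task)
for result in search.results[:5]:
    print('%s - %s' % (result['pretty_url'], result['title']))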