Browse Source

[mod] strict timeout handling

Adam Tauber 10 years ago
parent
commit
77c3a27f56
1 changed file with 14 additions and 18 deletions

+14 −18  searx/search.py  View File

@@ -19,7 +19,6 @@ import requests as requests_lib
 import threading
 import re
 from itertools import izip_longest, chain
-from datetime import datetime
 from operator import itemgetter
 from Queue import Queue
 from time import time
@@ -39,6 +38,7 @@ def threaded_requests(requests):
     timeout_limit = max(r[2]['timeout'] for r in requests)
     search_start = time()
     for fn, url, request_args in requests:
+        request_args['timeout'] = timeout_limit
         th = threading.Thread(
             target=fn,
             args=(url,),
@@ -63,13 +63,7 @@ def default_request_params():
 
 
 # create a callback wrapper for the search engine results
-def make_callback(engine_name,
-                  results_queue,
-                  suggestions,
-                  answers,
-                  infoboxes,
-                  callback,
-                  params):
+def make_callback(engine_name, results_queue, callback, params):
 
     # creating a callback wrapper for the search engine results
     def process_callback(response, **kwargs):
@@ -87,6 +81,14 @@ def make_callback(engine_name,
                 engine_name, str(e))
             return
 
+        timeout_overhead = 0.2  # seconds
+        search_duration = time() - params['started']
+        timeout_limit = engines[engine_name].timeout + timeout_overhead
+        if search_duration > timeout_limit:
+            engines[engine_name].stats['page_load_time'] += timeout_limit
+            engines[engine_name].stats['errors'] += 1
+            return
+
         # add results
         for result in search_results:
             result['engine'] = engine_name
@@ -94,8 +96,7 @@ def make_callback(engine_name,
         results_queue.put_nowait((engine_name, search_results))
 
         # update stats with current page-load-time
-        engines[engine_name].stats['page_load_time'] += \
-            (datetime.now() - params['started']).total_seconds()
+        engines[engine_name].stats['page_load_time'] += search_duration
 
     return process_callback
 
@@ -439,14 +440,13 @@ class Search(object):
             request_params = default_request_params()
             request_params['headers']['User-Agent'] = user_agent
             request_params['category'] = selected_engine['category']
-            request_params['started'] = datetime.now()
+            request_params['started'] = time()
             request_params['pageno'] = self.pageno
             request_params['language'] = self.lang
 
             # update request parameters dependent on
             # search-engine (contained in engines folder)
-            request_params = engine.request(self.query.encode('utf-8'),
-                                            request_params)
+            engine.request(self.query.encode('utf-8'), request_params)
 
             if request_params['url'] is None:
                 # TODO add support of offline engines
@@ -456,12 +456,8 @@ class Search(object):
             callback = make_callback(
                 selected_engine['name'],
                 results_queue,
-                suggestions,
-                answers,
-                infoboxes,
                 engine.response,
-                request_params
-            )
+                request_params)
 
             # create dictionary which contain all
             # informations about the request
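
The rule this commit enforces is easiest to see in isolation. Below is a minimal, self-contained sketch of the strict timeout check that process_callback now applies: a response arriving later than the engine's own timeout plus a 0.2 s overhead is discarded and counted as an error, while a timely response contributes its real duration to the page-load-time statistics. FakeEngine and run_callback are illustrative stand-ins, not searx APIs.

from time import time, sleep

TIMEOUT_OVERHEAD = 0.2  # seconds of grace on top of the engine's own timeout


class FakeEngine(object):
    def __init__(self, timeout):
        self.timeout = timeout
        self.stats = {'page_load_time': 0.0, 'errors': 0}


def run_callback(engine, started, handle_results):
    # Strict rule from this commit: a response that arrives after
    # engine.timeout + TIMEOUT_OVERHEAD is discarded and counted as an
    # error; otherwise its real duration is added to page_load_time.
    search_duration = time() - started
    timeout_limit = engine.timeout + TIMEOUT_OVERHEAD
    if search_duration > timeout_limit:
        engine.stats['page_load_time'] += timeout_limit
        engine.stats['errors'] += 1
        return
    handle_results()
    engine.stats['page_load_time'] += search_duration


if __name__ == '__main__':
    engine = FakeEngine(timeout=0.1)
    started = time()
    sleep(0.4)  # simulate a response that arrives far too late
    run_callback(engine, started, lambda: None)
    assert engine.stats['errors'] == 1
    print(engine.stats)

Two related changes in the diff support this check: params['started'] is now recorded with time() instead of datetime.now(), so the duration is a plain float subtraction, and threaded_requests sets every request's timeout to the shared timeout_limit, so no request waits longer than the slowest selected engine's configured timeout.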