search.py

'''
searx is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

searx is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.

You should have received a copy of the GNU Affero General Public License
along with searx. If not, see <http://www.gnu.org/licenses/>.

(C) 2013- by Adam Tauber, <asciimoo@gmail.com>
'''
import requests as requests_lib
import threading
import re
from itertools import izip_longest, chain
from operator import itemgetter
from Queue import Queue
from time import time
from urlparse import urlparse, unquote
from searx.engines import (
    categories, engines
)
from searx.languages import language_codes
from searx.utils import gen_useragent
from searx.query import Query
from searx import logger

logger = logger.getChild('search')

number_of_searches = 0


def search_request_wrapper(fn, url, engine_name, **kwargs):
    try:
        return fn(url, **kwargs)
    except Exception as e:
        # increase errors stats
        engines[engine_name].stats['errors'] += 1

        # log engine name and specific error message
        logger.warning('engine crash: {0}\n\t{1}'.format(
            engine_name, str(e)))
        return


def threaded_requests(requests):
    timeout_limit = max(r[2]['timeout'] for r in requests)
    search_start = time()
    for fn, url, request_args, engine_name in requests:
        request_args['timeout'] = timeout_limit
        th = threading.Thread(
            target=search_request_wrapper,
            args=(fn, url, engine_name),
            kwargs=request_args,
            name='search_request',
        )
        th._engine_name = engine_name
        th.start()

    # wait for the slowest request, but never longer than the shared
    # timeout limit; threads still alive afterwards are logged as timeouts
    for th in threading.enumerate():
        if th.name == 'search_request':
            remaining_time = max(0.0, timeout_limit - (time() - search_start))
            th.join(remaining_time)
            if th.isAlive():
                logger.warning('engine timeout: {0}'.format(th._engine_name))
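
# A hedged usage sketch (illustrative values only, assuming the named
# engine is loaded in searx.engines.engines): each entry in `requests`
# is a (fn, url, request_args, engine_name) tuple, and request_args
# must carry a 'timeout' key, e.g.
#
#   threaded_requests([
#       (requests_lib.get, 'https://example.com/search?q=searx',
#        {'timeout': 2.0, 'headers': {}}, 'example_engine'),
#   ])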


# get default request parameters
def default_request_params():
    return {
        'method': 'GET',
        'headers': {},
        'data': {},
        'url': '',
        'cookies': {},
        'verify': True
    }
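
# A minimal sketch of the engine contract these defaults feed (hedged;
# the concrete modules live in searx/engines): each engine defines
# request(query, params) and fills in the params dict, e.g.
#
#   def request(query, params):
#       params['url'] = 'https://example.com/search?q=' + query
#       return params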


# create a callback wrapper for the search engine results
def make_callback(engine_name, results_queue, callback, params):

    # creating a callback wrapper for the search engine results
    def process_callback(response, **kwargs):
        response.search_params = params

        timeout_overhead = 0.2  # seconds
        search_duration = time() - params['started']
        timeout_limit = engines[engine_name].timeout + timeout_overhead
        if search_duration > timeout_limit:
            engines[engine_name].stats['page_load_time'] += timeout_limit
            engines[engine_name].stats['errors'] += 1
            return

        # callback
        search_results = callback(response)

        # add results
        for result in search_results:
            result['engine'] = engine_name

        results_queue.put_nowait((engine_name, search_results))

        # update stats with current page-load-time
        engines[engine_name].stats['page_load_time'] += search_duration

    return process_callback
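
# note: the wrapper built by make_callback() is handed to the requests
# library below as hooks=dict(response=process_callback), so it runs on
# the worker thread as soon as the HTTP response arrives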


# return the meaningful length of the content for a result
def content_result_len(content):
    if isinstance(content, basestring):
        content = re.sub(r'[,;:!?\./\\ ()_-]', '', content)
        return len(content)
    else:
        return 0
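
# e.g. punctuation and whitespace do not count towards the length:
#
#   content_result_len('a, b; c!')  # -> 3 ('abc' remains)
#   content_result_len(None)        # -> 0 (not a string)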


# score results and remove duplications
def score_results(results):
    # calculate scoring parameters
    flat_res = filter(
        None, chain.from_iterable(izip_longest(*results.values())))
    flat_len = len(flat_res)
    engines_len = len(results)

    results = []

    # pass 1: deduplication + scoring
    for i, res in enumerate(flat_res):

        res['parsed_url'] = urlparse(res['url'])
        res['host'] = res['parsed_url'].netloc

        if res['host'].startswith('www.'):
            res['host'] = res['host'].replace('www.', '', 1)

        res['engines'] = [res['engine']]
        weight = 1.0

        # strip multiple spaces and carriage returns from content
        if res.get('content'):
            res['content'] = re.sub(' +', ' ',
                                    res['content'].strip().replace('\n', ''))

        # get weight of this engine if possible
        if hasattr(engines[res['engine']], 'weight'):
            weight = float(engines[res['engine']].weight)

        # calculate score for that engine
        score = int((flat_len - i) / engines_len) * weight + 1
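        # e.g. with 30 interleaved results from 3 engines, the first
        # result scores int(30 / 3) * 1.0 + 1 = 11 and the last one
        # int(1 / 3) * 1.0 + 1 = 1, so earlier positions rank higher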

        # check for duplicates
        duplicated = False
        for new_res in results:
            # remove / from the end of the url if required
            p1 = res['parsed_url'].path[:-1]\
                if res['parsed_url'].path.endswith('/')\
                else res['parsed_url'].path
            p2 = new_res['parsed_url'].path[:-1]\
                if new_res['parsed_url'].path.endswith('/')\
                else new_res['parsed_url'].path

            # check if that result is a duplicate
            if res['host'] == new_res['host'] and\
               unquote(p1) == unquote(p2) and\
               res['parsed_url'].query == new_res['parsed_url'].query and\
               res.get('template') == new_res.get('template'):
                duplicated = new_res
                break

        # merge duplicates together
        if duplicated:
            # using content with more text
            if content_result_len(res.get('content', '')) >\
                    content_result_len(duplicated.get('content', '')):
                duplicated['content'] = res['content']

            # increase result-score
            duplicated['score'] += score

            # add engine to list of result-engines
            duplicated['engines'].append(res['engine'])

            # using https if possible
            if duplicated['parsed_url'].scheme == 'https':
                continue
            elif res['parsed_url'].scheme == 'https':
                duplicated['url'] = res['parsed_url'].geturl()
                duplicated['parsed_url'] = res['parsed_url']

        # if there is no duplicate found, append result
        else:
            res['score'] = score
            results.append(res)

    results = sorted(results, key=itemgetter('score'), reverse=True)
    # pass 2: group results by category and template
    gresults = []
    categoryPositions = {}

    for i, res in enumerate(results):
        # FIXME : handle more than one category per engine
        category = engines[res['engine']].categories[0] + ':' + (
            '' if 'template' not in res else res['template'])

        current = None if category not in categoryPositions\
            else categoryPositions[category]

        # group with previous results using the same category
        # if the group can accept more results and is not too far
        # from the current position
        if current is not None and (current['count'] > 0)\
                and (len(gresults) - current['index'] < 20):
            # group with the previous results using
            # the same category with this one
            index = current['index']
            gresults.insert(index, res)

            # update every index after the current one
            # (including the current one)
            for k in categoryPositions:
                v = categoryPositions[k]['index']
                if v >= index:
                    categoryPositions[k]['index'] = v + 1

            # update this category
            current['count'] -= 1

        else:
            # start a new group for this category
            gresults.append(res)

            # update categoryIndex
            categoryPositions[category] = {'index': len(gresults), 'count': 8}

    return gresults
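
# A hedged usage sketch: score_results() maps {engine_name: [results]}
# to one flat, ranked list (the names and URL are illustrative, and the
# engines are assumed to be loaded in searx.engines.engines):
#
#   ranked = score_results({
#       'engine_a': [{'url': 'https://example.com/', 'engine': 'engine_a'}],
#       'engine_b': [{'url': 'https://example.com/', 'engine': 'engine_b'}],
#   })
#   # the two entries share host, path and query, so they merge into a
#   # single result whose 'engines' list names both engines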


def merge_two_infoboxes(infobox1, infobox2):
    if 'urls' in infobox2:
        urls1 = infobox1.get('urls', None)
        if urls1 is None:
            urls1 = []
            infobox1['urls'] = urls1

        urlSet = set()
        for url in infobox1.get('urls', []):
            urlSet.add(url.get('url', None))

        for url in infobox2.get('urls', []):
            if url.get('url', None) not in urlSet:
                urls1.append(url)

    if 'attributes' in infobox2:
        attributes1 = infobox1.get('attributes', None)
        if attributes1 is None:
            attributes1 = []
            infobox1['attributes'] = attributes1

        attributeSet = set()
        for attribute in infobox1.get('attributes', []):
            if attribute.get('label', None) not in attributeSet:
                attributeSet.add(attribute.get('label', None))

        # only add attributes whose label is not already present
        for attribute in infobox2.get('attributes', []):
            if attribute.get('label', None) not in attributeSet:
                attributes1.append(attribute)

    if 'content' in infobox2:
        content1 = infobox1.get('content', None)
        content2 = infobox2.get('content', '')
        if content1 is not None:
            if content_result_len(content2) > content_result_len(content1):
                infobox1['content'] = content2
        else:
            infobox1['content'] = content2
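
# e.g. merging keeps the longer 'content' of the two, in-place:
#
#   a = {'content': 'short'}
#   merge_two_infoboxes(a, {'content': 'a longer description'})
#   # a['content'] == 'a longer description'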


def merge_infoboxes(infoboxes):
    results = []
    infoboxes_id = {}
    for infobox in infoboxes:
        add_infobox = True
        infobox_id = infobox.get('id', None)
        if infobox_id is not None:
            existingIndex = infoboxes_id.get(infobox_id, None)
            if existingIndex is not None:
                merge_two_infoboxes(results[existingIndex], infobox)
                add_infobox = False

        if add_infobox:
            results.append(infobox)
            infoboxes_id[infobox_id] = len(results) - 1

    return results


class Search(object):

    """Search information container"""

    def __init__(self, request):
        # init vars
        super(Search, self).__init__()
        self.query = None
        self.engines = []
        self.categories = []
        self.paging = False
        self.pageno = 1
        self.lang = 'all'

        # set blocked engines
        if request.cookies.get('blocked_engines'):
            self.blocked_engines = request.cookies['blocked_engines'].split(',')  # noqa
        else:
            self.blocked_engines = []

        self.results = []
        self.suggestions = []
        self.answers = []
        self.infoboxes = []
        self.request_data = {}

        # set specific language if set
        if request.cookies.get('language')\
           and request.cookies['language'] in (x[0] for x in language_codes):
            self.lang = request.cookies['language']

        # set request method
        if request.method == 'POST':
            self.request_data = request.form
        else:
            self.request_data = request.args

        # TODO better exceptions
        if not self.request_data.get('q'):
            raise Exception('noquery')

        # set page number
        pageno_param = self.request_data.get('pageno', '1')
        if not pageno_param.isdigit() or int(pageno_param) < 1:
            raise Exception('wrong pagenumber')

        self.pageno = int(pageno_param)

        # parse query, if tags are set which change
        # the search engine or search language
        query_obj = Query(self.request_data['q'], self.blocked_engines)
        query_obj.parse_query()

        # set query
        self.query = query_obj.getSearchQuery()

        # get last selected language in query, if possible
        # TODO support search with multiple languages
        if len(query_obj.languages):
            self.lang = query_obj.languages[-1]

        self.engines = query_obj.engines

        self.categories = []

        # if engines are calculated from the query,
        # set categories from that information
        if self.engines:
            self.categories = list(set(engine['category']
                                       for engine in self.engines))

        # otherwise, use the selected categories to
        # calculate which engines should be used
        else:
            # set used categories
            for pd_name, pd in self.request_data.items():
                if pd_name.startswith('category_'):
                    category = pd_name[9:]
                    # if category is not found in list, skip
                    if category not in categories:
                        continue

                    # add category to list
                    self.categories.append(category)

            # if no category is specified for this search,
            # use the user-defined default configuration
            # (which is stored in a cookie)
            if not self.categories:
                cookie_categories = request.cookies.get('categories', '')
                cookie_categories = cookie_categories.split(',')
                for ccateg in cookie_categories:
                    if ccateg in categories:
                        self.categories.append(ccateg)

            # if still no category is specified, use 'general'
            # as the default category
            if not self.categories:
                self.categories = ['general']

            # use all engines for this search that are
            # declared under the selected categories
            for categ in self.categories:
                self.engines.extend({'category': categ,
                                     'name': x.name}
                                    for x in categories[categ]
                                    if x.name not in self.blocked_engines)
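
    # A hedged note on the query syntax handled by searx.query.Query
    # (the exact modifier set is defined in that module): bang modifiers
    # such as '!wikipedia' select engines and ':en'-style modifiers set
    # the language, so '!wikipedia :en foo' searches wikipedia in
    # English for 'foo'.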

    # do search-request
    def search(self, request):
        global number_of_searches

        # init vars
        requests = []
        results_queue = Queue()
        results = {}
        suggestions = set()
        answers = set()
        infoboxes = []

        # increase number of searches
        number_of_searches += 1

        # set default useragent
        # user_agent = request.headers.get('User-Agent', '')
        user_agent = gen_useragent()

        # start a search request for all selected engines
        for selected_engine in self.engines:
            if selected_engine['name'] not in engines:
                continue

            engine = engines[selected_engine['name']]

            # if paging is not supported, skip
            if self.pageno > 1 and not engine.paging:
                continue

            # if a search language is set and the engine does not
            # provide language support, skip
            if self.lang != 'all' and not engine.language_support:
                continue

            # set default request parameters
            request_params = default_request_params()
            request_params['headers']['User-Agent'] = user_agent
            request_params['category'] = selected_engine['category']
            request_params['started'] = time()
            request_params['pageno'] = self.pageno
            request_params['language'] = self.lang

            # update request parameters dependent on
            # search engine (contained in engines folder)
            engine.request(self.query.encode('utf-8'), request_params)

            if request_params['url'] is None:
                # TODO add support of offline engines
                pass

            # create a callback wrapper for the search engine results
            callback = make_callback(
                selected_engine['name'],
                results_queue,
                engine.response,
                request_params)

            # create a dictionary which contains all
            # information about the request
            request_args = dict(
                headers=request_params['headers'],
                hooks=dict(response=callback),
                cookies=request_params['cookies'],
                timeout=engine.timeout,
                verify=request_params['verify']
            )

            # specific type of request (GET or POST)
            if request_params['method'] == 'GET':
                req = requests_lib.get
            else:
                req = requests_lib.post
                request_args['data'] = request_params['data']

            # ignore empty urls
            if not request_params['url']:
                continue

            # append request to list
            requests.append((req, request_params['url'],
                             request_args,
                             selected_engine['name']))

        if not requests:
            return results, suggestions, answers, infoboxes

        # send all search requests
        threaded_requests(requests)

        while not results_queue.empty():
            engine_name, engine_results = results_queue.get_nowait()

            # TODO type checks
            # split suggestions, answers and infoboxes out of the raw
            # result list; the remaining entries are normal results
            for x in list(engine_results):
                if 'suggestion' in x:
                    suggestions.add(x['suggestion'])
                    engine_results.remove(x)
                elif 'answer' in x:
                    answers.add(x['answer'])
                    engine_results.remove(x)
                elif 'infobox' in x:
                    infoboxes.append(x)
                    engine_results.remove(x)

            results[engine_name] = engine_results

        # update engine-specific stats
        for engine_name, engine_results in results.items():
            engines[engine_name].stats['search_count'] += 1
            engines[engine_name].stats['result_count'] += len(engine_results)

        # score results and remove duplications
        results = score_results(results)

        # merge infoboxes according to their ids
        infoboxes = merge_infoboxes(infoboxes)

        # update engine stats using the calculated score; every engine
        # that contributed to a merged result gets credited
        for result in results:
            for res_engine in result['engines']:
                engines[res_engine].stats['score_count'] += result['score']

        # return results, suggestions, answers and infoboxes
        return results, suggestions, answers, infoboxes
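
# A hedged end-to-end sketch of how a Flask view might drive this module
# (the view wiring is illustrative, not part of this file):
#
#   search = Search(flask_request)  # parses q, pageno, categories, ...
#   results, suggestions, answers, infoboxes = search.search(flask_request)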