from lxml import html
from urllib import urlencode, unquote
from urlparse import urlparse, urljoin
from lxml.etree import _ElementStringResult, _ElementUnicodeResult
from searx.utils import html_to_text

search_url = None
url_xpath = None
content_xpath = None
title_xpath = None
suggestion_xpath = ''
results_xpath = ''

# whether the engine supports paging (overridden per engine instance)
paging = False

# parameters for engines with paging support
#
# number of results on each page
# (only needed if the site expects an offset rather than a page number)
page_size = 1
# number of the first page (usually 0 or 1)
first_page_num = 1
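
# Hypothetical configuration sketch (every value below is made up): a
# concrete engine instance sets these module attributes, e.g. for a site
# that pages by result offset in steps of 10, starting at 0:
#
#   search_url = 'https://example.com/search?q={query}&start={pageno}'
#   results_xpath = '//div[@class="result"]'
#   url_xpath = './/a/@href'
#   title_xpath = './/a'
#   content_xpath = './/p[@class="snippet"]'
#   paging = True
#   page_size = 10
#   first_page_num = 0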
def extract_text(xpath_results):
    '''
    if xpath_results is a list, extract the text from each result and
    concatenate the list
    if xpath_results is an xml element, extract all the text nodes from it
    (text_content() method from lxml)
    if xpath_results is a string result, it is already text
    '''
    if type(xpath_results) == list:
        # it's a list of results: concatenate everything using a recursive call
        if not xpath_results:
            raise Exception('Empty url resultset')
        result = ''
        for e in xpath_results:
            result = result + extract_text(e)
        return result.strip()
    elif type(xpath_results) in [_ElementStringResult, _ElementUnicodeResult]:
        # it's already a string
        return ''.join(xpath_results)
    else:
        # it's an element: take its full text content
        return html_to_text(xpath_results.text_content()).strip()
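
# Illustrative sketch (not part of the engine, markup made up): exercises
# the three extract_text branches on a tiny document.
def _demo_extract_text():
    dom = html.fromstring('<div><a href="/x">Hello <b>world</b></a></div>')
    print(extract_text(dom.xpath('//a')))        # list of elements -> Hello world
    print(extract_text(dom.xpath('//a/@href')))  # string results   -> /x
    print(extract_text(dom.xpath('//a')[0]))     # single element   -> Hello world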
def extract_url(xpath_results, search_url):
    url = extract_text(xpath_results)

    if url.startswith('//'):
        # add http: or https: to scheme-relative urls like //example.com/
        parsed_search_url = urlparse(search_url)
        url = parsed_search_url.scheme + ':' + url
    elif url.startswith('/'):
        # resolve urls relative to the search engine
        url = urljoin(search_url, url)

    # normalize url
    url = normalize_url(url)

    return url
def normalize_url(url):
    parsed_url = urlparse(url)

    # reject urls without a network location
    if not parsed_url.netloc:
        raise Exception('Cannot parse url')

    # add a / at the end of the url if there is no path
    if not parsed_url.path:
        url += '/'

    # FIXME : hack for yahoo
    if parsed_url.hostname == 'search.yahoo.com'\
            and parsed_url.path.startswith('/r'):
        p = parsed_url.path
        mark = p.find('/**')
        if mark != -1:
            return unquote(p[mark + 3:]).decode('utf-8')

    return url
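
# Illustrative sketch (not part of the engine, urls made up): the url
# shapes extract_url resolves; xpath is used so the inputs have the lxml
# string types extract_text expects.
def _demo_extract_url():
    base = 'https://example.com/search?q={query}'
    dom = html.fromstring('<a href="//other.org/r"></a><a href="/r"></a>')
    hrefs = dom.xpath('//a/@href')
    print(extract_url([hrefs[0]], base))       # scheme-relative -> https://other.org/r
    print(extract_url([hrefs[1]], base))       # site-relative   -> https://example.com/r
    print(normalize_url('https://other.org'))  # no path         -> https://other.org/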
def request(query, params):
    # urlencode the query, then drop the leading 'q=' so only the
    # encoded value remains
    query = urlencode({'q': query})[2:]

    fp = {'query': query}
    if paging and search_url.find('{pageno}') >= 0:
        # translate the 1-based pageno into the site's page number or offset
        fp['pageno'] = (params['pageno'] - 1) * page_size + first_page_num

    params['url'] = search_url.format(**fp)
    params['query'] = query

    return params
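
# Illustrative sketch (not part of the engine, values made up): the url
# request() builds for page 2 of an offset-based configuration.
def _demo_request():
    global search_url, paging, page_size, first_page_num
    search_url = 'https://example.com/search?q={query}&start={pageno}'
    paging, page_size, first_page_num = True, 10, 0
    print(request('free software', {'pageno': 2})['url'])
    # -> https://example.com/search?q=free+software&start=10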
def response(resp):
    results = []
    dom = html.fromstring(resp.text)

    if results_xpath:
        # one container per result; extract the fields relative to it
        for result in dom.xpath(results_xpath):
            url = extract_url(result.xpath(url_xpath), search_url)
            title = extract_text(result.xpath(title_xpath)[0])
            content = extract_text(result.xpath(content_xpath)[0])
            results.append({'url': url, 'title': title, 'content': content})
    else:
        # no result container: pair the field xpaths over the whole page
        for url, title, content in zip(
            (extract_url(x, search_url) for
             x in dom.xpath(url_xpath)),
            map(extract_text, dom.xpath(title_xpath)),
            map(extract_text, dom.xpath(content_xpath))
        ):
            results.append({'url': url, 'title': title, 'content': content})

    if not suggestion_xpath:
        return results

    for suggestion in dom.xpath(suggestion_xpath):
        results.append({'suggestion': extract_text(suggestion)})

    return results
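
# Illustrative sketch (not part of the engine, page and xpaths made up):
# feeds response() a canned page through a stand-in for the HTTP response
# object (only its .text attribute is used).
def _demo_response():
    global search_url, results_xpath, url_xpath, title_xpath, content_xpath
    search_url = 'https://example.com/search?q={query}'
    results_xpath = '//div[@class="result"]'
    url_xpath = './/a/@href'
    title_xpath = './/a'
    content_xpath = './/p'

    class FakeResponse(object):
        text = ('<div class="result">'
                '<a href="/r">Title</a><p>Snippet</p></div>')

    print(response(FakeResponse()))
    # -> [{'url': 'https://example.com/r', 'title': 'Title', 'content': 'Snippet'}]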