
Merge pull request #89 from pointhi/engines

update search engines and add comments to them
Adam Tauber, 10 years ago
commit f825752145

searx/engines/bing.py (+1 −0)

@@ -23,6 +23,7 @@ language_support = True
 base_url = 'https://www.bing.com/'
 search_string = 'search?{query}&first={offset}'
 
+
 # do search-request
 def request(query, params):
     offset = (params['pageno'] - 1) * 10 + 1

searx/engines/bing_images.py (+1 −0)

@@ -24,6 +24,7 @@ paging = True
 base_url = 'https://www.bing.com/'
 search_string = 'images/search?{query}&count=10&first={offset}'
 
+
 # do search-request
 def request(query, params):
     offset = (params['pageno'] - 1) * 10 + 1

searx/engines/bing_news.py (+1 −0)

@@ -24,6 +24,7 @@ language_support = True
 base_url = 'https://www.bing.com/'
 search_string = 'news/search?{query}&first={offset}'
 
+
 # do search-request
 def request(query, params):
     offset = (params['pageno'] - 1) * 10 + 1

searx/engines/deviantart.py (+29 −3)

@@ -1,35 +1,61 @@
+## Deviantart (Images)
+#
+# @website     https://www.deviantart.com/
+# @provide-api yes (https://www.deviantart.com/developers/) (RSS)
+#
+# @using-api   no (TODO, rewrite to api)
+# @results     HTML
+# @stable      no (HTML can change)
+# @parse       url, title, thumbnail, img_src
+#
+# @todo        rewrite to api
+
 from urllib import urlencode
 from urlparse import urljoin
 from lxml import html
 
+# engine dependent config
 categories = ['images']
+paging = True
 
+# search-url
 base_url = 'https://www.deviantart.com/'
 search_url = base_url+'search?offset={offset}&{query}'
 
-paging = True
-
 
+# do search-request
 def request(query, params):
     offset = (params['pageno'] - 1) * 24
+
     params['url'] = search_url.format(offset=offset,
                                       query=urlencode({'q': query}))
+
     return params
 
 
+# get response from search-request
 def response(resp):
     results = []
+
+    # return empty array if a redirection code is returned
     if resp.status_code == 302:
-        return results
+        return []
+
     dom = html.fromstring(resp.text)
+
+    # parse results
     for result in dom.xpath('//div[contains(@class, "tt-a tt-fh")]'):
         link = result.xpath('.//a[contains(@class, "thumb")]')[0]
         url = urljoin(base_url, link.attrib.get('href'))
         title_links = result.xpath('.//span[@class="details"]//a[contains(@class, "t")]')  # noqa
         title = ''.join(title_links[0].xpath('.//text()'))
         img_src = link.xpath('.//img')[0].attrib['src']
+
+        # append result
         results.append({'url': url,
                         'title': title,
                         'img_src': img_src,
                         'template': 'images.html'})
+
+    # return results
     return results
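
The commented request/response pair above is the whole engine contract: searx fills a params dict, the engine's request() rewrites it into an outgoing URL, and response() turns the HTTP reply into result dicts. A minimal sketch of driving deviantart.py by hand, assuming a searx checkout and the requests library; the only params key request() reads here is 'pageno':

import requests
from searx.engines import deviantart

# build the outgoing request; request() fills in params['url']
params = deviantart.request('landscape', {'pageno': 1})
resp = requests.get(params['url'])

# response() needs .status_code and .text, both provided by requests
for r in deviantart.response(resp):
    print r['url'], r['title']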

searx/engines/duckduckgo.py (+38 −34)

@@ -1,65 +1,69 @@
+## DuckDuckGo (Web)
+#
+# @website     https://duckduckgo.com/
+# @provide-api yes (https://duckduckgo.com/api), but not all results from search-site
+#
+# @using-api   no
+# @results     HTML (using search portal)
+# @stable      no (HTML can change)
+# @parse       url, title, content
+#
+# @todo        rewrite to api
+# @todo        language support (the currently used site does not support language-change)
+
 from urllib import urlencode
 from lxml.html import fromstring
 from searx.utils import html_to_text
 
-url = 'https://duckduckgo.com/html?{query}&s={offset}'
+# engine dependent config
+categories = ['general']
+paging = True
 locale = 'us-en'
 
+# search-url
+url = 'https://duckduckgo.com/html?{query}&s={offset}'
+
+# specific xpath variables
+result_xpath = '//div[@class="results_links results_links_deep web-result"]'  # noqa
+url_xpath = './/a[@class="large"]/@href'
+title_xpath = './/a[@class="large"]//text()'
+content_xpath = './/div[@class="snippet"]//text()'
 
+
+# do search-request
 def request(query, params):
     offset = (params['pageno'] - 1) * 30
-    q = urlencode({'q': query,
-                   'l': locale})
-    params['url'] = url.format(query=q, offset=offset)
+
+    params['url'] = url.format(
+        query=urlencode({'q': query, 'l': locale}),
+        offset=offset)
+
     return params
 
 
+# get response from search-request
 def response(resp):
-    result_xpath = '//div[@class="results_links results_links_deep web-result"]'  # noqa
-    url_xpath = './/a[@class="large"]/@href'
-    title_xpath = './/a[@class="large"]//text()'
-    content_xpath = './/div[@class="snippet"]//text()'
     results = []
 
     doc = fromstring(resp.text)
 
+    # parse results
     for r in doc.xpath(result_xpath):
         try:
             res_url = r.xpath(url_xpath)[-1]
         except:
             continue
+
         if not res_url:
             continue
+
         title = html_to_text(''.join(r.xpath(title_xpath)))
         content = html_to_text(''.join(r.xpath(content_xpath)))
+
+        # append result
         results.append({'title': title,
                         'content': content,
                         'url': res_url})
 
+    # return results
     return results
-
-
-#from json import loads
-#search_url = url + 'd.js?{query}&p=1&s={offset}'
-#
-#paging = True
-#
-#
-#def request(query, params):
-#    offset = (params['pageno'] - 1) * 30
-#    q = urlencode({'q': query,
-#                   'l': locale})
-#    params['url'] = search_url.format(query=q, offset=offset)
-#    return params
-#
-#
-#def response(resp):
-#    results = []
-#    search_res = loads(resp.text[resp.text.find('[{'):-2])[:-1]
-#    for r in search_res:
-#        if not r.get('t'):
-#            continue
-#        results.append({'title': r['t'],
-#                       'content': html_to_text(r['a']),
-#                       'url': r['u']})
-#    return results
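
Hoisting the xpath strings to module level turns them into engine configuration instead of per-call locals. A standalone sketch of the pattern on invented HTML (the markup is illustrative, not DuckDuckGo's):

from lxml.html import fromstring

result_xpath = '//div[@class="result"]'
title_xpath = './/a//text()'

dom = fromstring('<div class="result"><a href="/x">hello</a></div>')
for r in dom.xpath(result_xpath):
    print ''.join(r.xpath(title_xpath))  # prints: hello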

searx/engines/dummy.py (+8 −0)

@@ -1,6 +1,14 @@
+## Dummy
+#
+# @results     empty array
+# @stable      yes
+
+
+# do search-request
 def request(query, params):
     return params
 
 
+# get response from search-request
 def response(resp):
     return []
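
With its new comments, dummy.py doubles as documentation of the minimal engine interface: one function that fills params, one that maps a response to results. A hedged skeleton for a new engine built on the same two comments (the name and URL are placeholders, not a real engine):

from urllib import urlencode

# placeholder search-url, not a real service
search_url = 'https://example.org/search?{query}'


# do search-request
def request(query, params):
    params['url'] = search_url.format(query=urlencode({'q': query}))
    return params


# get response from search-request
def response(resp):
    return []  # a real engine would parse resp.text here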

searx/engines/generalfile.py (+28 −3)

@@ -1,35 +1,60 @@
+## General Files (Files)
+#
+# @website     http://www.general-files.org
+# @provide-api no (nothing found)
+#
+# @using-api   no (because nothing found)
+# @results     HTML (using search portal)
+# @stable      no (HTML can change)
+# @parse       url, title, content
+#
+# @todo        detect torrents?
+
 from lxml import html
 
+# engine dependent config
+categories = ['files']
+paging = True
 
+# search-url
 base_url = 'http://www.general-file.com'
 search_url = base_url + '/files-{letter}/{query}/{pageno}'
 
+# specific xpath variables
 result_xpath = '//table[@class="block-file"]'
 title_xpath = './/h2/a//text()'
 url_xpath = './/h2/a/@href'
 content_xpath = './/p//text()'
 
-paging = True
-
 
+# do search-request
 def request(query, params):
+
     params['url'] = search_url.format(query=query,
                                       letter=query[0],
                                       pageno=params['pageno'])
+
     return params
 
 
+# get response from search-request
 def response(resp):
-
     results = []
+
     dom = html.fromstring(resp.text)
+
+    # parse results
     for result in dom.xpath(result_xpath):
         url = result.xpath(url_xpath)[0]
+
         # skip fast download links
         if not url.startswith('/'):
             continue
+
+        # append result
        results.append({'url': base_url + url,
                         'title': ''.join(result.xpath(title_xpath)),
                         'content': ''.join(result.xpath(content_xpath))})
 
+    # return results
     return results
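
The search_url template above routes by the first letter of the query. A quick check of the URL request() produces, with illustrative values:

base_url = 'http://www.general-file.com'
search_url = base_url + '/files-{letter}/{query}/{pageno}'

query = 'ubuntu'
print search_url.format(query=query, letter=query[0], pageno=2)
# prints: http://www.general-file.com/files-u/ubuntu/2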

searx/engines/github.py (+30 −2)

@@ -1,31 +1,59 @@
+## Github (It)
+#
+# @website     https://github.com/
+# @provide-api yes (https://developer.github.com/v3/)
+#
+# @using-api   yes
+# @results     JSON
+# @stable      yes (using api)
+# @parse       url, title, content
+
 from urllib import urlencode
 from json import loads
 from cgi import escape
 
+# engine dependent config
 categories = ['it']
 
+# search-url
 search_url = 'https://api.github.com/search/repositories?sort=stars&order=desc&{query}'  # noqa
 
 accept_header = 'application/vnd.github.preview.text-match+json'
 
 
+# do search-request
 def request(query, params):
     params['url'] = search_url.format(query=urlencode({'q': query}))
+
     params['headers']['Accept'] = accept_header
+
     return params
 
 
+# get response from search-request
 def response(resp):
     results = []
+
     search_res = loads(resp.text)
+
+    # check if items are received
     if not 'items' in search_res:
-        return results
+        return []
+
+    # parse results
     for res in search_res['items']:
         title = res['name']
         url = res['html_url']
+
         if res['description']:
             content = escape(res['description'][:500])
         else:
             content = ''
-        results.append({'url': url, 'title': title, 'content': content})
+
+        # append result
+        results.append({'url': url,
+                        'title': title,
+                        'content': content})
+
+    # return results
     return results
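
This engine talks to GitHub's repository-search API and opts into text-match previews via the Accept header. A sketch of what request() leaves in params; the empty 'headers' dict stands in for what searx normally pre-populates:

from searx.engines import github

params = github.request('searx', {'headers': {}})
print params['url']
# https://api.github.com/search/repositories?sort=stars&order=desc&q=searx
print params['headers']['Accept']
# application/vnd.github.preview.text-match+json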

searx/engines/piratebay.py (+34 −7)

@@ -1,39 +1,61 @@
+## Piratebay (Videos, Music, Files)
+#
+# @website     https://thepiratebay.se
+# @provide-api no (nothing found)
+#
+# @using-api   no
+# @results     HTML (using search portal)
+# @stable      no (HTML can change)
+# @parse       url, title, content, seed, leech, magnetlink
+
 from urlparse import urljoin
 from cgi import escape
 from urllib import quote
 from lxml import html
 from operator import itemgetter
 
-categories = ['videos', 'music']
+# engine dependent config
+categories = ['videos', 'music', 'files']
+paging = True
 
+# search-url
 url = 'https://thepiratebay.se/'
 search_url = url + 'search/{search_term}/{pageno}/99/{search_type}'
-search_types = {'videos': '200',
+
+# piratebay specific type-definitions
+search_types = {'files': '0',
                 'music': '100',
-                'files': '0'}
+                'videos': '200'}
 
+# specific xpath variables
 magnet_xpath = './/a[@title="Download this torrent using magnet"]'
 content_xpath = './/font[@class="detDesc"]//text()'
 
-paging = True
-
 
+# do search-request
 def request(query, params):
-    search_type = search_types.get(params['category'], '200')
+    search_type = search_types.get(params['category'], '0')
+
     params['url'] = search_url.format(search_term=quote(query),
                                       search_type=search_type,
                                       pageno=params['pageno'] - 1)
+
     return params
 
 
+# get response from search-request
 def response(resp):
     results = []
+
     dom = html.fromstring(resp.text)
+
     search_res = dom.xpath('//table[@id="searchResult"]//tr')
 
+    # return empty array if nothing is found
     if not search_res:
-        return results
+        return []
 
+    # parse results
     for result in search_res[1:]:
         link = result.xpath('.//div[@class="detName"]//a')[0]
         href = urljoin(url, link.attrib.get('href'))
@@ -41,17 +63,21 @@ def response(resp):
         content = escape(' '.join(result.xpath(content_xpath)))
         seed, leech = result.xpath('.//td[@align="right"]/text()')[:2]
 
+        # convert seed to int if possible
         if seed.isdigit():
             seed = int(seed)
         else:
             seed = 0
 
+        # convert leech to int if possible
         if leech.isdigit():
             leech = int(leech)
         else:
             leech = 0
 
         magnetlink = result.xpath(magnet_xpath)[0]
+
+        # append result
         results.append({'url': href,
                         'title': title,
                         'content': content,
@@ -60,4 +86,5 @@ def response(resp):
                         'magnetlink': magnetlink.attrib['href'],
                         'template': 'torrent.html'})
 
+    # return results sorted by seeder
     return sorted(results, key=itemgetter('seed'), reverse=True)
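
The closing comment marks the one engine that post-sorts its results: seeder counts, already coerced to int above, drive a descending sort. The same one-liner on made-up data:

from operator import itemgetter

results = [{'title': 'a', 'seed': 3}, {'title': 'b', 'seed': 12}]
for r in sorted(results, key=itemgetter('seed'), reverse=True):
    print r['title'], r['seed']  # b 12, then a 3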

searx/engines/soundcloud.py (+29 −4)

@@ -1,30 +1,55 @@
+## Soundcloud (Music)
+#
+# @website     https://soundcloud.com
+# @provide-api yes (https://developers.soundcloud.com/)
+#
+# @using-api   yes
+# @results     JSON
+# @stable      yes
+# @parse       url, title, content
+
 from json import loads
 from urllib import urlencode
 
+# engine dependent config
 categories = ['music']
+paging = True
 
+# api-key
 guest_client_id = 'b45b1aa10f1ac2941910a7f0d10f8e28'
-url = 'https://api.soundcloud.com/'
-search_url = url + 'search?{query}&facet=model&limit=20&offset={offset}&linked_partitioning=1&client_id='+guest_client_id  # noqa
 
-paging = True
+# search-url
+url = 'https://api.soundcloud.com/'
+search_url = url + 'search?{query}&facet=model&limit=20&offset={offset}&linked_partitioning=1&client_id={client_id}'  # noqa
 
 
+# do search-request
 def request(query, params):
     offset = (params['pageno'] - 1) * 20
+
     params['url'] = search_url.format(query=urlencode({'q': query}),
-                                      offset=offset)
+                                      offset=offset,
+                                      client_id=guest_client_id)
+
     return params
 
 
+# get response from search-request
 def response(resp):
     results = []
+
     search_res = loads(resp.text)
+
+    # parse results
     for result in search_res.get('collection', []):
         if result['kind'] in ('track', 'playlist'):
             title = result['title']
             content = result['description']
+
+            # append result
             results.append({'url': result['permalink_url'],
                             'title': title,
                             'content': content})
+
+    # return results
     return results
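
Moving the client id out of string concatenation and into a {client_id} placeholder keeps the whole URL in one readable template. Filling it in by hand, with the query built exactly as request() does:

from urllib import urlencode

url = 'https://api.soundcloud.com/'
search_url = url + 'search?{query}&facet=model&limit=20&offset={offset}' \
    '&linked_partitioning=1&client_id={client_id}'

print search_url.format(query=urlencode({'q': 'ambient'}),
                        offset=20,
                        client_id='b45b1aa10f1ac2941910a7f0d10f8e28')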

searx/engines/stackoverflow.py (+35 −7)

@@ -1,30 +1,58 @@
+## Stackoverflow (It)
+#
+# @website     https://stackoverflow.com/
+# @provide-api not clear (https://api.stackexchange.com/docs/advanced-search)
+#
+# @using-api   no
+# @results     HTML
+# @stable      no (HTML can change)
+# @parse       url, title, content
+
 from urlparse import urljoin
 from cgi import escape
 from urllib import urlencode
 from lxml import html
 
+# engine dependent config
 categories = ['it']
+paging = True
 
+# search-url
 url = 'http://stackoverflow.com/'
 search_url = url+'search?{query}&page={pageno}'
-result_xpath = './/div[@class="excerpt"]//text()'
 
-paging = True
+# specific xpath variables
+results_xpath = '//div[contains(@class,"question-summary")]'
+link_xpath = './/div[@class="result-link"]//a|.//div[@class="summary"]//h3//a'
+title_xpath = './/text()'
+content_xpath = './/div[@class="excerpt"]//text()'
 
 
+# do search-request
 def request(query, params):
     params['url'] = search_url.format(query=urlencode({'q': query}),
                                       pageno=params['pageno'])
+
     return params
 
 
+# get response from search-request
 def response(resp):
     results = []
+
     dom = html.fromstring(resp.text)
-    for result in dom.xpath('//div[@class="question-summary search-result"]'):
-        link = result.xpath('.//div[@class="result-link"]//a')[0]
+
+    # parse results
+    for result in dom.xpath(results_xpath):
+        link = result.xpath(link_xpath)[0]
         href = urljoin(url, link.attrib.get('href'))
-        title = escape(' '.join(link.xpath('.//text()')))
-        content = escape(' '.join(result.xpath(result_xpath)))
-        results.append({'url': href, 'title': title, 'content': content})
+        title = escape(' '.join(link.xpath(title_xpath)))
+        content = escape(' '.join(result.xpath(content_xpath)))
+
+        # append result
+        results.append({'url': href,
+                        'title': title,
+                        'content': content})
+
+    # return results
     return results
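
link_xpath uses xpath's union operator ('|'), so one query tolerates either result-markup variant. A standalone check on invented HTML matching the second branch:

from lxml import html

link_xpath = './/div[@class="result-link"]//a|.//div[@class="summary"]//h3//a'

dom = html.fromstring(
    '<div><div class="summary"><h3><a href="/q/1">Q1</a></h3></div></div>')
print dom.xpath(link_xpath)[0].attrib['href']  # prints: /q/1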

searx/engines/startpage.py (+53 −21)

@@ -1,47 +1,79 @@
+## Startpage (Web)
+#
+# @website     https://startpage.com
+# @provide-api no (nothing found)
+#
+# @using-api   no
+# @results     HTML
+# @stable      no (HTML can change)
+# @parse       url, title, content
+#
+# @todo        paging
+
 from urllib import urlencode
 from lxml import html
 from cgi import escape
+import re
+
+# engine dependent config
+categories = ['general']
+# there is a mechanism to block "bot" searches (probably the qid parameter);
+# it would require storing qids between multiple search calls
+#paging = False
+language_support = True
 
-base_url = None
-search_url = None
+# search-url
+base_url = 'https://startpage.com/'
+search_url = base_url + 'do/search'
 
-# TODO paging
-paging = False
-# TODO complete list of country mapping
-country_map = {'en_US': 'eng',
-               'en_UK': 'uk',
-               'nl_NL': 'ned'}
+# specific xpath variables
+# ads xpath: //div[@id="results"]/div[@id="sponsored"]//div[@class="result"]
+# not ads: div[@class="result"] are the direct children of div[@id="results"]
+results_xpath = '//div[@class="result"]'
+link_xpath = './/h3/a'
 
 
+# do search-request
 def request(query, params):
+    offset = (params['pageno'] - 1) * 10
     query = urlencode({'q': query})[2:]
+
     params['url'] = search_url
     params['method'] = 'POST'
     params['data'] = {'query': query,
-                      'startat': (params['pageno'] - 1) * 10}  # offset
-    country = country_map.get(params['language'], 'eng')
-    params['cookies']['preferences'] = \
-        'lang_homepageEEEs/air/{country}/N1NsslEEE1N1Nfont_sizeEEEmediumN1Nrecent_results_filterEEE1N1Nlanguage_uiEEEenglishN1Ndisable_open_in_new_windowEEE0N1Ncolor_schemeEEEnewN1Nnum_of_resultsEEE10N1N'.format(country=country)  # noqa
+                      'startat': offset}
+
+    # set language if specified
+    if params['language'] != 'all':
+        params['data']['with_language'] = 'lang_' + params['language'].split('_')[0]
+
     return params
 
 
+# get response from search-request
 def response(resp):
     results = []
+
     dom = html.fromstring(resp.content)
-    # ads xpath //div[@id="results"]/div[@id="sponsored"]//div[@class="result"]
-    # not ads: div[@class="result"] are the direct childs of div[@id="results"]
-    for result in dom.xpath('//div[@class="result"]'):
-        link = result.xpath('.//h3/a')[0]
+
+    # parse results
+    for result in dom.xpath(results_xpath):
+        link = result.xpath(link_xpath)[0]
         url = link.attrib.get('href')
-        if url.startswith('http://www.google.')\
-           or url.startswith('https://www.google.'):
-            continue
         title = escape(link.text_content())
 
-        content = ''
+        # block google-ad urls
+        if re.match("^http(s|)://www.google.[a-z]+/aclk.*$", url):
+            continue
+
         if result.xpath('./p[@class="desc"]'):
             content = escape(result.xpath('./p[@class="desc"]')[0].text_content())
+        else:
+            content = ''
 
-        results.append({'url': url, 'title': title, 'content': content})
+        # append result
+        results.append({'url': url,
+                        'title': title,
+                        'content': content})
 
+    # return results
     return results
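
The old startswith() pair is replaced by one regex that catches ad redirects on any Google TLD. Exercising the pattern from above on illustrative URLs:

import re

pattern = "^http(s|)://www.google.[a-z]+/aclk.*$"

print bool(re.match(pattern, 'https://www.google.com/aclk?sa=l'))  # True, skipped
print bool(re.match(pattern, 'https://example.org/page'))          # False, kept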

searx/engines/twitter.py (+35 −2)

@@ -1,30 +1,63 @@
+## Twitter (Social media)
+#
+# @website     https://twitter.com/
+# @provide-api yes (https://dev.twitter.com/docs/using-search)
+#
+# @using-api   no
+# @results     HTML (using search portal)
+# @stable      no (HTML can change)
+# @parse       url, title, content
+#
+# @todo        publishedDate
+
 from urlparse import urljoin
 from urllib import urlencode
 from lxml import html
 from cgi import escape
 
+# engine dependent config
 categories = ['social media']
+language_support = True
 
+# search-url
 base_url = 'https://twitter.com/'
 search_url = base_url+'search?'
+
+# specific xpath variables
+results_xpath = '//li[@data-item-type="tweet"]'
+link_xpath = './/small[@class="time"]//a'
 title_xpath = './/span[@class="username js-action-profile-name"]//text()'
 content_xpath = './/p[@class="js-tweet-text tweet-text"]//text()'
 
 
+# do search-request
 def request(query, params):
     params['url'] = search_url + urlencode({'q': query})
+
+    # set language if specified
+    if params['language'] != 'all':
+        params['cookies']['lang'] = params['language'].split('_')[0]
+
     return params
 
 
+# get response from search-request
 def response(resp):
     results = []
+
     dom = html.fromstring(resp.text)
-    for tweet in dom.xpath('//li[@data-item-type="tweet"]'):
-        link = tweet.xpath('.//small[@class="time"]//a')[0]
+
+    # parse results
+    for tweet in dom.xpath(results_xpath):
+        link = tweet.xpath(link_xpath)[0]
         url = urljoin(base_url, link.attrib.get('href'))
         title = ''.join(tweet.xpath(title_xpath))
         content = escape(''.join(tweet.xpath(content_xpath)))
+
+        # append result
         results.append({'url': url,
                         'title': title,
                         'content': content})
+
+    # return results
     return results
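
Language support here is a single cookie. What the new branch does to params, sketched with a hand-built dict shaped like the one searx passes in ('language', 'cookies'):

params = {'language': 'de_DE', 'cookies': {}}

# set language if specified
if params['language'] != 'all':
    params['cookies']['lang'] = params['language'].split('_')[0]

print params['cookies']  # prints: {'lang': 'de'}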

searx/engines/wikipedia.py (+47 −10)

@@ -1,30 +1,67 @@
+## Wikipedia (Web)
+#
+# @website     http://www.wikipedia.org
+# @provide-api yes (http://www.mediawiki.org/wiki/API:Search)
+#
+# @using-api   yes
+# @results     JSON
+# @stable      yes
+# @parse       url, title
+#
+# @todo        content
+
 from json import loads
 from urllib import urlencode, quote
 
-url = 'https://{language}.wikipedia.org/'
-
-search_url = url + 'w/api.php?action=query&list=search&{query}&srprop=timestamp&format=json&sroffset={offset}'  # noqa
-
-number_of_results = 10
-
+# engine dependent config
+categories = ['general']
 language_support = True
+paging = True
+number_of_results = 1
+
+# search-url
+url = 'https://{language}.wikipedia.org/'
+search_url = url + 'w/api.php?action=query&list=search&{query}&srprop=timestamp&format=json&sroffset={offset}&srlimit={limit}'  # noqa
 
 
+# do search-request
 def request(query, params):
-    offset = (params['pageno'] - 1) * 10
+    offset = (params['pageno'] - 1) * number_of_results
+
     if params['language'] == 'all':
         language = 'en'
     else:
         language = params['language'].split('_')[0]
+
+    # write search-language back to params, required in response
     params['language'] = language
+
     params['url'] = search_url.format(query=urlencode({'srsearch': query}),
                                       offset=offset,
+                                      limit=number_of_results,
                                       language=language)
+
     return params
 
 
+# get response from search-request
 def response(resp):
+    results = []
+
     search_results = loads(resp.text)
-    res = search_results.get('query', {}).get('search', [])
-    return [{'url': url.format(language=resp.search_params['language']) + 'wiki/' + quote(result['title'].replace(' ', '_').encode('utf-8')),  # noqa
-        'title': result['title']} for result in res[:int(number_of_results)]]
+
+    # return empty array if there are no results
+    if not search_results.get('query', {}).get('search'):
+        return []
+
+    # parse results
+    for result in search_results['query']['search']:
+        res_url = url.format(language=resp.search_params['language']) + 'wiki/' + quote(result['title'].replace(' ', '_').encode('utf-8'))  # noqa
+
+        # append result
+        results.append({'url': res_url,
+                        'title': result['title'],
+                        'content': ''})
+
+    # return results
+    return results
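
With number_of_results now driving both sroffset and srlimit, page n starts at result (n-1)*number_of_results and the two stay consistent however the setting is overridden. The arithmetic, spelled out:

number_of_results = 1  # module default; settings.yml may override it

for pageno in (1, 2, 3):
    offset = (pageno - 1) * number_of_results
    print 'page %d -> sroffset=%d, srlimit=%d' % (
        pageno, offset, number_of_results)
# page 1 -> sroffset=0, srlimit=1, and so on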

searx/engines/youtube.py (+36 −7)

@@ -1,42 +1,69 @@
+## Youtube (Videos)
+#
+# @website     https://www.youtube.com/
+# @provide-api yes (http://gdata-samples-youtube-search-py.appspot.com/)
+#
+# @using-api   yes
+# @results     JSON
+# @stable      yes
+# @parse       url, title, content, publishedDate, thumbnail
+
 from json import loads
 from urllib import urlencode
 from dateutil import parser
 
+# engine dependent config
 categories = ['videos']
-
-search_url = ('https://gdata.youtube.com/feeds/api/videos'
-              '?alt=json&{query}&start-index={index}&max-results=25')  # noqa
-
 paging = True
+language_support = True
+
+# search-url
+base_url = 'https://gdata.youtube.com/feeds/api/videos'
+search_url = base_url + '?alt=json&{query}&start-index={index}&max-results=5'  # noqa
 
 
+# do search-request
 def request(query, params):
-    index = (params['pageno'] - 1) * 25 + 1
+    index = (params['pageno'] - 1) * 5 + 1
+
     params['url'] = search_url.format(query=urlencode({'q': query}),
                                       index=index)
+
+    # add language tag if specified
+    if params['language'] != 'all':
+        params['url'] += '&lr=' + params['language'].split('_')[0]
+
     return params
 
 
+# get response from search-request
 def response(resp):
     results = []
+
     search_results = loads(resp.text)
+
+    # return empty array if there are no results
     if not 'feed' in search_results:
-        return results
+        return []
+
     feed = search_results['feed']
 
+    # parse results
     for result in feed['entry']:
         url = [x['href'] for x in result['link'] if x['type'] == 'text/html']
+
         if not url:
             return
+
         # remove tracking
         url = url[0].replace('feature=youtube_gdata', '')
         if url.endswith('&'):
             url = url[:-1]
+
         title = result['title']['$t']
         content = ''
         thumbnail = ''
 
-#"2013-12-31T15:22:51.000Z"
         pubdate = result['published']['$t']
         publishedDate = parser.parse(pubdate)
 
@@ -49,6 +76,7 @@ def response(resp):
         else:
             content = result['content']['$t']
 
+        # append result
         results.append({'url': url,
                         'title': title,
                         'content': content,
@@ -56,4 +84,5 @@ def response(resp):
                         'publishedDate': publishedDate,
                         'thumbnail': thumbnail})
 
+    # return results
     return results
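
The tracking removal keeps the first text/html link, strips the feature=youtube_gdata parameter, and tidies any trailing '&'. The same three lines run against an illustrative URL:

url = 'https://www.youtube.com/watch?v=abc123&feature=youtube_gdata'

# remove tracking
url = url.replace('feature=youtube_gdata', '')
if url.endswith('&'):
    url = url[:-1]

print url  # prints: https://www.youtube.com/watch?v=abc123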

searx/settings.yml (+1 −15)

@@ -11,9 +11,8 @@ server:
 engines:
   - name : wikipedia
     engine : wikipedia
-    number_of_results : 1
-    paging : False
     shortcut : wp
+#    number_of_results : 1 # default is 1
 
   - name : bing
     engine : bing
@@ -37,7 +36,6 @@ engines:
 
   - name : deviantart
     engine : deviantart
-    categories : images
     shortcut : da
     timeout: 3.0
 
@@ -47,7 +45,6 @@ engines:
 
   - name : duckduckgo
     engine : duckduckgo
-    locale : en-us
     shortcut : ddg
 
 # down - website is under criminal investigation by the UK
@@ -64,12 +61,10 @@ engines:
 
   - name : general-file
     engine : generalfile
-    categories : files
     shortcut : gf
 
   - name : github
     engine : github
-    categories : it
     shortcut : gh
 
   - name : google
@@ -86,23 +81,18 @@ engines:
 
   - name : piratebay
     engine : piratebay
-    categories : videos, music, files
     shortcut : tpb
 
   - name : soundcloud
     engine : soundcloud
-    categories : music
     shortcut : sc
 
   - name : stackoverflow
     engine : stackoverflow
-    categories : it
     shortcut : st
 
   - name : startpage
     engine : startpage
-    base_url : 'https://startpage.com/'
-    search_url : 'https://startpage.com/do/search'
     shortcut : sp
 
 # +30% page load time
@@ -113,7 +103,6 @@ engines:
 
   - name : twitter
     engine : twitter
-    categories : social media
     shortcut : tw
 
 # maybe in a fun category
@@ -142,13 +131,10 @@ engines:
 
   - name : youtube
     engine : youtube
-    categories : videos
     shortcut : yt
 
   - name : dailymotion
     engine : dailymotion
-    locale : en_US
-    categories : videos
     shortcut : dm
 
   - name : vimeo
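
Since defaults such as categories, paging, and locale now live in the engine modules, a settings.yml entry only needs name, engine, and shortcut; anything else is an explicit override. A hedged example of such an entry (the override value is illustrative):

  - name : wikipedia
    engine : wikipedia
    shortcut : wp
    number_of_results : 5   # overrides the module default of 1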