Merge pull request #89 from pointhi/engines

update search engines and add comments to them

Adam Tauber, 10 years ago
parent commit f825752145
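Every engine touched below follows the same module shape, which this PR makes explicit with comments: engine dependent config at the top (categories, paging, language_support), a search-url template, then a request(query, params) / response(resp) pair. A minimal sketch of that pattern, with a hypothetical endpoint (not a file from this PR):

    from urllib import urlencode

    # engine dependent config
    categories = ['general']
    paging = True

    # search-url (hypothetical example endpoint)
    search_url = 'https://example.com/search?{query}&page={pageno}'


    # do search-request: fill in params['url'] (plus headers/cookies if needed)
    def request(query, params):
        params['url'] = search_url.format(query=urlencode({'q': query}),
                                          pageno=params['pageno'])
        return params


    # get response from search-request: return a list of result dicts
    def response(resp):
        results = []
        # parse resp.text and append {'url': ..., 'title': ..., 'content': ...}
        return results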

searx/engines/bing.py  (+1 −0)

 base_url = 'https://www.bing.com/'
 search_string = 'search?{query}&first={offset}'

+
 # do search-request
 def request(query, params):
     offset = (params['pageno'] - 1) * 10 + 1

searx/engines/bing_images.py  (+1 −0)

 base_url = 'https://www.bing.com/'
 search_string = 'images/search?{query}&count=10&first={offset}'

+
 # do search-request
 def request(query, params):
     offset = (params['pageno'] - 1) * 10 + 1

searx/engines/bing_news.py  (+1 −0)

 base_url = 'https://www.bing.com/'
 search_string = 'news/search?{query}&first={offset}'

+
 # do search-request
 def request(query, params):
     offset = (params['pageno'] - 1) * 10 + 1

searx/engines/deviantart.py  (+29 −3)

+## Deviantart (Images)
+#
+# @website     https://www.deviantart.com/
+# @provide-api yes (https://www.deviantart.com/developers/) (RSS)
+#
+# @using-api   no (TODO, rewrite to api)
+# @results     HTML
+# @stable      no (HTML can change)
+# @parse       url, title, thumbnail, img_src
+#
+# @todo        rewrite to api
+
 from urllib import urlencode
 from urlparse import urljoin
 from lxml import html

+# engine dependent config
 categories = ['images']
+paging = True

+# search-url
 base_url = 'https://www.deviantart.com/'
 search_url = base_url+'search?offset={offset}&{query}'

-paging = True
-

+# do search-request
 def request(query, params):
     offset = (params['pageno'] - 1) * 24
+
     params['url'] = search_url.format(offset=offset,
                                       query=urlencode({'q': query}))
+
     return params


+# get response from search-request
 def response(resp):
     results = []
+
+    # return empty array if a redirection code is returned
     if resp.status_code == 302:
-        return results
+        return []
+
     dom = html.fromstring(resp.text)
+
+    # parse results
     for result in dom.xpath('//div[contains(@class, "tt-a tt-fh")]'):
         link = result.xpath('.//a[contains(@class, "thumb")]')[0]
         url = urljoin(base_url, link.attrib.get('href'))
         title_links = result.xpath('.//span[@class="details"]//a[contains(@class, "t")]')  # noqa
         title = ''.join(title_links[0].xpath('.//text()'))
         img_src = link.xpath('.//img')[0].attrib['src']
+
+        # append result
         results.append({'url': url,
                         'title': title,
                         'img_src': img_src,
                         'template': 'images.html'})
+
+    # return results
     return results

searx/engines/duckduckgo.py  (+38 −34)

+## DuckDuckGo (Web)
+#
+# @website     https://duckduckgo.com/
+# @provide-api yes (https://duckduckgo.com/api), but not all results from search-site
+#
+# @using-api   no
+# @results     HTML (using search portal)
+# @stable      no (HTML can change)
+# @parse       url, title, content
+#
+# @todo        rewrite to api
+# @todo        language support (the currently used site does not support changing the language)
+
 from urllib import urlencode
 from lxml.html import fromstring
 from searx.utils import html_to_text

-url = 'https://duckduckgo.com/html?{query}&s={offset}'
+# engine dependent config
+categories = ['general']
+paging = True
 locale = 'us-en'

+# search-url
+url = 'https://duckduckgo.com/html?{query}&s={offset}'
+
+# specific xpath variables
+result_xpath = '//div[@class="results_links results_links_deep web-result"]'  # noqa
+url_xpath = './/a[@class="large"]/@href'
+title_xpath = './/a[@class="large"]//text()'
+content_xpath = './/div[@class="snippet"]//text()'

+
+# do search-request
 def request(query, params):
     offset = (params['pageno'] - 1) * 30
-    q = urlencode({'q': query,
-                   'l': locale})
-    params['url'] = url.format(query=q, offset=offset)
+
+    params['url'] = url.format(
+        query=urlencode({'q': query, 'l': locale}),
+        offset=offset)
+
     return params


+# get response from search-request
 def response(resp):
-    result_xpath = '//div[@class="results_links results_links_deep web-result"]'  # noqa
-    url_xpath = './/a[@class="large"]/@href'
-    title_xpath = './/a[@class="large"]//text()'
-    content_xpath = './/div[@class="snippet"]//text()'
     results = []

     doc = fromstring(resp.text)

+    # parse results
     for r in doc.xpath(result_xpath):
         try:
             res_url = r.xpath(url_xpath)[-1]
         except:
             continue
+
         if not res_url:
             continue
+
         title = html_to_text(''.join(r.xpath(title_xpath)))
         content = html_to_text(''.join(r.xpath(content_xpath)))
+
+        # append result
         results.append({'title': title,
                         'content': content,
                         'url': res_url})

+    # return results
     return results
-
-
-#from json import loads
-#search_url = url + 'd.js?{query}&p=1&s={offset}'
-#
-#paging = True
-#
-#
-#def request(query, params):
-#    offset = (params['pageno'] - 1) * 30
-#    q = urlencode({'q': query,
-#                   'l': locale})
-#    params['url'] = search_url.format(query=q, offset=offset)
-#    return params
-#
-#
-#def response(resp):
-#    results = []
-#    search_res = loads(resp.text[resp.text.find('[{'):-2])[:-1]
-#    for r in search_res:
-#        if not r.get('t'):
-#            continue
-#        results.append({'title': r['t'],
-#                       'content': html_to_text(r['a']),
-#                       'url': r['u']})
-#    return results

searx/engines/dummy.py  (+8 −0)

+## Dummy
+#
+# @results     empty array
+# @stable      yes
+
+
+# do search-request
 def request(query, params):
     return params


+# get response from search-request
 def response(resp):
     return []

searx/engines/generalfile.py  (+28 −3)

+## General Files (Files)
+#
+# @website     http://www.general-files.org
+# @provide-api no (nothing found)
+#
+# @using-api   no (because nothing found)
+# @results     HTML (using search portal)
+# @stable      no (HTML can change)
+# @parse       url, title, content
+#
+# @todo        detect torrents?
+
 from lxml import html

+# engine dependent config
+categories = ['files']
+paging = True

+# search-url
 base_url = 'http://www.general-file.com'
 search_url = base_url + '/files-{letter}/{query}/{pageno}'

+# specific xpath variables
 result_xpath = '//table[@class="block-file"]'
 title_xpath = './/h2/a//text()'
 url_xpath = './/h2/a/@href'
 content_xpath = './/p//text()'

-paging = True
-

+# do search-request
 def request(query, params):
+
     params['url'] = search_url.format(query=query,
                                       letter=query[0],
                                       pageno=params['pageno'])
+
     return params


+# get response from search-request
 def response(resp):
-
     results = []
+
     dom = html.fromstring(resp.text)
+
+    # parse results
     for result in dom.xpath(result_xpath):
         url = result.xpath(url_xpath)[0]
+
         # skip fast download links
         if not url.startswith('/'):
             continue
+
+        # append result
         results.append({'url': base_url + url,
                         'title': ''.join(result.xpath(title_xpath)),
                         'content': ''.join(result.xpath(content_xpath))})

+    # return results
     return results

searx/engines/github.py  (+30 −2)

+## Github (It)
+#
+# @website     https://github.com/
+# @provide-api yes (https://developer.github.com/v3/)
+#
+# @using-api   yes
+# @results     JSON
+# @stable      yes (using api)
+# @parse       url, title, content
+
 from urllib import urlencode
 from json import loads
 from cgi import escape

+# engine dependent config
 categories = ['it']

+# search-url
 search_url = 'https://api.github.com/search/repositories?sort=stars&order=desc&{query}'  # noqa

 accept_header = 'application/vnd.github.preview.text-match+json'


+# do search-request
 def request(query, params):
     params['url'] = search_url.format(query=urlencode({'q': query}))
+
     params['headers']['Accept'] = accept_header
+
     return params


+# get response from search-request
 def response(resp):
     results = []
+
     search_res = loads(resp.text)
+
+    # check if items are received
     if not 'items' in search_res:
-        return results
+        return []
+
+    # parse results
     for res in search_res['items']:
         title = res['name']
         url = res['html_url']
+
         if res['description']:
             content = escape(res['description'][:500])
         else:
             content = ''
-        results.append({'url': url, 'title': title, 'content': content})
+
+        # append result
+        results.append({'url': url,
+                        'title': title,
+                        'content': content})
+
+    # return results
     return results

searx/engines/piratebay.py  (+34 −7)

+## Piratebay (Videos, Music, Files)
+#
+# @website     https://thepiratebay.se
+# @provide-api no (nothing found)
+#
+# @using-api   no
+# @results     HTML (using search portal)
+# @stable      no (HTML can change)
+# @parse       url, title, content, seed, leech, magnetlink
+
 from urlparse import urljoin
 from cgi import escape
 from urllib import quote
 from lxml import html
 from operator import itemgetter

-categories = ['videos', 'music']
+# engine dependent config
+categories = ['videos', 'music', 'files']
+paging = True

+# search-url
 url = 'https://thepiratebay.se/'
 search_url = url + 'search/{search_term}/{pageno}/99/{search_type}'
-search_types = {'videos': '200',
+
+# piratebay specific type-definitions
+search_types = {'files': '0',
                 'music': '100',
-                'files': '0'}
+                'videos': '200'}

+# specific xpath variables
 magnet_xpath = './/a[@title="Download this torrent using magnet"]'
 content_xpath = './/font[@class="detDesc"]//text()'

-paging = True
-

+# do search-request
 def request(query, params):
-    search_type = search_types.get(params['category'], '200')
+    search_type = search_types.get(params['category'], '0')
+
     params['url'] = search_url.format(search_term=quote(query),
                                       search_type=search_type,
                                       pageno=params['pageno'] - 1)
+
     return params


+# get response from search-request
 def response(resp):
     results = []
+
     dom = html.fromstring(resp.text)
+
     search_res = dom.xpath('//table[@id="searchResult"]//tr')

+    # return empty array if nothing is found
     if not search_res:
-        return results
+        return []

+    # parse results
     for result in search_res[1:]:
         link = result.xpath('.//div[@class="detName"]//a')[0]
         href = urljoin(url, link.attrib.get('href'))
         title = escape(link.text_content())
         content = escape(' '.join(result.xpath(content_xpath)))
         seed, leech = result.xpath('.//td[@align="right"]/text()')[:2]

+        # convert seed to int if possible
         if seed.isdigit():
             seed = int(seed)
         else:
             seed = 0

+        # convert leech to int if possible
         if leech.isdigit():
             leech = int(leech)
         else:
             leech = 0

         magnetlink = result.xpath(magnet_xpath)[0]
+
+        # append result
         results.append({'url': href,
                         'title': title,
                         'content': content,
                         'seed': seed,
                         'leech': leech,
                         'magnetlink': magnetlink.attrib['href'],
                         'template': 'torrent.html'})

+    # return results sorted by seeder
     return sorted(results, key=itemgetter('seed'), reverse=True)
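Note the behavior change in request() above: the fallback search_type for an unrecognized category is now '0' (files) instead of '200' (videos). A small illustration, with the mapping copied from the diff:

    search_types = {'files': '0',
                    'music': '100',
                    'videos': '200'}

    for category in ('videos', 'music', 'files', 'unknown'):
        # an unrecognized category now resolves to '0' (files);
        # before this change the fallback was '200' (videos)
        print('%s -> %s' % (category, search_types.get(category, '0')))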

searx/engines/soundcloud.py  (+29 −4)

+## Soundcloud (Music)
+#
+# @website     https://soundcloud.com
+# @provide-api yes (https://developers.soundcloud.com/)
+#
+# @using-api   yes
+# @results     JSON
+# @stable      yes
+# @parse       url, title, content
+
 from json import loads
 from urllib import urlencode

+# engine dependent config
 categories = ['music']
+paging = True

+# api-key
 guest_client_id = 'b45b1aa10f1ac2941910a7f0d10f8e28'
-url = 'https://api.soundcloud.com/'
-search_url = url + 'search?{query}&facet=model&limit=20&offset={offset}&linked_partitioning=1&client_id='+guest_client_id  # noqa

-paging = True
+# search-url
+url = 'https://api.soundcloud.com/'
+search_url = url + 'search?{query}&facet=model&limit=20&offset={offset}&linked_partitioning=1&client_id={client_id}'


+# do search-request
 def request(query, params):
     offset = (params['pageno'] - 1) * 20
+
     params['url'] = search_url.format(query=urlencode({'q': query}),
-                                      offset=offset)
+                                      offset=offset,
+                                      client_id=guest_client_id)
+
     return params


+# get response from search-request
 def response(resp):
     results = []
+
     search_res = loads(resp.text)
+
+    # parse results
     for result in search_res.get('collection', []):
         if result['kind'] in ('track', 'playlist'):
             title = result['title']
             content = result['description']
+
+            # append result
             results.append({'url': result['permalink_url'],
                             'title': title,
                             'content': content})
+
+    # return results
     return results
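The search_url rework above replaces string concatenation of the client_id with a {client_id} placeholder that request() fills in. To see the resulting request URL, the template can be expanded with the values from the diff (page 2, hence offset 20):

    from urllib import urlencode

    guest_client_id = 'b45b1aa10f1ac2941910a7f0d10f8e28'
    url = 'https://api.soundcloud.com/'
    search_url = url + 'search?{query}&facet=model&limit=20&offset={offset}&linked_partitioning=1&client_id={client_id}'

    # prints the fully expanded API request URL
    print(search_url.format(query=urlencode({'q': 'test'}),
                            offset=20,
                            client_id=guest_client_id))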

searx/engines/stackoverflow.py  (+35 −7)

+## Stackoverflow (It)
+#
+# @website     https://stackoverflow.com/
+# @provide-api not clear (https://api.stackexchange.com/docs/advanced-search)
+#
+# @using-api   no
+# @results     HTML
+# @stable      no (HTML can change)
+# @parse       url, title, content
+
 from urlparse import urljoin
 from cgi import escape
 from urllib import urlencode
 from lxml import html

+# engine dependent config
 categories = ['it']
+paging = True

+# search-url
 url = 'http://stackoverflow.com/'
 search_url = url+'search?{query}&page={pageno}'
-result_xpath = './/div[@class="excerpt"]//text()'

-paging = True
+# specific xpath variables
+results_xpath = '//div[contains(@class,"question-summary")]'
+link_xpath = './/div[@class="result-link"]//a|.//div[@class="summary"]//h3//a'
+title_xpath = './/text()'
+content_xpath = './/div[@class="excerpt"]//text()'


+# do search-request
 def request(query, params):
     params['url'] = search_url.format(query=urlencode({'q': query}),
                                       pageno=params['pageno'])
+
     return params


+# get response from search-request
 def response(resp):
     results = []
+
     dom = html.fromstring(resp.text)
-    for result in dom.xpath('//div[@class="question-summary search-result"]'):
-        link = result.xpath('.//div[@class="result-link"]//a')[0]
+
+    # parse results
+    for result in dom.xpath(results_xpath):
+        link = result.xpath(link_xpath)[0]
         href = urljoin(url, link.attrib.get('href'))
-        title = escape(' '.join(link.xpath('.//text()')))
-        content = escape(' '.join(result.xpath(result_xpath)))
-        results.append({'url': href, 'title': title, 'content': content})
+        title = escape(' '.join(link.xpath(title_xpath)))
+        content = escape(' '.join(result.xpath(content_xpath)))
+
+        # append result
+        results.append({'url': href,
+                        'title': title,
+                        'content': content})
+
+    # return results
     return results

searx/engines/startpage.py  (+53 −21)

+## Startpage (Web)
+#
+# @website     https://startpage.com
+# @provide-api no (nothing found)
+#
+# @using-api   no
+# @results     HTML
+# @stable      no (HTML can change)
+# @parse       url, title, content
+#
+# @todo        paging
+
 from urllib import urlencode
 from lxml import html
 from cgi import escape
+import re
+
+# engine dependent config
+categories = ['general']
+# there is a mechanism to block "bot" searches (probably the parameter qid); it requires storing qid's between multiple search-calls
+#paging = False
+language_support = True

-base_url = None
-search_url = None
+# search-url
+base_url = 'https://startpage.com/'
+search_url = base_url + 'do/search'

-# TODO paging
-paging = False
-# TODO complete list of country mapping
-country_map = {'en_US': 'eng',
-               'en_UK': 'uk',
-               'nl_NL': 'ned'}
+# specific xpath variables
+# ads xpath: //div[@id="results"]/div[@id="sponsored"]//div[@class="result"]
+# not ads: div[@class="result"] are the direct children of div[@id="results"]
+results_xpath = '//div[@class="result"]'
+link_xpath = './/h3/a'


+# do search-request
 def request(query, params):
+    offset = (params['pageno'] - 1) * 10
     query = urlencode({'q': query})[2:]
+
     params['url'] = search_url
     params['method'] = 'POST'
     params['data'] = {'query': query,
-                      'startat': (params['pageno'] - 1) * 10}  # offset
-    country = country_map.get(params['language'], 'eng')
-    params['cookies']['preferences'] = \
-        'lang_homepageEEEs/air/{country}/N1NsslEEE1N1Nfont_sizeEEEmediumN1Nrecent_results_filterEEE1N1Nlanguage_uiEEEenglishN1Ndisable_open_in_new_windowEEE0N1Ncolor_schemeEEEnewN1Nnum_of_resultsEEE10N1N'.format(country=country)  # noqa
+                      'startat': offset}
+
+    # set language if specified
+    if params['language'] != 'all':
+        params['data']['with_language'] = 'lang_' + params['language'].split('_')[0]
+
     return params


+# get response from search-request
 def response(resp):
     results = []
+
     dom = html.fromstring(resp.content)
-    # ads xpath //div[@id="results"]/div[@id="sponsored"]//div[@class="result"]
-    # not ads: div[@class="result"] are the direct childs of div[@id="results"]
-    for result in dom.xpath('//div[@class="result"]'):
-        link = result.xpath('.//h3/a')[0]
+
+    # parse results
+    for result in dom.xpath(results_xpath):
+        link = result.xpath(link_xpath)[0]
         url = link.attrib.get('href')
-        if url.startswith('http://www.google.')\
-           or url.startswith('https://www.google.'):
-            continue
         title = escape(link.text_content())

-        content = ''
+        # block google-ad url's
+        if re.match("^http(s|)://www.google.[a-z]+/aclk.*$", url):
+            continue
+
         if result.xpath('./p[@class="desc"]'):
             content = escape(result.xpath('./p[@class="desc"]')[0].text_content())
+        else:
+            content = ''

-        results.append({'url': url, 'title': title, 'content': content})
+        # append result
+        results.append({'url': url,
+                        'title': title,
+                        'content': content})

+    # return results
     return results
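The cookie-based country mapping is gone; instead request() derives a with_language form field from the searx locale. The derivation below is extracted from the diff; the locale strings are just example inputs:

    def with_language(language):
        # 'all' means: do not send any language restriction
        if language == 'all':
            return None
        return 'lang_' + language.split('_')[0]

    for language in ('all', 'en_US', 'de_DE'):
        print('%s -> %s' % (language, with_language(language)))
    # all -> None, en_US -> lang_en, de_DE -> lang_de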

searx/engines/twitter.py  (+35 −2)

+## Twitter (Social media)
+#
+# @website     https://twitter.com/
+# @provide-api yes (https://dev.twitter.com/docs/using-search)
+#
+# @using-api   no
+# @results     HTML (using search portal)
+# @stable      no (HTML can change)
+# @parse       url, title, content
+#
+# @todo        publishedDate
+
 from urlparse import urljoin
 from urllib import urlencode
 from lxml import html
 from cgi import escape

+# engine dependent config
 categories = ['social media']
+language_support = True

+# search-url
 base_url = 'https://twitter.com/'
 search_url = base_url+'search?'
+
+# specific xpath variables
+results_xpath = '//li[@data-item-type="tweet"]'
+link_xpath = './/small[@class="time"]//a'
 title_xpath = './/span[@class="username js-action-profile-name"]//text()'
 content_xpath = './/p[@class="js-tweet-text tweet-text"]//text()'


+# do search-request
 def request(query, params):
     params['url'] = search_url + urlencode({'q': query})
+
+    # set language if specified
+    if params['language'] != 'all':
+        params['cookies']['lang'] = params['language'].split('_')[0]
+
     return params


+# get response from search-request
 def response(resp):
     results = []
+
     dom = html.fromstring(resp.text)
-    for tweet in dom.xpath('//li[@data-item-type="tweet"]'):
-        link = tweet.xpath('.//small[@class="time"]//a')[0]
+
+    # parse results
+    for tweet in dom.xpath(results_xpath):
+        link = tweet.xpath(link_xpath)[0]
         url = urljoin(base_url, link.attrib.get('href'))
         title = ''.join(tweet.xpath(title_xpath))
         content = escape(''.join(tweet.xpath(content_xpath)))
+
+        # append result
         results.append({'url': url,
                         'title': title,
                         'content': content})
+
+    # return results
     return results

searx/engines/wikipedia.py  (+47 −10)

+## Wikipedia (Web)
+#
+# @website     http://www.wikipedia.org
+# @provide-api yes (http://www.mediawiki.org/wiki/API:Search)
+#
+# @using-api   yes
+# @results     JSON
+# @stable      yes
+# @parse       url, title
+#
+# @todo        content
+
 from json import loads
 from urllib import urlencode, quote

-url = 'https://{language}.wikipedia.org/'
-
-search_url = url + 'w/api.php?action=query&list=search&{query}&srprop=timestamp&format=json&sroffset={offset}'  # noqa
-
-number_of_results = 10
-
+# engine dependent config
+categories = ['general']
 language_support = True
+paging = True
+number_of_results = 1
+
+# search-url
+url = 'https://{language}.wikipedia.org/'
+search_url = url + 'w/api.php?action=query&list=search&{query}&srprop=timestamp&format=json&sroffset={offset}&srlimit={limit}'  # noqa


+# do search-request
 def request(query, params):
-    offset = (params['pageno'] - 1) * 10
+    offset = (params['pageno'] - 1) * number_of_results
+
     if params['language'] == 'all':
         language = 'en'
     else:
         language = params['language'].split('_')[0]
+
+    # write search-language back to params, required in response
     params['language'] = language
+
     params['url'] = search_url.format(query=urlencode({'srsearch': query}),
                                       offset=offset,
+                                      limit=number_of_results,
                                       language=language)
+
     return params


+# get response from search-request
 def response(resp):
+    results = []
+
     search_results = loads(resp.text)
-    res = search_results.get('query', {}).get('search', [])
-    return [{'url': url.format(language=resp.search_params['language']) + 'wiki/' + quote(result['title'].replace(' ', '_').encode('utf-8')),  # noqa
-        'title': result['title']} for result in res[:int(number_of_results)]]
+
+    # return empty array if there are no results
+    if not search_results.get('query', {}).get('search'):
+        return []
+
+    # parse results
+    for result in search_results['query']['search']:
+        res_url = url.format(language=resp.search_params['language']) + 'wiki/' + quote(result['title'].replace(' ', '_').encode('utf-8'))
+
+        # append result
+        results.append({'url': res_url,
+                        'title': result['title'],
+                        'content': ''})
+
+    # return results
+    return results
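The paging rework above ties sroffset to number_of_results, so one searx page maps onto one API page of number_of_results entries. Worked through for the first three pages with number_of_results = 1, as set in the diff:

    number_of_results = 1

    for pageno in (1, 2, 3):
        offset = (pageno - 1) * number_of_results
        # these values end up in the sroffset / srlimit query parameters
        print('pageno %d -> sroffset=%d&srlimit=%d'
              % (pageno, offset, number_of_results))
    # pageno 1 -> sroffset=0&srlimit=1 ... pageno 3 -> sroffset=2&srlimit=1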

searx/engines/youtube.py  (+36 −7)

+## Youtube (Videos)
+#
+# @website     https://www.youtube.com/
+# @provide-api yes (http://gdata-samples-youtube-search-py.appspot.com/)
+#
+# @using-api   yes
+# @results     JSON
+# @stable      yes
+# @parse       url, title, content, publishedDate, thumbnail
+
 from json import loads
 from urllib import urlencode
 from dateutil import parser

+# engine dependent config
 categories = ['videos']
-
-search_url = ('https://gdata.youtube.com/feeds/api/videos'
-              '?alt=json&{query}&start-index={index}&max-results=25')  # noqa
-
 paging = True
+language_support = True
+
+# search-url
+base_url = 'https://gdata.youtube.com/feeds/api/videos'
+search_url = base_url + '?alt=json&{query}&start-index={index}&max-results=5'  # noqa


+# do search-request
 def request(query, params):
-    index = (params['pageno'] - 1) * 25 + 1
+    index = (params['pageno'] - 1) * 5 + 1
+
     params['url'] = search_url.format(query=urlencode({'q': query}),
                                       index=index)
+
+    # add language tag if specified
+    if params['language'] != 'all':
+        params['url'] += '&lr=' + params['language'].split('_')[0]
+
     return params


+# get response from search-request
 def response(resp):
     results = []
+
     search_results = loads(resp.text)
+
+    # return empty array if there are no results
     if not 'feed' in search_results:
-        return results
+        return []
+
     feed = search_results['feed']

+    # parse results
     for result in feed['entry']:
         url = [x['href'] for x in result['link'] if x['type'] == 'text/html']
+
         if not url:
             return
+
         # remove tracking
         url = url[0].replace('feature=youtube_gdata', '')
         if url.endswith('&'):
             url = url[:-1]
+
         title = result['title']['$t']
         content = ''
         thumbnail = ''

-#"2013-12-31T15:22:51.000Z"
         pubdate = result['published']['$t']
         publishedDate = parser.parse(pubdate)

…
         else:
             content = result['content']['$t']

+        # append result
         results.append({'url': url,
                         'title': title,
                         'content': content,
…
                         'publishedDate': publishedDate,
                         'thumbnail': thumbnail})

+    # return results
     return results

searx/settings.yml  (+1 −15)

 engines:
   - name : wikipedia
     engine : wikipedia
-    number_of_results : 1
-    paging : False
     shortcut : wp
+#    number_of_results : 1 # default is 1

   - name : bing
     engine : bing
…

   - name : deviantart
     engine : deviantart
-    categories : images
     shortcut : da
     timeout: 3.0
…

   - name : duckduckgo
     engine : duckduckgo
-    locale : en-us
     shortcut : ddg

 # down - website is under criminal investigation by the UK
…

   - name : general-file
     engine : generalfile
-    categories : files
     shortcut : gf

   - name : github
     engine : github
-    categories : it
     shortcut : gh

   - name : google
…

   - name : piratebay
     engine : piratebay
-    categories : videos, music, files
     shortcut : tpb

   - name : soundcloud
     engine : soundcloud
-    categories : music
     shortcut : sc

   - name : stackoverflow
     engine : stackoverflow
-    categories : it
     shortcut : st

   - name : startpage
     engine : startpage
-    base_url : 'https://startpage.com/'
-    search_url : 'https://startpage.com/do/search'
     shortcut : sp

 # +30% page load time
…

   - name : twitter
     engine : twitter
-    categories : social media
     shortcut : tw

 # maybe in a fun category
…

   - name : youtube
     engine : youtube
-    categories : videos
     shortcut : yt

   - name : dailymotion
     engine : dailymotion
-    locale : en_US
-    categories : videos
     shortcut : dm

   - name : vimeo