浏览代码

[enh] engine cfg compatibility

asciimoo 11 年前
父节点
当前提交
74b6be3991

+ 4
- 1
searx/engines/duckduckgo.py 查看文件

@@ -1,8 +1,11 @@
1 1
 from json import loads
2
+from urllib import urlencode
2 3
 
4
+url = 'https://duckduckgo.com/'
5
+search_url = url + 'd.js?{query}&l=us-en&p=1&s=0'
3 6
 
4 7
 def request(query, params):
5
-    params['url'] = 'https://duckduckgo.com/d.js?q=%s&l=us-en&p=1&s=0' % query
8
+    params['url'] = search_url.format(query=urlencode({'q': query}))
6 9
     return params
7 10
 
8 11
 

+ 4
- 1
searx/engines/duckduckgo_definitions.py 查看文件

@@ -1,7 +1,10 @@
1 1
 import json
2
+from urllib import urlencode
3
+
4
+url = 'http://api.duckduckgo.com/?{query}&format=json&pretty=0'
2 5
 
3 6
 def request(query, params):
4
-    params['url'] = 'http://api.duckduckgo.com/?q=%s&format=json&pretty=0' % query
7
+    params['url'] =  url.format(query=urlencode({'q': query}))
5 8
     return params
6 9
 
7 10
 

+ 6
- 8
searx/engines/flickr.py 查看文件

@@ -1,18 +1,16 @@
1 1
 #!/usr/bin/env python
2 2
 
3
-from urllib import quote
3
+from urllib import urlencode
4 4
 from lxml import html
5 5
 from urlparse import urljoin
6 6
 
7 7
 categories = ['images']
8 8
 
9
-base_url = 'https://secure.flickr.com/'
10
-search_url = base_url+'search/?q='
9
+url = 'https://secure.flickr.com/'
10
+search_url = url+'search/?q={query}'
11 11
 
12 12
 def request(query, params):
13
-    global search_url
14
-    query = quote(query.replace(' ', '+'), safe='+')
15
-    params['url'] = search_url + query
13
+    params['url'] = search_url.format(query=urlencode({'q': query}))
16 14
     return params
17 15
 
18 16
 def response(resp):
@@ -20,11 +18,11 @@ def response(resp):
20 18
     results = []
21 19
     dom = html.fromstring(resp.text)
22 20
     for result in dom.xpath('//div[@id="thumbnails"]//a[@class="rapidnofollow photo-click" and @data-track="photo-click"]'):
23
-        url = urljoin(base_url, result.attrib.get('href'))
21
+        href = urljoin(url, result.attrib.get('href'))
24 22
         img = result.xpath('.//img')[0]
25 23
         title = img.attrib.get('alt', '')
26 24
         img_src = img.attrib.get('data-defer-src')
27 25
         if not img_src:
28 26
             continue
29
-        results.append({'url': url, 'title': title, 'img_src': img_src, 'template': 'images.html'})
27
+        results.append({'url': href, 'title': title, 'img_src': img_src, 'template': 'images.html'})
30 28
     return results

+ 2
- 2
searx/engines/github.py 查看文件

@@ -4,11 +4,11 @@ from cgi import escape
4 4
 
5 5
 categories = ['it']
6 6
 
7
-search_url = 'https://api.github.com/search/repositories?sort=stars&order=desc&'
7
+search_url = 'https://api.github.com/search/repositories?sort=stars&order=desc&{query}'
8 8
 
9 9
 def request(query, params):
10 10
     global search_url
11
-    params['url'] = search_url + urlencode({'q': query})
11
+    params['url'] = search_url.format(query=urlencode({'q': query}))
12 12
     params['headers']['Accept'] = 'application/vnd.github.preview.text-match+json'
13 13
     return params
14 14
 

+ 4
- 5
searx/engines/google_images.py 查看文件

@@ -5,15 +5,14 @@ from json import loads
5 5
 
6 6
 categories = ['images']
7 7
 
8
-search_url = 'https://ajax.googleapis.com/ajax/services/search/images?v=1.0&start=0&rsz=large&safe=off&filter=off&'
8
+url = 'https://ajax.googleapis.com/'
9
+search_url = url + 'ajax/services/search/images?v=1.0&start=0&rsz=large&safe=off&filter=off&{query}'
9 10
 
10 11
 def request(query, params):
11
-    global search_url
12 12
     params['url'] = search_url + urlencode({'q': query})
13 13
     return params
14 14
 
15 15
 def response(resp):
16
-    global base_url
17 16
     results = []
18 17
     search_res = loads(resp.text)
19 18
     if not search_res.get('responseData'):
@@ -21,9 +20,9 @@ def response(resp):
21 20
     if not search_res['responseData'].get('results'):
22 21
         return []
23 22
     for result in search_res['responseData']['results']:
24
-        url = result['originalContextUrl']
23
+        href = result['originalContextUrl']
25 24
         title = result['title']
26 25
         if not result['url']:
27 26
             continue
28
-        results.append({'url': url, 'title': title, 'content': '', 'img_src': result['url'], 'template': 'images.html'})
27
+        results.append({'url': href, 'title': title, 'content': '', 'img_src': result['url'], 'template': 'images.html'})
29 28
     return results

+ 5
- 8
searx/engines/piratebay.py 查看文件

@@ -5,21 +5,18 @@ from urllib import quote
5 5
 
6 6
 categories = ['videos', 'music']
7 7
 
8
-base_url = 'https://thepiratebay.sx/'
9
-search_url = base_url + 'search/{search_term}/0/99/{search_type}'
8
+url = 'https://thepiratebay.sx/'
9
+search_url = url + 'search/{search_term}/0/99/{search_type}'
10 10
 search_types = {'videos': '200'
11 11
                ,'music' : '100'
12 12
                }
13 13
 
14 14
 def request(query, params):
15
-    global search_url, search_types
16
-    # 200 is the video category
17 15
     params['url'] = search_url.format(search_term=quote(query), search_type=search_types.get(params['category']))
18 16
     return params
19 17
 
20 18
 
21 19
 def response(resp):
22
-    global base_url
23 20
     results = []
24 21
     dom = html.fromstring(resp.text)
25 22
     search_res = dom.xpath('//table[@id="searchResult"]//tr')
@@ -27,12 +24,12 @@ def response(resp):
27 24
         return results
28 25
     for result in search_res[1:]:
29 26
         link = result.xpath('.//div[@class="detName"]//a')[0]
30
-        url = urljoin(base_url, link.attrib.get('href'))
27
+        href = urljoin(url, link.attrib.get('href'))
31 28
         title = ' '.join(link.xpath('.//text()'))
32 29
         content = escape(' '.join(result.xpath('.//font[@class="detDesc"]//text()')))
33 30
         seed, leech = result.xpath('.//td[@align="right"]/text()')[:2]
34 31
         content += '<br />Seed: %s, Leech: %s' % (seed, leech)
35 32
         magnetlink = result.xpath('.//a[@title="Download this torrent using magnet"]')[0]
36
-        content += '<br /><a href="%s">magnet link</a>' % urljoin(base_url, magnetlink.attrib['href'])
37
-        results.append({'url': url, 'title': title, 'content': content})
33
+        content += '<br /><a href="%s">magnet link</a>' % urljoin(url, magnetlink.attrib['href'])
34
+        results.append({'url': href, 'title': title, 'content': content})
38 35
     return results

+ 4
- 2
searx/engines/soundcloud.py 查看文件

@@ -1,13 +1,15 @@
1 1
 from json import loads
2
+from urllib import urlencode
2 3
 
3 4
 categories = ['music']
4 5
 
5 6
 guest_client_id = 'b45b1aa10f1ac2941910a7f0d10f8e28'
6
-search_url = 'https://api.soundcloud.com/search?q=%s&facet=model&limit=10&offset=0&linked_partitioning=1&client_id='+guest_client_id
7
+url = 'https://api.soundcloud.com/'
8
+search_url = url + 'search?{query}&facet=model&limit=20&offset=0&linked_partitioning=1&client_id='+guest_client_id
7 9
 
8 10
 def request(query, params):
9 11
     global search_url
10
-    params['url'] = search_url % query
12
+    params['url'] = search_url.format(query=urlencode({'q': query}))
11 13
     return params
12 14
 
13 15
 

+ 4
- 6
searx/engines/stackoverflow.py 查看文件

@@ -5,23 +5,21 @@ from urllib import urlencode
5 5
 
6 6
 categories = ['it']
7 7
 
8
-base_url = 'http://stackoverflow.com/'
9
-search_url = base_url+'search?'
8
+url = 'http://stackoverflow.com/'
9
+search_url = url+'search?'
10 10
 
11 11
 def request(query, params):
12
-    global search_url
13 12
     params['url'] = search_url + urlencode({'q': query})
14 13
     return params
15 14
 
16 15
 
17 16
 def response(resp):
18
-    global base_url
19 17
     results = []
20 18
     dom = html.fromstring(resp.text)
21 19
     for result in dom.xpath('//div[@class="question-summary search-result"]'):
22 20
         link = result.xpath('.//div[@class="result-link"]//a')[0]
23
-        url = urljoin(base_url, link.attrib.get('href'))
21
+        href = urljoin(url, link.attrib.get('href'))
24 22
         title = ' '.join(link.xpath('.//text()'))
25 23
         content = escape(' '.join(result.xpath('.//div[@class="excerpt"]//text()')))
26
-        results.append({'url': url, 'title': title, 'content': content})
24
+        results.append({'url': href, 'title': title, 'content': content})
27 25
     return results

+ 3
- 6
searx/engines/youtube.py 查看文件

@@ -1,15 +1,12 @@
1 1
 from json import loads
2
-from urllib import quote
2
+from urllib import urlencode
3 3
 
4 4
 categories = ['videos']
5 5
 
6
-search_url = 'https://gdata.youtube.com/feeds/api/videos?alt=json&q='
6
+search_url = 'https://gdata.youtube.com/feeds/api/videos?alt=json&{query}'
7 7
 
8 8
 def request(query, params):
9
-    global search_url
10
-    query = quote(query.replace(' ', '+'), safe='+')
11
-    params['url'] = search_url + query
12
-
9
+    params['url'] = search_url.format(query=urlencode({'q': query}))
13 10
     return params
14 11
 
15 12