Merge pull request #139 from dalf/master

pep8 : engines
Adam Tauber, 10 years ago
commit 9517f7a6e7

searx/engines/bing.py  (+8 -7)

@@ -1,8 +1,9 @@
 ## Bing (Web)
-# 
+#
 # @website     https://www.bing.com
-# @provide-api yes (http://datamarket.azure.com/dataset/bing/search), max. 5000 query/month
-# 
+# @provide-api yes (http://datamarket.azure.com/dataset/bing/search),
+#              max. 5000 query/month
+#
 # @using-api   no (because of query limit)
 # @results     HTML (using search portal)
 # @stable      no (HTML can change)
@@ -58,8 +59,8 @@ def response(resp):
         content = escape(' '.join(result.xpath('.//p//text()')))
 
         # append result
-        results.append({'url': url, 
-                        'title': title, 
+        results.append({'url': url,
+                        'title': title,
                         'content': content})
 
     # return results if something is found
@@ -74,8 +75,8 @@ def response(resp):
         content = escape(' '.join(result.xpath('.//p//text()')))
 
         # append result
-        results.append({'url': url, 
-                        'title': title, 
+        results.append({'url': url,
+                        'title': title,
                         'content': content})
 
     # return results
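
Most hunks in this commit fix only two pycodestyle rules: E501 (line longer than 79 characters) and W291/W293 (trailing whitespace). A toy re-implementation of just those two checks, for illustration only -- this is not the pep8 tool the project actually ran:

    import sys

    def check(path, max_len=79):
        # scan a file, report E501/W291-style findings per line
        problems = []
        with open(path) as f:
            for no, line in enumerate(f, 1):
                text = line.rstrip('\n')
                if len(text) > max_len:
                    problems.append((no, 'E501 line too long'))
                if text != text.rstrip():
                    problems.append((no, 'W291 trailing whitespace'))
        return problems

    if __name__ == '__main__':
        for no, message in check(sys.argv[1]):
            print('%s:%d: %s' % (sys.argv[1], no, message))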

searx/engines/bing_images.py  (+11 -9)

@@ -1,17 +1,19 @@
 ## Bing (Images)
-# 
+#
 # @website     https://www.bing.com/images
-# @provide-api yes (http://datamarket.azure.com/dataset/bing/search), max. 5000 query/month
-# 
+# @provide-api yes (http://datamarket.azure.com/dataset/bing/search),
+#              max. 5000 query/month
+#
 # @using-api   no (because of query limit)
 # @results     HTML (using search portal)
 # @stable      no (HTML can change)
 # @parse       url, title, img_src
 #
-# @todo        currently there are up to 35 images receive per page, because bing does not parse count=10. limited response to 10 images
+# @todo        currently there are up to 35 images receive per page,
+#              because bing does not parse count=10.
+#              limited response to 10 images
 
 from urllib import urlencode
-from cgi import escape
 from lxml import html
 from yaml import load
 import re
@@ -51,15 +53,15 @@ def response(resp):
     dom = html.fromstring(resp.content)
 
     # init regex for yaml-parsing
-    p = re.compile( '({|,)([a-z]+):(")')
+    p = re.compile('({|,)([a-z]+):(")')
 
     # parse results
     for result in dom.xpath('//div[@class="dg_u"]'):
         link = result.xpath('./a')[0]
 
         # parse yaml-data (it is required to add a space, to make it parsable)
-        yaml_data = load(p.sub( r'\1\2: \3', link.attrib.get('m')))
- 
+        yaml_data = load(p.sub(r'\1\2: \3', link.attrib.get('m')))
+
         title = link.attrib.get('t1')
         #url = 'http://' + link.attrib.get('t3')
         url = yaml_data.get('surl')
@@ -69,7 +71,7 @@ def response(resp):
         results.append({'template': 'images.html',
                         'url': url,
                         'title': title,
-                        'content': '',  
+                        'content': '',
                         'img_src': img_src})
 
         # TODO stop parsing if 10 images are found
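
The regex in this engine is worth unpacking: Bing's "m" attribute holds {key:"value",...} pairs with no space after the colon, which YAML rejects, so the engine inserts one space and then feeds the string to yaml.load. A standalone sketch with made-up attribute data (newer PyYAML would prefer yaml.safe_load):

    import re
    from yaml import load

    p = re.compile('({|,)([a-z]+):(")')

    # fabricated example of Bing's almost-YAML "m" attribute
    m_attr = '{surl:"https://example.com/page",imgurl:"https://example.com/i.jpg"}'
    yaml_data = load(p.sub(r'\1\2: \3', m_attr))
    print(yaml_data.get('surl'))    # -> https://example.com/page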

searx/engines/bing_news.py  (+10 -9)

@@ -1,8 +1,9 @@
 ## Bing (News)
-# 
+#
 # @website     https://www.bing.com/news
-# @provide-api yes (http://datamarket.azure.com/dataset/bing/search), max. 5000 query/month
-# 
+# @provide-api yes (http://datamarket.azure.com/dataset/bing/search),
+#              max. 5000 query/month
+#
 # @using-api   no (because of query limit)
 # @results     HTML (using search portal)
 # @stable      no (HTML can change)
@@ -57,12 +58,12 @@ def response(resp):
         url = link.attrib.get('href')
         title = ' '.join(link.xpath('.//text()'))
         contentXPath = result.xpath('.//div[@class="sn_txt"]/div//span[@class="sn_snip"]//text()')
-        if contentXPath != None:
+        if contentXPath is not None:
             content = escape(' '.join(contentXPath))
-            
+
         # parse publishedDate
         publishedDateXPath = result.xpath('.//div[@class="sn_txt"]/div//span[contains(@class,"sn_ST")]//span[contains(@class,"sn_tm")]//text()')
-        if publishedDateXPath != None:
+        if publishedDateXPath is not None:
             publishedDate = escape(' '.join(publishedDateXPath))
 
         if re.match("^[0-9]+ minute(s|) ago$", publishedDate):
@@ -89,10 +90,10 @@ def response(resp):
             except TypeError:
                 # FIXME
                 publishedDate = datetime.now()
-                
+
         # append result
-        results.append({'url': url, 
-                        'title': title, 
+        results.append({'url': url,
+                        'title': title,
                         'publishedDate': publishedDate,
                         'content': content})
 
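The "!= None" to "is not None" change (pycodestyle E711) is behavioural in principle, not just cosmetic: equality can be overridden by a class, identity cannot. A small self-contained demonstration:

    class AlwaysEqual(object):
        def __eq__(self, other):
            return True        # claims equality with anything, even None

        def __ne__(self, other):
            return False

    thing = AlwaysEqual()
    print(thing == None)       # True  -- overridable, hence E711
    print(thing is None)       # False -- identity cannot be faked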

searx/engines/currency_convert.py  (+1 -1)

@@ -55,6 +55,6 @@ def response(resp):
         resp.search_params['to'].lower()
     )
 
-    results.append({'answer' : answer, 'url': url})
+    results.append({'answer': answer, 'url': url})
 
     return results
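
The one-character fix above is pycodestyle E203, whitespace before punctuation. Toy illustration with made-up values:

    results = []
    results.append({'answer' : 42})    # E203: space before ':'
    results.append({'answer': 42})     # fixed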

searx/engines/dailymotion.py  (+2 -3)

@@ -1,8 +1,8 @@
 ## Dailymotion (Videos)
-# 
+#
 # @website     https://www.dailymotion.com
 # @provide-api yes (http://www.dailymotion.com/developer)
-# 
+#
 # @using-api   yes
 # @results     JSON
 # @stable      yes
@@ -12,7 +12,6 @@
 
 from urllib import urlencode
 from json import loads
-from lxml import html
 
 # engine dependent config
 categories = ['videos']
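
Dropping "from lxml import html" here is the unused-import case (F401 in flake8 terms): this engine consumes JSON, so no HTML parsing happens. A rough sketch of how such imports can be detected with the ast module -- simplified, ignoring scopes, star imports and __all__:

    import ast
    import sys

    source = open(sys.argv[1]).read()
    tree = ast.parse(source)

    imported = set()
    for node in ast.walk(tree):
        if isinstance(node, ast.Import):
            imported.update(alias.asname or alias.name.split('.')[0]
                            for alias in node.names)
        elif isinstance(node, ast.ImportFrom):
            imported.update(alias.asname or alias.name for alias in node.names)

    # every bare name mentioned anywhere in the module
    used = set(node.id for node in ast.walk(tree) if isinstance(node, ast.Name))
    for name in sorted(imported - used):
        print('F401 %r imported but unused' % name)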

searx/engines/deviantart.py  (+2 -2)

@@ -1,8 +1,8 @@
 ## Deviantart (Images)
-# 
+#
 # @website     https://www.deviantart.com/
 # @provide-api yes (https://www.deviantart.com/developers/) (RSS)
-# 
+#
 # @using-api   no (TODO, rewrite to api)
 # @results     HTML
 # @stable      no (HTML can change)

searx/engines/duckduckgo.py  (+7 -5)

@@ -1,15 +1,17 @@
 ## DuckDuckGo (Web)
-# 
+#
 # @website     https://duckduckgo.com/
-# @provide-api yes (https://duckduckgo.com/api), but not all results from search-site
-# 
+# @provide-api yes (https://duckduckgo.com/api),
+#              but not all results from search-site
+#
 # @using-api   no
 # @results     HTML (using search portal)
 # @stable      no (HTML can change)
 # @parse       url, title, content
 #
 # @todo        rewrite to api
-# @todo        language support (the current used site does not support language-change)
+# @todo        language support
+#              (the current used site does not support language-change)
 
 from urllib import urlencode
 from lxml.html import fromstring
@@ -37,7 +39,7 @@ def request(query, params):
     if params['language'] == 'all':
         locale = 'en-us'
     else:
-        locale = params['language'].replace('_','-').lower()
+        locale = params['language'].replace('_', '-').lower()
 
     params['url'] = url.format(
         query=urlencode({'q': query, 'kl': locale}),

searx/engines/duckduckgo_definitions.py  (+46 -31)

@@ -3,21 +3,25 @@ from urllib import urlencode
 from lxml import html
 from searx.engines.xpath import extract_text
 
-url = 'https://api.duckduckgo.com/?{query}&format=json&pretty=0&no_redirect=1&d=1'
+url = 'https://api.duckduckgo.com/'\
+    + '?{query}&format=json&pretty=0&no_redirect=1&d=1'
+
 
 def result_to_text(url, text, htmlResult):
     # TODO : remove result ending with "Meaning" or "Category"
     dom = html.fromstring(htmlResult)
     a = dom.xpath('//a')
-    if len(a)>=1:
+    if len(a) >= 1:
         return extract_text(a[0])
     else:
         return text
 
+
 def html_to_text(htmlFragment):
     dom = html.fromstring(htmlFragment)
     return extract_text(dom)
 
+
 def request(query, params):
     # TODO add kl={locale}
     params['url'] = url.format(query=urlencode({'q': query}))
@@ -38,16 +42,15 @@ def response(resp):
     # add answer if there is one
     answer = search_res.get('Answer', '')
     if answer != '':
-        results.append({ 'answer' : html_to_text(answer) })
+        results.append({'answer': html_to_text(answer)})
 
     # add infobox
     if 'Definition' in search_res:
-        content = content + search_res.get('Definition', '') 
+        content = content + search_res.get('Definition', '')
 
     if 'Abstract' in search_res:
         content = content + search_res.get('Abstract', '')
 
-
     # image
     image = search_res.get('Image', '')
     image = None if image == '' else image
@@ -55,29 +58,35 @@ def response(resp):
     # attributes
     if 'Infobox' in search_res:
         infobox = search_res.get('Infobox', None)
-        if  'content' in infobox:
+        if 'content' in infobox:
             for info in infobox.get('content'):
-                attributes.append({'label': info.get('label'), 'value': info.get('value')})
+                attributes.append({'label': info.get('label'),
+                                  'value': info.get('value')})
 
     # urls
     for ddg_result in search_res.get('Results', []):
         if 'FirstURL' in ddg_result:
             firstURL = ddg_result.get('FirstURL', '')
             text = ddg_result.get('Text', '')
-            urls.append({'title':text, 'url':firstURL})
-            results.append({'title':heading, 'url': firstURL})
+            urls.append({'title': text, 'url': firstURL})
+            results.append({'title': heading, 'url': firstURL})
 
     # related topics
     for ddg_result in search_res.get('RelatedTopics', None):
         if 'FirstURL' in ddg_result:
-            suggestion = result_to_text(ddg_result.get('FirstURL', None), ddg_result.get('Text', None), ddg_result.get('Result', None))
+            suggestion = result_to_text(ddg_result.get('FirstURL', None),
+                                        ddg_result.get('Text', None),
+                                        ddg_result.get('Result', None))
             if suggestion != heading:
                 results.append({'suggestion': suggestion})
         elif 'Topics' in ddg_result:
             suggestions = []
-            relatedTopics.append({ 'name' : ddg_result.get('Name', ''), 'suggestions': suggestions })
+            relatedTopics.append({'name': ddg_result.get('Name', ''),
+                                 'suggestions': suggestions})
             for topic_result in ddg_result.get('Topics', []):
-                suggestion = result_to_text(topic_result.get('FirstURL', None), topic_result.get('Text', None), topic_result.get('Result', None))
+                suggestion = result_to_text(topic_result.get('FirstURL', None),
+                                            topic_result.get('Text', None),
+                                            topic_result.get('Result', None))
                 if suggestion != heading:
                     suggestions.append(suggestion)
 
@@ -86,21 +95,26 @@ def response(resp):
     if abstractURL != '':
         # add as result ? problem always in english
         infobox_id = abstractURL
-        urls.append({'title': search_res.get('AbstractSource'), 'url': abstractURL})
+        urls.append({'title': search_res.get('AbstractSource'),
+                    'url': abstractURL})
 
     # definition
     definitionURL = search_res.get('DefinitionURL', '')
     if definitionURL != '':
         # add as result ? as answer ? problem always in english
         infobox_id = definitionURL
-        urls.append({'title': search_res.get('DefinitionSource'), 'url': definitionURL})
+        urls.append({'title': search_res.get('DefinitionSource'),
+                    'url': definitionURL})
 
     # entity
     entity = search_res.get('Entity', None)
-    # TODO continent / country / department / location / waterfall / mountain range : link to map search, get weather, near by locations
+    # TODO continent / country / department / location / waterfall /
+    #      mountain range :
+    #      link to map search, get weather, near by locations
     # TODO musician : link to music search
     # TODO concert tour : ??
-    # TODO film / actor / television  / media franchise : links to IMDB / rottentomatoes (or scrap result)
+    # TODO film / actor / television  / media franchise :
+    #      links to IMDB / rottentomatoes (or scrap result)
     # TODO music : link tu musicbrainz / last.fm
     # TODO book : ??
     # TODO artist / playwright : ??
@@ -114,24 +128,25 @@ def response(resp):
     # TODO programming language : ??
     # TODO file format : ??
 
-    if len(heading)>0:
+    if len(heading) > 0:
         # TODO get infobox.meta.value where .label='article_title'
-        if image==None and len(attributes)==0 and len(urls)==1 and len(relatedTopics)==0 and len(content)==0:
+        if image is None and len(attributes) == 0 and len(urls) == 1 and\
+           len(relatedTopics) == 0 and len(content) == 0:
             results.append({
-                    'url': urls[0]['url'],
-                    'title': heading,
-                    'content': content
-                    })
+                           'url': urls[0]['url'],
+                           'title': heading,
+                           'content': content
+                           })
         else:
             results.append({
-                    'infobox': heading,
-                    'id': infobox_id,
-                    'entity': entity,
-                    'content': content,
-                    'img_src' : image,
-                    'attributes': attributes,
-                    'urls': urls,
-                    'relatedTopics': relatedTopics
-                    })
+                           'infobox': heading,
+                           'id': infobox_id,
+                           'entity': entity,
+                           'content': content,
+                           'img_src': image,
+                           'attributes': attributes,
+                           'urls': urls,
+                           'relatedTopics': relatedTopics
+                           })
 
     return results
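
The url assignment at the top of this file shows one way to keep a long literal under 79 characters: backslash continuation plus "+". Implicit concatenation of adjacent literals inside parentheses is the other common idiom; both build the identical string:

    url = 'https://api.duckduckgo.com/'\
        + '?{query}&format=json&pretty=0&no_redirect=1&d=1'

    url_alt = ('https://api.duckduckgo.com/'
               '?{query}&format=json&pretty=0&no_redirect=1&d=1')

    assert url == url_alt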

searx/engines/dummy.py  (+1 -1)

@@ -1,5 +1,5 @@
 ## Dummy
-# 
+#
 # @results     empty array
 # @stable      yes
 

searx/engines/faroo.py  (+6 -5)

@@ -1,8 +1,8 @@
 ## Faroo (Web, News)
-# 
+#
 # @website     http://www.faroo.com
 # @provide-api yes (http://www.faroo.com/hp/api/api.html), require API-key
-# 
+#
 # @using-api   yes
 # @results     JSON
 # @stable      yes
@@ -24,9 +24,10 @@ api_key = None
 url = 'http://www.faroo.com/'
 search_url = url + 'api?{query}&start={offset}&length={number_of_results}&l={language}&src={categorie}&i=false&f=json&key={api_key}'
 
-search_category = {'general': 'web',                
+search_category = {'general': 'web',
                 'news': 'news'}
 
+
 # do search-request
 def request(query, params):
     offset = (params['pageno']-1) * number_of_results + 1
@@ -48,7 +49,7 @@ def request(query, params):
                                       query=urlencode({'q': query}),
                                       language=language,
                                       categorie=categorie,
-                                      api_key=api_key )
+                                      api_key=api_key)
 
     # using searx User-Agent
     params['headers']['User-Agent'] = searx_useragent()
@@ -101,7 +102,7 @@ def response(resp):
             results.append({'template': 'images.html',
                             'url': result['url'],
                             'title': result['title'],
-                            'content': result['kwic'],  
+                            'content': result['kwic'],
                             'img_src': result['iurl']})
 
     # return results
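
The lone "+" line in the second hunk adds a blank line so that two blank lines separate the module-level constants from the following def, as pycodestyle E302 expects. Minimal shape of the fixed layout (names from the engine, body trimmed):

    search_category = {'general': 'web',
                       'news': 'news'}


    def request(query, params):
        # two blank lines above the def satisfy E302
        return params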

searx/engines/generalfile.py  (+2 -2)

@@ -1,8 +1,8 @@
 ## General Files (Files)
-# 
+#
 # @website     http://www.general-files.org
 # @provide-api no (nothing found)
-# 
+#
 # @using-api   no (because nothing found)
 # @results     HTML (using search portal)
 # @stable      no (HTML can change)

searx/engines/github.py  (+2 -2)

@@ -1,8 +1,8 @@
 ## Github (It)
-# 
+#
 # @website     https://github.com/
 # @provide-api yes (https://developer.github.com/v3/)
-# 
+#
 # @using-api   yes
 # @results     JSON
 # @stable      yes (using api)

searx/engines/google_images.py  (+4 -3)

@@ -1,8 +1,9 @@
 ## Google (Images)
-# 
+#
 # @website     https://www.google.com
-# @provide-api yes (https://developers.google.com/web-search/docs/), deprecated!
-# 
+# @provide-api yes (https://developers.google.com/web-search/docs/),
+#              deprecated!
+#
 # @using-api   yes
 # @results     JSON
 # @stable      yes (but deprecated)

searx/engines/google_news.py  (+4 -3)

@@ -1,8 +1,9 @@
 ## Google (News)
-# 
+#
 # @website     https://www.google.com
-# @provide-api yes (https://developers.google.com/web-search/docs/), deprecated!
-# 
+# @provide-api yes (https://developers.google.com/web-search/docs/),
+#              deprecated!
+#
 # @using-api   yes
 # @results     JSON
 # @stable      yes (but deprecated)

searx/engines/openstreetmap.py  (+15 -15)

@@ -39,16 +39,16 @@ def response(resp):
         url = result_base_url.format(osm_type=osm_type,
                                      osm_id=r['osm_id'])
 
-        osm = {'type':osm_type,
-               'id':r['osm_id']}
+        osm = {'type': osm_type,
+               'id': r['osm_id']}
 
-        geojson =  r.get('geojson')
+        geojson = r.get('geojson')
 
         # if no geojson is found and osm_type is a node, add geojson Point
         if not geojson and\
            osm_type == 'node':
-            geojson = {u'type':u'Point', 
-                       u'coordinates':[r['lon'],r['lat']]}
+            geojson = {u'type': u'Point',
+                       u'coordinates': [r['lon'], r['lat']]}
 
         address_raw = r.get('address')
         address = {}
@@ -59,20 +59,20 @@ def response(resp):
           r['class'] == 'tourism' or\
           r['class'] == 'leisure':
             if address_raw.get('address29'):
-                address = {'name':address_raw.get('address29')}
+                address = {'name': address_raw.get('address29')}
             else:
-                address = {'name':address_raw.get(r['type'])}
+                address = {'name': address_raw.get(r['type'])}
 
         # add rest of adressdata, if something is already found
         if address.get('name'):
-            address.update({'house_number':address_raw.get('house_number'),
-                       'road':address_raw.get('road'),
-                       'locality':address_raw.get('city',
-                                  address_raw.get('town', 
-                                  address_raw.get('village'))),
-                       'postcode':address_raw.get('postcode'),
-                       'country':address_raw.get('country'),
-                       'country_code':address_raw.get('country_code')})
+            address.update({'house_number': address_raw.get('house_number'),
+                           'road': address_raw.get('road'),
+                           'locality': address_raw.get('city',
+                                       address_raw.get('town',
+                                       address_raw.get('village'))),
+                           'postcode': address_raw.get('postcode'),
+                           'country': address_raw.get('country'),
+                           'country_code': address_raw.get('country_code')})
         else:
             address = None
 
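The reindented update() call keeps an interesting construct intact: the nested .get() chain picks the first of city, town or village that exists (note the inner defaults are evaluated eagerly either way). Toy data for illustration:

    address_raw = {'town': 'Smallville', 'postcode': '12345'}
    locality = address_raw.get('city',
                               address_raw.get('town',
                               address_raw.get('village')))
    print(locality)    # -> Smallville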

searx/engines/piratebay.py  (+3 -3)

@@ -1,8 +1,8 @@
 ## Piratebay (Videos, Music, Files)
-# 
+#
 # @website     https://thepiratebay.se
 # @provide-api no (nothing found)
-# 
+#
 # @using-api   no
 # @results     HTML (using search portal)
 # @stable      yes (HTML can change)
@@ -23,7 +23,7 @@ url = 'https://thepiratebay.se/'
 search_url = url + 'search/{search_term}/{pageno}/99/{search_type}'
 
 # piratebay specific type-definitions
-search_types = {'files': '0',                
+search_types = {'files': '0',
                 'music': '100',
                 'videos': '200'}
 

searx/engines/soundcloud.py  (+2 -2)

@@ -1,8 +1,8 @@
 ## Soundcloud (Music)
-# 
+#
 # @website     https://soundcloud.com
 # @provide-api yes (https://developers.soundcloud.com/)
-# 
+#
 # @using-api   yes
 # @results     JSON
 # @stable      yes

searx/engines/stackoverflow.py  (+4 -4)

@@ -1,8 +1,8 @@
 ## Stackoverflow (It)
-# 
+#
 # @website     https://stackoverflow.com/
 # @provide-api not clear (https://api.stackexchange.com/docs/advanced-search)
-# 
+#
 # @using-api   no
 # @results     HTML
 # @stable      no (HTML can change)
@@ -50,8 +50,8 @@ def response(resp):
         content = escape(' '.join(result.xpath(content_xpath)))
 
         # append result
-        results.append({'url': href, 
-                        'title': title, 
+        results.append({'url': href,
+                        'title': title,
                         'content': content})
 
     # return results

searx/engines/twitter.py  (+2 -2)

@@ -1,8 +1,8 @@
 ## Twitter (Social media)
-# 
+#
 # @website     https://www.bing.com/news
 # @provide-api yes (https://dev.twitter.com/docs/using-search)
-# 
+#
 # @using-api   no
 # @results     HTML (using search portal)
 # @stable      no (HTML can change)

searx/engines/vimeo.py  (+7 -5)

@@ -1,8 +1,9 @@
 ## Vimeo (Videos)
-# 
+#
 # @website     https://vimeo.com/
-# @provide-api yes (http://developer.vimeo.com/api), they have a maximum count of queries/hour
-# 
+# @provide-api yes (http://developer.vimeo.com/api),
+#              they have a maximum count of queries/hour
+#
 # @using-api   no (TODO, rewrite to api)
 # @results     HTML (using search portal)
 # @stable      no (HTML can change)
@@ -35,11 +36,12 @@ publishedDate_xpath = './/p[@class="meta"]//attribute::datetime'
 
 # do search-request
 def request(query, params):
-    params['url'] = search_url.format(pageno=params['pageno'] ,
+    params['url'] = search_url.format(pageno=params['pageno'],
                                       query=urlencode({'q': query}))
 
     # TODO required?
-    params['cookies']['__utma'] = '00000000.000#0000000.0000000000.0000000000.0000000000.0'
+    params['cookies']['__utma'] =\
+        '00000000.000#0000000.0000000000.0000000000.0000000000.0'
 
     return params
 

searx/engines/wikidata.py  (+122 -59)

@@ -2,13 +2,25 @@ import json
 from requests import get
 from urllib import urlencode
 
-resultCount=1
-urlSearch = 'https://www.wikidata.org/w/api.php?action=query&list=search&format=json&srnamespace=0&srprop=sectiontitle&{query}'
-urlDetail = 'https://www.wikidata.org/w/api.php?action=wbgetentities&format=json&props=labels%7Cinfo%7Csitelinks%7Csitelinks%2Furls%7Cdescriptions%7Cclaims&{query}'
-urlMap = 'https://www.openstreetmap.org/?lat={latitude}&lon={longitude}&zoom={zoom}&layers=M'
+result_count = 1
+wikidata_host = 'https://www.wikidata.org'
+wikidata_api = wikidata_host + '/w/api.php'
+url_search = wikidata_api \
+    + '?action=query&list=search&format=json'\
+    + '&srnamespace=0&srprop=sectiontitle&{query}'
+url_detail = wikidata_api\
+    + '?action=wbgetentities&format=json'\
+    + '&props=labels%7Cinfo%7Csitelinks'\
+    + '%7Csitelinks%2Furls%7Cdescriptions%7Cclaims'\
+    + '&{query}'
+url_map = 'https://www.openstreetmap.org/'\
+    + '?lat={latitude}&lon={longitude}&zoom={zoom}&layers=M'
+
 
 def request(query, params):
-    params['url'] = urlSearch.format(query=urlencode({'srsearch': query, 'srlimit': resultCount}))
+    params['url'] = url_search.format(
+        query=urlencode({'srsearch': query,
+                        'srlimit': result_count}))
     return params
 
@@ -23,7 +35,8 @@ def response(resp):
     language = resp.search_params['language'].split('_')[0]
     if language == 'all':
         language = 'en'
-    url = urlDetail.format(query=urlencode({'ids': '|'.join(wikidata_ids), 'languages': language + '|en'}))
+    url = url_detail.format(query=urlencode({'ids': '|'.join(wikidata_ids),
+                                            'languages': language + '|en'}))
 
     htmlresponse = get(url)
     jsonresponse = json.loads(htmlresponse.content)
@@ -32,6 +45,7 @@ def response(resp):
 
     return results
 
+
 def getDetail(jsonresponse, wikidata_id, language):
     results = []
     urls = []
@@ -40,60 +54,103 @@ def getDetail(jsonresponse, wikidata_id, language):
     result = jsonresponse.get('entities', {}).get(wikidata_id, {})
 
     title = result.get('labels', {}).get(language, {}).get('value', None)
-    if title == None:
+    if title is None:
         title = result.get('labels', {}).get('en', {}).get('value', None)
-    if title == None:
+    if title is None:
         return results
 
-    description = result.get('descriptions', {}).get(language, {}).get('value', None)
-    if description == None:
-        description = result.get('descriptions', {}).get('en', {}).get('value', '')
+    description = result\
+        .get('descriptions', {})\
+        .get(language, {})\
+        .get('value', None)
+
+    if description is None:
+        description = result\
+            .get('descriptions', {})\
+            .get('en', {})\
+            .get('value', '')
 
     claims = result.get('claims', {})
     official_website = get_string(claims, 'P856', None)
-    if official_website != None:
-        urls.append({ 'title' : 'Official site', 'url': official_website })
-        results.append({ 'title': title, 'url' : official_website })
+    if official_website is not None:
+        urls.append({'title': 'Official site', 'url': official_website})
+        results.append({'title': title, 'url': official_website})
 
     wikipedia_link_count = 0
     if language != 'en':
-        wikipedia_link_count += add_url(urls, 'Wikipedia (' + language + ')', get_wikilink(result, language + 'wiki'))
+        wikipedia_link_count += add_url(urls,
+                                        'Wikipedia (' + language + ')',
+                                        get_wikilink(result, language +
+                                                     'wiki'))
 wikipedia_en_link = get_wikilink(result, 'enwiki')
-    wikipedia_link_count += add_url(urls, 'Wikipedia (en)', wikipedia_en_link)
+    wikipedia_link_count += add_url(urls,
+                                    'Wikipedia (en)',
+                                    wikipedia_en_link)
     if wikipedia_link_count == 0:
         misc_language = get_wiki_firstlanguage(result, 'wiki')
-        if misc_language != None:
-            add_url(urls, 'Wikipedia (' + misc_language + ')', get_wikilink(result, misc_language + 'wiki'))
+        if misc_language is not None:
+            add_url(urls,
+                    'Wikipedia (' + misc_language + ')',
+                    get_wikilink(result, misc_language + 'wiki'))
 
     if language != 'en':
-        add_url(urls, 'Wiki voyage (' + language + ')', get_wikilink(result, language + 'wikivoyage'))
-    add_url(urls, 'Wiki voyage (en)', get_wikilink(result, 'enwikivoyage'))
+        add_url(urls,
+                'Wiki voyage (' + language + ')',
+                get_wikilink(result, language + 'wikivoyage'))
+
+    add_url(urls,
+            'Wiki voyage (en)',
+            get_wikilink(result, 'enwikivoyage'))
 
     if language != 'en':
-        add_url(urls, 'Wikiquote (' + language + ')', get_wikilink(result, language + 'wikiquote'))
-    add_url(urls, 'Wikiquote (en)', get_wikilink(result, 'enwikiquote'))
+        add_url(urls,
+                'Wikiquote (' + language + ')',
+                get_wikilink(result, language + 'wikiquote'))
 
-    add_url(urls, 'Commons wiki', get_wikilink(result, 'commonswiki'))
+    add_url(urls,
+            'Wikiquote (en)',
+            get_wikilink(result, 'enwikiquote'))
 
-    add_url(urls, 'Location', get_geolink(claims, 'P625', None))
+    add_url(urls,
+            'Commons wiki',
+            get_wikilink(result, 'commonswiki'))
 
-    add_url(urls, 'Wikidata', 'https://www.wikidata.org/wiki/' + wikidata_id + '?uselang='+ language)
+    add_url(urls,
+            'Location',
+            get_geolink(claims, 'P625', None))
+
+    add_url(urls,
+            'Wikidata',
+            'https://www.wikidata.org/wiki/'
+            + wikidata_id + '?uselang=' + language)
 
     musicbrainz_work_id = get_string(claims, 'P435')
-    if musicbrainz_work_id != None:
-        add_url(urls, 'MusicBrainz', 'http://musicbrainz.org/work/' + musicbrainz_work_id)
+    if musicbrainz_work_id is not None:
+        add_url(urls,
+                'MusicBrainz',
+                'http://musicbrainz.org/work/'
+                + musicbrainz_work_id)
 
     musicbrainz_artist_id = get_string(claims, 'P434')
-    if musicbrainz_artist_id != None:
-        add_url(urls, 'MusicBrainz', 'http://musicbrainz.org/artist/' + musicbrainz_artist_id)
+    if musicbrainz_artist_id is not None:
+        add_url(urls,
+                'MusicBrainz',
+                'http://musicbrainz.org/artist/'
+                + musicbrainz_artist_id)
 
     musicbrainz_release_group_id = get_string(claims, 'P436')
-    if musicbrainz_release_group_id != None:
-        add_url(urls, 'MusicBrainz', 'http://musicbrainz.org/release-group/' + musicbrainz_release_group_id)
+    if musicbrainz_release_group_id is not None:
+        add_url(urls,
+                'MusicBrainz',
+                'http://musicbrainz.org/release-group/'
+                + musicbrainz_release_group_id)
 
     musicbrainz_label_id = get_string(claims, 'P966')
-    if musicbrainz_label_id != None:
-        add_url(urls, 'MusicBrainz', 'http://musicbrainz.org/label/' + musicbrainz_label_id)
+    if musicbrainz_label_id is not None:
+        add_url(urls,
+                'MusicBrainz',
+                'http://musicbrainz.org/label/'
+                + musicbrainz_label_id)
 
     # musicbrainz_area_id = get_string(claims, 'P982')
     # P1407 MusicBrainz series ID
@@ -102,42 +159,43 @@ def getDetail(jsonresponse, wikidata_id, language):
     # P1407 MusicBrainz series ID
 
     postal_code = get_string(claims, 'P281', None)
-    if postal_code != None:
-        attributes.append({'label' : 'Postal code(s)', 'value' : postal_code})
+    if postal_code is not None:
+        attributes.append({'label': 'Postal code(s)', 'value': postal_code})
 
     date_of_birth = get_time(claims, 'P569', None)
-    if date_of_birth != None:
-        attributes.append({'label' : 'Date of birth', 'value' : date_of_birth})
+    if date_of_birth is not None:
+        attributes.append({'label': 'Date of birth', 'value': date_of_birth})
 
     date_of_death = get_time(claims, 'P570', None)
-    if date_of_death != None:
-        attributes.append({'label' : 'Date of death', 'value' : date_of_death})
+    if date_of_death is not None:
+        attributes.append({'label': 'Date of death', 'value': date_of_death})
 
-    if len(attributes)==0 and len(urls)==2 and len(description)==0:
+    if len(attributes) == 0 and len(urls) == 2 and len(description) == 0:
         results.append({
-                'url': urls[0]['url'],
-                'title': title,
-                'content': description
-                })
+                       'url': urls[0]['url'],
+                       'title': title,
+                       'content': description
+                       })
     else:
         results.append({
-                'infobox' : title,
-                'id' : wikipedia_en_link,
-                'content' : description,
-                'attributes' : attributes,
-                'urls' : urls
-                })
+                       'infobox': title,
+                       'id': wikipedia_en_link,
+                       'content': description,
+                       'attributes': attributes,
+                       'urls': urls
+                       })
 
     return results
 
 
 def add_url(urls, title, url):
-    if url != None:
-        urls.append({'title' : title, 'url' : url})
+    if url is not None:
+        urls.append({'title': title, 'url': url})
         return 1
     else:
         return 0
 
+
 def get_mainsnak(claims, propertyName):
     propValue = claims.get(propertyName, {})
     if len(propValue) == 0:
@@ -157,7 +215,7 @@ def get_string(claims, propertyName, defaultValue=None):
         mainsnak = e.get('mainsnak', {})
 
         datavalue = mainsnak.get('datavalue', {})
-        if datavalue != None:
+        if datavalue is not None:
             result.append(datavalue.get('value', ''))
 
     if len(result) == 0:
@@ -177,7 +235,7 @@ def get_time(claims, propertyName, defaultValue=None):
         mainsnak = e.get('mainsnak', {})
 
         datavalue = mainsnak.get('datavalue', {})
-        if datavalue != None:
+        if datavalue is not None:
             value = datavalue.get('value', '')
             result.append(value.get('time', ''))
 
@@ -190,7 +248,7 @@ def get_time(claims, propertyName, defaultValue=None):
 def get_geolink(claims, propertyName, defaultValue=''):
     mainsnak = get_mainsnak(claims, propertyName)
 
-    if mainsnak == None:
+    if mainsnak is None:
         return defaultValue
 
     datatype = mainsnak.get('datatype', '')
@@ -209,21 +267,25 @@ def get_geolink(claims, propertyName, defaultValue=''):
     # 1 --> 6
     # 0.016666666666667 --> 9
     # 0.00027777777777778 --> 19
-    # wolframalpha : quadratic fit { {13, 5}, {1, 6}, {0.0166666, 9}, {0.0002777777,19}}
+    # wolframalpha :
+    # quadratic fit { {13, 5}, {1, 6}, {0.0166666, 9}, {0.0002777777,19}}
     # 14.1186-8.8322 x+0.625447 x^2
     if precision < 0.0003:
         zoom = 19
     else:
         zoom = int(15 - precision*8.8322 + precision*precision*0.625447)
 
-    url = urlMap.replace('{latitude}', str(value.get('latitude',0))).replace('{longitude}', str(value.get('longitude',0))).replace('{zoom}', str(zoom))
+    url = url_map\
+        .replace('{latitude}', str(value.get('latitude', 0)))\
+        .replace('{longitude}', str(value.get('longitude', 0)))\
+        .replace('{zoom}', str(zoom))
 
     return url
 
 
 def get_wikilink(result, wikiid):
     url = result.get('sitelinks', {}).get(wikiid, {}).get('url', None)
-    if url == None:
+    if url is None:
         return url
     elif url.startswith('http://'):
         url = url.replace('http://', 'https://')
@@ -231,8 +293,9 @@ def get_wikilink(result, wikiid):
         url = 'https:' + url
     return url
 
+
 def get_wiki_firstlanguage(result, wikipatternid):
     for k in result.get('sitelinks', {}).keys():
-        if k.endswith(wikipatternid) and len(k)==(2+len(wikipatternid)):
+        if k.endswith(wikipatternid) and len(k) == (2+len(wikipatternid)):
             return k[0:2]
     return None
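
Beyond the camelCase to snake_case renames (resultCount -> result_count, urlSearch -> url_search, and so on), the behaviour of get_wikilink is untouched: it upgrades sitelink URLs to https, including protocol-relative "//" ones. Extracted as a standalone sketch with invented inputs:

    def to_https(url):
        # mirror of get_wikilink's normalization branch
        if url is None:
            return url
        elif url.startswith('http://'):
            return url.replace('http://', 'https://')
        elif url.startswith('//'):
            return 'https:' + url
        return url

    print(to_https('http://en.wikipedia.org/wiki/Searx'))
    print(to_https('//commons.wikimedia.org/wiki/Main_Page'))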

searx/engines/yacy.py  (+6 -5)

@@ -1,8 +1,9 @@
 ## Yacy (Web, Images, Videos, Music, Files)
-# 
+#
 # @website     http://yacy.net
-# @provide-api yes (http://www.yacy-websuche.de/wiki/index.php/Dev:APIyacysearch)
-# 
+# @provide-api yes
+#              (http://www.yacy-websuche.de/wiki/index.php/Dev:APIyacysearch)
+#
 # @using-api   yes
 # @results     JSON
 # @stable      yes
@@ -16,7 +17,7 @@ from urllib import urlencode
 from dateutil import parser
 
 # engine dependent config
-categories = ['general', 'images'] #TODO , 'music', 'videos', 'files'
+categories = ['general', 'images']  # TODO , 'music', 'videos', 'files'
 paging = True
 language_support = True
 number_of_results = 5
@@ -28,7 +29,7 @@ search_url = '/yacysearch.json?{query}&startRecord={offset}&maximumRecords={limi
 # yacy specific type-definitions
 search_types = {'general': 'text',
                 'images': 'image',
-                'files': 'app',               
+                'files': 'app',
                 'music': 'audio',
                 'videos': 'video'}
 
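The categories line shows the inline-comment rules: at least two spaces before the "#" (E261) and a "# " at its start (E262). Before and after, as plain Python:

    categories = ['general', 'images'] #TODO bad: one space before '#', no '# '
    categories = ['general', 'images']  # TODO ok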

searx/engines/yahoo.py  (+8 -7)

@@ -1,8 +1,9 @@
 ## Yahoo (Web)
-# 
+#
 # @website     https://search.yahoo.com/web
-# @provide-api yes (https://developer.yahoo.com/boss/search/), $0.80/1000 queries
-# 
+# @provide-api yes (https://developer.yahoo.com/boss/search/),
+#              $0.80/1000 queries
+#
 # @using-api   no (because pricing)
 # @results     HTML (using search portal)
 # @stable      no (HTML can change)
@@ -40,8 +41,8 @@ def parse_url(url_string):
         if endpos > -1:
             endpositions.append(endpos)
 
-    if start==0 or len(endpositions) == 0:
-        return url_string        
+    if start == 0 or len(endpositions) == 0:
+        return url_string
     else:
         end = min(endpositions)
         return unquote(url_string[start:end])
@@ -84,8 +85,8 @@ def response(resp):
         content = extract_text(result.xpath(content_xpath)[0])
 
         # append result
-        results.append({'url': url, 
-                        'title': title, 
+        results.append({'url': url,
+                        'title': title,
                         'content': content})
 
     # if no suggestion found, return results

searx/engines/youtube.py  (+2 -2)

@@ -1,8 +1,8 @@
 ## Youtube (Videos)
-# 
+#
 # @website     https://www.youtube.com/
 # @provide-api yes (http://gdata-samples-youtube-search-py.appspot.com/)
-# 
+#
 # @using-api   yes
 # @results     JSON
 # @stable      yes