Browse source

[fix] pep8 : engines (errors E121, E127, E128 and E501 still exist)

dalf 10 years ago
parent
commit
7c13d630e4
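The codes named in the commit message are pycodestyle checks: E121 (continuation line under-indented for hanging indent), E127 (continuation line over-indented for visual indent), E128 (continuation line under-indented for visual indent) and E501 (line longer than 79 characters). A minimal sketch with dummy values, not taken from the commit, of the continuation style the hunks below converge on:

results = []
url, title, content = 'https://example.com', 'Example', 'snippet'  # dummy

# Continuation lines aligned with the opening brace ("visual indent")
# trigger neither E127 (over-indented) nor E128 (under-indented), and
# wrapping keeps every line within E501's 79-character limit.
results.append({'url': url,
                'title': title,
                'content': content})

print(results)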

searx/engines/bing.py  +8 -7

@@ -1,8 +1,9 @@
 ## Bing (Web)
-# 
+#
 # @website     https://www.bing.com
-# @provide-api yes (http://datamarket.azure.com/dataset/bing/search), max. 5000 query/month
-# 
+# @provide-api yes (http://datamarket.azure.com/dataset/bing/search),
+#              max. 5000 query/month
+#
 # @using-api   no (because of query limit)
 # @results     HTML (using search portal)
 # @stable      no (HTML can change)
@@ -58,8 +59,8 @@
         content = escape(' '.join(result.xpath('.//p//text()')))
 
         # append result
-        results.append({'url': url, 
-                        'title': title, 
+        results.append({'url': url,
+                        'title': title,
                         'content': content})
 
     # return results if something is found
@@ -74,8 +75,8 @@
         content = escape(' '.join(result.xpath('.//p//text()')))
 
         # append result
-        results.append({'url': url, 
-                        'title': title, 
+        results.append({'url': url,
+                        'title': title,
                         'content': content})
 
     # return results

searx/engines/bing_images.py  +11 -9

@@ -1,17 +1,19 @@
 ## Bing (Images)
-# 
+#
 # @website     https://www.bing.com/images
-# @provide-api yes (http://datamarket.azure.com/dataset/bing/search), max. 5000 query/month
-# 
+# @provide-api yes (http://datamarket.azure.com/dataset/bing/search),
+#              max. 5000 query/month
+#
 # @using-api   no (because of query limit)
 # @results     HTML (using search portal)
 # @stable      no (HTML can change)
 # @parse       url, title, img_src
 #
-# @todo        currently there are up to 35 images receive per page, because bing does not parse count=10. limited response to 10 images
+# @todo        currently there are up to 35 images receive per page,
+#              because bing does not parse count=10.
+#              limited response to 10 images
 
 from urllib import urlencode
-from cgi import escape
 from lxml import html
 from yaml import load
 import re
@@ -51,15 +53,15 @@
     dom = html.fromstring(resp.content)
 
     # init regex for yaml-parsing
-    p = re.compile( '({|,)([a-z]+):(")')
+    p = re.compile('({|,)([a-z]+):(")')
 
     # parse results
     for result in dom.xpath('//div[@class="dg_u"]'):
         link = result.xpath('./a')[0]
 
         # parse yaml-data (it is required to add a space, to make it parsable)
-        yaml_data = load(p.sub( r'\1\2: \3', link.attrib.get('m')))
- 
+        yaml_data = load(p.sub(r'\1\2: \3', link.attrib.get('m')))
+
         title = link.attrib.get('t1')
         #url = 'http://' + link.attrib.get('t3')
         url = yaml_data.get('surl')
@@ -69,7 +71,7 @@
         results.append({'template': 'images.html',
                         'url': url,
                         'title': title,
-                        'content': '',  
+                        'content': '',
                         'img_src': img_src})
 
         # TODO stop parsing if 10 images are found
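Aside from the pep8 cleanups, the hunk above touches this file's core trick: Bing embeds JSON-like metadata in each link's m attribute without spaces after the key colons, so the engine inserts one by regex before handing the string to PyYAML. A standalone sketch of that repair, with a made-up m value:

import re
from yaml import load

# init regex for yaml-parsing, as in the engine above
p = re.compile('({|,)([a-z]+):(")')

# made-up example of Bing's "m" attribute (not real Bing output)
m_attrib = '{surl:"https://example.com",imgurl:"https://example.com/a.jpg"}'

# r'\1\2: \3' re-emits key and quote with the space YAML requires
yaml_data = load(p.sub(r'\1\2: \3', m_attrib))

print(yaml_data.get('surl'))  # https://example.com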

searx/engines/bing_news.py  +10 -9

@@ -1,8 +1,9 @@
 ## Bing (News)
-# 
+#
 # @website     https://www.bing.com/news
-# @provide-api yes (http://datamarket.azure.com/dataset/bing/search), max. 5000 query/month
-# 
+# @provide-api yes (http://datamarket.azure.com/dataset/bing/search),
+#              max. 5000 query/month
+#
 # @using-api   no (because of query limit)
 # @results     HTML (using search portal)
 # @stable      no (HTML can change)
@@ -57,12 +58,12 @@
         url = link.attrib.get('href')
         title = ' '.join(link.xpath('.//text()'))
         contentXPath = result.xpath('.//div[@class="sn_txt"]/div//span[@class="sn_snip"]//text()')
-        if contentXPath != None:
+        if contentXPath is not None:
             content = escape(' '.join(contentXPath))
-            
+
         # parse publishedDate
         publishedDateXPath = result.xpath('.//div[@class="sn_txt"]/div//span[contains(@class,"sn_ST")]//span[contains(@class,"sn_tm")]//text()')
-        if publishedDateXPath != None:
+        if publishedDateXPath is not None:
             publishedDate = escape(' '.join(publishedDateXPath))
 
         if re.match("^[0-9]+ minute(s|) ago$", publishedDate):
@@ -89,9 +90,9 @@
             except TypeError:
                 # FIXME
                 publishedDate = datetime.now()
-                
+
         # append result
-        results.append({'url': url, 
-                        'title': title, 
+        results.append({'url': url,
+                        'title': title,
                         'publishedDate': publishedDate,
                         'content': content})
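Beyond wrapping and whitespace, the hunks above swap != None for is not None (pycodestyle's E711, not listed in the commit message): None is a singleton, so identity is the reliable test, while == can be redefined by an operand's __eq__. A tiny sketch with dummy data:

# dummy stand-in for the XPath result above
contentXPath = ['Bing', 'News', 'snippet']

# identity check against the None singleton; == may be overridden
if contentXPath is not None:
    content = ' '.join(contentXPath)
    print(content)  # Bing News snippet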

searx/engines/currency_convert.py  +1 -1

@@ -55,6 +55,6 @@
         resp.search_params['to'].lower()
     )
 
-    results.append({'answer' : answer, 'url': url})
+    results.append({'answer': answer, 'url': url})
 
     return results

searx/engines/dailymotion.py  +2 -3

@@ -1,8 +1,8 @@
 ## Dailymotion (Videos)
-# 
+#
 # @website     https://www.dailymotion.com
 # @provide-api yes (http://www.dailymotion.com/developer)
-# 
+#
 # @using-api   yes
 # @results     JSON
 # @stable      yes
@@ -12,7 +12,6 @@
 
 from urllib import urlencode
 from json import loads
-from lxml import html
 
 # engine dependent config
 categories = ['videos']

searx/engines/deviantart.py  +2 -2

@@ -1,8 +1,8 @@
 ## Deviantart (Images)
-# 
+#
 # @website     https://www.deviantart.com/
 # @provide-api yes (https://www.deviantart.com/developers/) (RSS)
-# 
+#
 # @using-api   no (TODO, rewrite to api)
 # @results     HTML
 # @stable      no (HTML can change)

searx/engines/duckduckgo.py  +7 -5

@@ -1,15 +1,17 @@
 ## DuckDuckGo (Web)
-# 
+#
 # @website     https://duckduckgo.com/
-# @provide-api yes (https://duckduckgo.com/api), but not all results from search-site
-# 
+# @provide-api yes (https://duckduckgo.com/api),
+#              but not all results from search-site
+#
 # @using-api   no
 # @results     HTML (using search portal)
 # @stable      no (HTML can change)
 # @parse       url, title, content
 #
 # @todo        rewrite to api
-# @todo        language support (the current used site does not support language-change)
+# @todo        language support
+#              (the current used site does not support language-change)
 
 from urllib import urlencode
 from lxml.html import fromstring
@@ -37,7 +39,7 @@
     if params['language'] == 'all':
         locale = 'en-us'
     else:
-        locale = params['language'].replace('_','-').lower()
+        locale = params['language'].replace('_', '-').lower()
 
     params['url'] = url.format(
         query=urlencode({'q': query, 'kl': locale}),

searx/engines/dummy.py  +1 -1

@@ -1,5 +1,5 @@
 ## Dummy
-# 
+#
 # @results     empty array
 # @stable      yes
 

searx/engines/faroo.py  +6 -5

@@ -1,8 +1,8 @@
 ## Faroo (Web, News)
-# 
+#
 # @website     http://www.faroo.com
 # @provide-api yes (http://www.faroo.com/hp/api/api.html), require API-key
-# 
+#
 # @using-api   yes
 # @results     JSON
 # @stable      yes
@@ -24,9 +24,10 @@
 url = 'http://www.faroo.com/'
 search_url = url + 'api?{query}&start={offset}&length={number_of_results}&l={language}&src={categorie}&i=false&f=json&key={api_key}'
 
-search_category = {'general': 'web',                
+search_category = {'general': 'web',
                 'news': 'news'}
 
+
 # do search-request
 def request(query, params):
     offset = (params['pageno']-1) * number_of_results + 1
@@ -48,7 +49,7 @@
                                       query=urlencode({'q': query}),
                                       language=language,
                                       categorie=categorie,
-                                      api_key=api_key )
+                                      api_key=api_key)
 
     # using searx User-Agent
     params['headers']['User-Agent'] = searx_useragent()
@@ -101,7 +102,7 @@
             results.append({'template': 'images.html',
                             'url': result['url'],
                             'title': result['title'],
-                            'content': result['kwic'],  
+                            'content': result['kwic'],
                             'img_src': result['iurl']})
 
     # return results

searx/engines/generalfile.py  +2 -2

@@ -1,8 +1,8 @@
 ## General Files (Files)
-# 
+#
 # @website     http://www.general-files.org
 # @provide-api no (nothing found)
-# 
+#
 # @using-api   no (because nothing found)
 # @results     HTML (using search portal)
 # @stable      no (HTML can change)

searx/engines/github.py  +2 -2

@@ -1,8 +1,8 @@
 ## Github (It)
-# 
+#
 # @website     https://github.com/
 # @provide-api yes (https://developer.github.com/v3/)
-# 
+#
 # @using-api   yes
 # @results     JSON
 # @stable      yes (using api)

searx/engines/google_images.py  +4 -3

@@ -1,8 +1,9 @@
 ## Google (Images)
-# 
+#
 # @website     https://www.google.com
-# @provide-api yes (https://developers.google.com/web-search/docs/), deprecated!
-# 
+# @provide-api yes (https://developers.google.com/web-search/docs/),
+#              deprecated!
+#
 # @using-api   yes
 # @results     JSON
 # @stable      yes (but deprecated)

searx/engines/google_news.py  +4 -3

@@ -1,8 +1,9 @@
 ## Google (News)
-# 
+#
 # @website     https://www.google.com
-# @provide-api yes (https://developers.google.com/web-search/docs/), deprecated!
-# 
+# @provide-api yes (https://developers.google.com/web-search/docs/),
+#              deprecated!
+#
 # @using-api   yes
 # @results     JSON
 # @stable      yes (but deprecated)

searx/engines/openstreetmap.py  +15 -15

@@ -39,16 +39,16 @@
         url = result_base_url.format(osm_type=osm_type,
                                      osm_id=r['osm_id'])
 
-        osm = {'type':osm_type,
-               'id':r['osm_id']}
+        osm = {'type': osm_type,
+               'id': r['osm_id']}
 
-        geojson =  r.get('geojson')
+        geojson = r.get('geojson')
 
         # if no geojson is found and osm_type is a node, add geojson Point
         if not geojson and\
            osm_type == 'node':
-            geojson = {u'type':u'Point', 
-                       u'coordinates':[r['lon'],r['lat']]}
+            geojson = {u'type': u'Point',
+                       u'coordinates': [r['lon'], r['lat']]}
 
         address_raw = r.get('address')
         address = {}
@@ -59,20 +59,20 @@
            r['class'] == 'tourism' or\
           r['class'] == 'leisure':
             if address_raw.get('address29'):
-                address = {'name':address_raw.get('address29')}
+                address = {'name': address_raw.get('address29')}
             else:
-                address = {'name':address_raw.get(r['type'])}
+                address = {'name': address_raw.get(r['type'])}
 
         # add rest of adressdata, if something is already found
         if address.get('name'):
-            address.update({'house_number':address_raw.get('house_number'),
-                       'road':address_raw.get('road'),
-                       'locality':address_raw.get('city',
-                                  address_raw.get('town', 
-                                  address_raw.get('village'))),
-                       'postcode':address_raw.get('postcode'),
-                       'country':address_raw.get('country'),
-                       'country_code':address_raw.get('country_code')})
+            address.update({'house_number': address_raw.get('house_number'),
+                           'road': address_raw.get('road'),
+                           'locality': address_raw.get('city',
+                                       address_raw.get('town',
+                                       address_raw.get('village'))),
+                           'postcode': address_raw.get('postcode'),
+                           'country': address_raw.get('country'),
+                           'country_code': address_raw.get('country_code')})
         else:
             address = None
 

searx/engines/piratebay.py  +3 -3

@@ -1,8 +1,8 @@
 ## Piratebay (Videos, Music, Files)
-# 
+#
 # @website     https://thepiratebay.se
 # @provide-api no (nothing found)
-# 
+#
 # @using-api   no
 # @results     HTML (using search portal)
 # @stable      yes (HTML can change)
@@ -23,7 +23,7 @@
 search_url = url + 'search/{search_term}/{pageno}/99/{search_type}'
 
 # piratebay specific type-definitions
-search_types = {'files': '0',                
+search_types = {'files': '0',
                 'music': '100',
                 'videos': '200'}
 

searx/engines/soundcloud.py  +2 -2

@@ -1,8 +1,8 @@
 ## Soundcloud (Music)
-# 
+#
 # @website     https://soundcloud.com
 # @provide-api yes (https://developers.soundcloud.com/)
-# 
+#
 # @using-api   yes
 # @results     JSON
 # @stable      yes

searx/engines/stackoverflow.py  +4 -4

@@ -1,8 +1,8 @@
 ## Stackoverflow (It)
-# 
+#
 # @website     https://stackoverflow.com/
 # @provide-api not clear (https://api.stackexchange.com/docs/advanced-search)
-# 
+#
 # @using-api   no
 # @results     HTML
 # @stable      no (HTML can change)
@@ -50,8 +50,8 @@
         content = escape(' '.join(result.xpath(content_xpath)))
 
         # append result
-        results.append({'url': href, 
-                        'title': title, 
+        results.append({'url': href,
+                        'title': title,
                         'content': content})
 
     # return results

searx/engines/twitter.py  +2 -2

@@ -1,8 +1,8 @@
 ## Twitter (Social media)
-# 
+#
 # @website     https://www.bing.com/news
 # @provide-api yes (https://dev.twitter.com/docs/using-search)
-# 
+#
 # @using-api   no
 # @results     HTML (using search portal)
 # @stable      no (HTML can change)

searx/engines/vimeo.py  +7 -5

@@ -1,8 +1,9 @@
 ## Vimeo (Videos)
-# 
+#
 # @website     https://vimeo.com/
-# @provide-api yes (http://developer.vimeo.com/api), they have a maximum count of queries/hour
-# 
+# @provide-api yes (http://developer.vimeo.com/api),
+#              they have a maximum count of queries/hour
+#
 # @using-api   no (TODO, rewrite to api)
 # @results     HTML (using search portal)
 # @stable      no (HTML can change)
@@ -35,10 +36,11 @@
 
 # do search-request
 def request(query, params):
-    params['url'] = search_url.format(pageno=params['pageno'] ,
+    params['url'] = search_url.format(pageno=params['pageno'],
                                       query=urlencode({'q': query}))
 
     # TODO required?
-    params['cookies']['__utma'] = '00000000.000#0000000.0000000000.0000000000.0000000000.0'
+    params['cookies']['__utma'] =\
+        '00000000.000#0000000.0000000000.0000000000.0000000000.0'
 
     return params
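The same hunk shows the request-building pattern these engines share: a search_url template filled through named str.format placeholders, with the query urlencoded. A self-contained sketch under assumed values (the template below is illustrative, not vimeo's actual one):

from urllib import urlencode  # Python 2, as in the engines above

search_url = 'https://example.com/search/page:{pageno}?{query}'  # assumed

pageno = 2
query = 'searx'

# named placeholders keep the call readable and the lines short
url = search_url.format(pageno=pageno,
                        query=urlencode({'q': query}))

print(url)  # https://example.com/search/page:2?q=searx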

searx/engines/yacy.py  +6 -5

@@ -1,8 +1,9 @@
 ## Yacy (Web, Images, Videos, Music, Files)
-# 
+#
 # @website     http://yacy.net
-# @provide-api yes (http://www.yacy-websuche.de/wiki/index.php/Dev:APIyacysearch)
-# 
+# @provide-api yes
+#              (http://www.yacy-websuche.de/wiki/index.php/Dev:APIyacysearch)
+#
 # @using-api   yes
 # @results     JSON
 # @stable      yes
@@ -16,7 +17,7 @@
 from dateutil import parser
 
 # engine dependent config
-categories = ['general', 'images'] #TODO , 'music', 'videos', 'files'
+categories = ['general', 'images']  # TODO , 'music', 'videos', 'files'
 paging = True
 language_support = True
 number_of_results = 5
@@ -28,7 +29,7 @@
 # yacy specific type-definitions
 search_types = {'general': 'text',
                 'images': 'image',
-                'files': 'app',               
+                'files': 'app',
                 'music': 'audio',
                 'videos': 'video'}
 

searx/engines/yahoo.py  +8 -7

@@ -1,8 +1,9 @@
 ## Yahoo (Web)
-# 
+#
 # @website     https://search.yahoo.com/web
-# @provide-api yes (https://developer.yahoo.com/boss/search/), $0.80/1000 queries
-# 
+# @provide-api yes (https://developer.yahoo.com/boss/search/),
+#              $0.80/1000 queries
+#
 # @using-api   no (because pricing)
 # @results     HTML (using search portal)
 # @stable      no (HTML can change)
@@ -40,8 +41,8 @@
         if endpos > -1:
             endpositions.append(endpos)
 
-    if start==0 or len(endpositions) == 0:
-        return url_string        
+    if start == 0 or len(endpositions) == 0:
+        return url_string
     else:
         end = min(endpositions)
         return unquote(url_string[start:end])
@@ -84,8 +85,8 @@
         content = extract_text(result.xpath(content_xpath)[0])
 
         # append result
-        results.append({'url': url, 
-                        'title': title, 
+        results.append({'url': url,
+                        'title': title,
                         'content': content})
 
     # if no suggestion found, return results

searx/engines/youtube.py  +2 -2

@@ -1,8 +1,8 @@
 ## Youtube (Videos)
-# 
+#
 # @website     https://www.youtube.com/
 # @provide-api yes (http://gdata-samples-youtube-search-py.appspot.com/)
-# 
+#
 # @using-api   yes
 # @results     JSON
 # @stable      yes