
[fix] pep8 compatibility

Adam Tauber, 9 years ago
commit bd22e9a336

manage.sh  (+4 -1)

 pep8_check() {
     echo '[!] Running pep8 check'
-    pep8 --max-line-length=120 "$SEARX_DIR" "$BASE_DIR/tests"
+    # ignored rules:
+    #  E402 module level import not at top of file
+    #  W503 line break before binary operator
+    pep8 --max-line-length=120 --ignore "E402,W503" "$SEARX_DIR" "$BASE_DIR/tests"
 }

 unit_tests() {
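
For reference, the two ignored checks cover patterns the codebase keeps on purpose: E402 fires when a module-level import appears after other top-level statements, and W503 fires when a continuation line starts with a binary operator. A minimal, hypothetical snippet (not taken from searx) that would trigger both:

    import sys
    sys.path.insert(0, './searx')  # a statement before the next import ...
    import os                      # ... makes pep8 report E402 here

    total = (1
             + 2)                  # the leading '+' makes pep8 report W503

Note that passing an explicit --ignore list replaces pep8's built-in default ignores (which include E226, missing whitespace around arithmetic operator), which is presumably why the operator-spacing fixes in the engine files below became necessary.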

requirements-dev.txt  (+1 -1)

 babel==2.2.0
-flake8==2.5.1
 mock==1.0.1
 nose2[coverage-plugin]
+pep8==1.7.0
 plone.testing==4.0.15
 robotframework-selenium2library==1.7.4
 robotsuite==1.7.0
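
With flake8 dropped, pep8 1.7.0 is the only style checker the dev environment installs. As a rough sketch (paths and exit handling are illustrative, not taken from manage.sh), the check that pep8_check() runs from the shell could also be driven through pep8's Python API:

    import pep8

    # mirror the manage.sh invocation: 120-char lines, E402/W503 ignored
    style = pep8.StyleGuide(max_line_length=120, ignore=['E402', 'W503'])
    report = style.check_files(['searx', 'tests'])
    if report.total_errors:
        raise SystemExit(1)  # non-zero exit, like a failing shell check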

searx/autocomplete.py  (+2 -4)

     # dbpedia autocompleter, no HTTPS
     autocomplete_url = 'http://lookup.dbpedia.org/api/search.asmx/KeywordSearch?'

-    response = get(autocomplete_url
-                   + urlencode(dict(QueryString=query)))
+    response = get(autocomplete_url + urlencode(dict(QueryString=query)))

     results = []

...

     # google autocompleter
     autocomplete_url = 'https://suggestqueries.google.com/complete/search?client=toolbar&'

-    response = get(autocomplete_url
-                   + urlencode(dict(q=query)))
+    response = get(autocomplete_url + urlencode(dict(q=query)))

     results = []

searx/engines/blekko_images.py  (+1 -1)

                           c=c)

     if params['pageno'] != 1:
-        params['url'] += '&page={pageno}'.format(pageno=(params['pageno']-1))
+        params['url'] += '&page={pageno}'.format(pageno=(params['pageno'] - 1))

     # let Blekko know we wan't have profiling
     params['cookies']['tag_lesslogging'] = '1'

searx/engines/btdigg.py  (+1 -1)

 # do search-request
 def request(query, params):
     params['url'] = search_url.format(search_term=quote(query),
-                                      pageno=params['pageno']-1)
+                                      pageno=params['pageno'] - 1)

     return params

searx/engines/deviantart.py  (+1 -1)

 # search-url
 base_url = 'https://www.deviantart.com/'
-search_url = base_url+'browse/all/?offset={offset}&{query}'
+search_url = base_url + 'browse/all/?offset={offset}&{query}'


 # do search-request

searx/engines/digg.py  (+1 -1)

 # search-url
 base_url = 'https://digg.com/'
-search_url = base_url+'api/search/{query}.json?position={position}&format=html'
+search_url = base_url + 'api/search/{query}.json?position={position}&format=html'

 # specific xpath variables
 results_xpath = '//article'

searx/engines/faroo.py  (+1 -1)

     for result in search_res['results']:
         if result['news']:
             # timestamp (milliseconds since 1970)
-            publishedDate = datetime.datetime.fromtimestamp(result['date']/1000.0)  # noqa
+            publishedDate = datetime.datetime.fromtimestamp(result['date'] / 1000.0)  # noqa

             # append news result
             results.append({'url': result['url'],

searx/engines/searchcode_code.py  (+2 -2)

 # search-url
 url = 'https://searchcode.com/'
-search_url = url+'api/codesearch_I/?{query}&p={pageno}'
+search_url = url + 'api/codesearch_I/?{query}&p={pageno}'

 # special code-endings which are not recognised by the file ending
 code_endings = {'cs': 'c#',

...

 # do search-request
 def request(query, params):
     params['url'] = search_url.format(query=urlencode({'q': query}),
-                                      pageno=params['pageno']-1)
+                                      pageno=params['pageno'] - 1)

     # Disable SSL verification
     # error: (60) SSL certificate problem: unable to get local issuer

searx/engines/searchcode_doc.py  (+2 -2)

 # search-url
 url = 'https://searchcode.com/'
-search_url = url+'api/search_IV/?{query}&p={pageno}'
+search_url = url + 'api/search_IV/?{query}&p={pageno}'


 # do search-request
 def request(query, params):
     params['url'] = search_url.format(query=urlencode({'q': query}),
-                                      pageno=params['pageno']-1)
+                                      pageno=params['pageno'] - 1)

     # Disable SSL verification
     # error: (60) SSL certificate problem: unable to get local issuer

searx/engines/stackoverflow.py  (+1 -1)

 # search-url
 url = 'https://stackoverflow.com/'
-search_url = url+'search?{query}&page={pageno}'
+search_url = url + 'search?{query}&page={pageno}'

 # specific xpath variables
 results_xpath = '//div[contains(@class,"question-summary")]'

searx/engines/startpage.py  (+4 -4)

         # check if search result starts with something like: "2 Sep 2014 ... "
         if re.match("^([1-9]|[1-2][0-9]|3[0-1]) [A-Z][a-z]{2} [0-9]{4} \.\.\. ", content):
-            date_pos = content.find('...')+4
-            date_string = content[0:date_pos-5]
+            date_pos = content.find('...') + 4
+            date_string = content[0:date_pos - 5]
             published_date = parser.parse(date_string, dayfirst=True)

             # fix content string

...

         # check if search result starts with something like: "5 days ago ... "
         elif re.match("^[0-9]+ days? ago \.\.\. ", content):
-            date_pos = content.find('...')+4
-            date_string = content[0:date_pos-5]
+            date_pos = content.find('...') + 4
+            date_string = content[0:date_pos - 5]

             # calculate datetime
             published_date = datetime.now() - timedelta(days=int(re.match(r'\d+', date_string).group()))

searx/engines/wikidata.py  (+2 -2)

     if precision < 0.0003:
         zoom = 19
     else:
-        zoom = int(15 - precision*8.8322 + precision*precision*0.625447)
+        zoom = int(15 - precision * 8.8322 + precision * precision * 0.625447)

     url = url_map\
         .replace('{latitude}', str(value.get('latitude', 0)))\

...

 def get_wiki_firstlanguage(result, wikipatternid):
     for k in result.get('sitelinks', {}).keys():
-        if k.endswith(wikipatternid) and len(k) == (2+len(wikipatternid)):
+        if k.endswith(wikipatternid) and len(k) == (2 + len(wikipatternid)):
             return k[0:2]
     return None

searx/engines/www1x.py  (+1 -1)

 # search-url
 base_url = 'https://1x.com'
-search_url = base_url+'/backend/search.php?{query}'
+search_url = base_url + '/backend/search.php?{query}'


 # do search-request

searx/engines/xpath.py  (+2 -2)

     if url.startswith('//'):
         # add http or https to this kind of url //example.com/
         parsed_search_url = urlparse(search_url)
-        url = parsed_search_url.scheme+url
+        url = parsed_search_url.scheme + url
     elif url.startswith('/'):
         # fix relative url to the search engine
         url = urljoin(search_url, url)

...

         p = parsed_url.path
         mark = p.find('/**')
         if mark != -1:
-            return unquote(p[mark+3:]).decode('utf-8')
+            return unquote(p[mark + 3:]).decode('utf-8')

     return url

searx/engines/yandex.py  (+1 -1)

 def request(query, params):
     lang = params['language'].split('_')[0]
     host = base_url.format(tld=language_map.get(lang) or default_tld)
-    params['url'] = host + search_url.format(page=params['pageno']-1,
+    params['url'] = host + search_url.format(page=params['pageno'] - 1,
                                              query=urlencode({'text': query}))
     return params

searx/plugins/https_rewrite.py  (+2 -2)

             # into a valid python regex group
             rule_from = ruleset.attrib['from'].replace('$', '\\')
             if rule_from.endswith('\\'):
-                rule_from = rule_from[:-1]+'$'
+                rule_from = rule_from[:-1] + '$'
             rule_to = ruleset.attrib['to'].replace('$', '\\')
             if rule_to.endswith('\\'):
-                rule_to = rule_to[:-1]+'$'
+                rule_to = rule_to[:-1] + '$'

             # TODO, not working yet because of the hack above,
             # currently doing that in webapp.py

searx/poolrequests.py  (+1 -1)

     return request('head', url, **kwargs)


-def post(url, data=None,  **kwargs):
+def post(url, data=None, **kwargs):
     return request('post', url, data=data, **kwargs)