
[fix] pep/flake8 compatibility

asciimoo committed 11 years ago (commit b2492c94f4)
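
Most hunks below apply the same few flake8/PEP 8 fixes: identity comparison with None (E711), long lines either wrapped or marked "# noqa" (E501), two blank lines before top-level definitions (E302), and normalized whitespace around ':' and ','. A minimal sketch of these recurring before/after patterns, for reference only; the names in it are illustrative and not taken from any searx module:

    # E711: compare against None by identity
    value = None
    if value is None:        # was: if value == None:
        pass

    # E501: either wrap long expressions across lines ...
    result = {'url': 'https://example.org/',
              'title': 'example',
              'content': ''}

    # ... or keep the line as-is and silence the checker for it
    search_url = 'https://example.org/search?q={query}&lang=en&format=json&safe=off'  # noqa


    # E302: two blank lines before a top-level definition
    def request(query, params):
        params['url'] = search_url.format(query='test')
        return params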

searx/engines/__init__.py  (+1, -1)

     for engine_attr in dir(engine):
         if engine_attr.startswith('_'):
             continue
-        if getattr(engine, engine_attr) == None:
+        if getattr(engine, engine_attr) is None:
             print '[E] Engine config error: Missing attribute "{0}.{1}"'.format(engine.name, engine_attr)  # noqa
             sys.exit(1)
     engines[engine.name] = engine

searx/engines/currency_convert.py  (+5, -3)

 url = 'http://finance.yahoo.com/d/quotes.csv?e=.csv&f=sl1d1t1&s={query}=X'
 weight = 100
 
-parser_re = re.compile(r'^\W*(\d+(?:\.\d+)?)\W*([a-z]{3})\W*(?:in)?\W*([a-z]{3})\W*$', re.I)
+parser_re = re.compile(r'^\W*(\d+(?:\.\d+)?)\W*([a-z]{3})\W*(?:in)?\W*([a-z]{3})\W*$', re.I)  # noqa
 
 
 def request(query, params):
...
         resp.search_params['ammount'] * conversion_rate
     )
 
-    content = '1 {0} is {1} {2}'.format(resp.search_params['from'], conversion_rate, resp.search_params['to'])
+    content = '1 {0} is {1} {2}'.format(resp.search_params['from'],
+                                        conversion_rate,
+                                        resp.search_params['to'])
     now_date = datetime.now().strftime('%Y%m%d')
-    url = 'http://finance.yahoo.com/currency/converter-results/{0}/{1}-{2}-to-{3}.html'
+    url = 'http://finance.yahoo.com/currency/converter-results/{0}/{1}-{2}-to-{3}.html'  # noqa
     url = url.format(
         now_date,
         resp.search_params['ammount'],

searx/engines/dailymotion.py  (+5, -2)

 locale = 'en_US'
 
 # see http://www.dailymotion.com/doc/api/obj-video.html
-search_url = 'https://api.dailymotion.com/videos?fields=title,description,duration,url,thumbnail_360_url&sort=relevance&limit=25&page=1&{query}'
+search_url = 'https://api.dailymotion.com/videos?fields=title,description,duration,url,thumbnail_360_url&sort=relevance&limit=25&page=1&{query}'  # noqa
+
+# TODO use video result template
+content_tpl = '<a href="{0}" title="{0}" ><img src="{1}" /></a><br />'
 
 
 def request(query, params):
...
         title = res['title']
         url = res['url']
         if res['thumbnail_360_url']:
-            content = '<a href="{0}" title="{0}" ><img src="{1}" /></a><br />'.format(url, res['thumbnail_360_url'])
+            content = content_tpl.format(url, res['thumbnail_360_url'])
         else:
             content = ''
         if res['description']:

searx/engines/deviantart.py  (+6, -2)

 base_url = 'https://www.deviantart.com/'
 search_url = base_url+'search?'
 
+
 def request(query, params):
     global search_url
     params['url'] = search_url + urlencode({'q': query})
...
     for result in dom.xpath('//div[contains(@class, "tt-a tt-fh")]'):
         link = result.xpath('.//a[contains(@class, "thumb")]')[0]
         url = urljoin(base_url, link.attrib.get('href'))
-        title_links = result.xpath('.//span[@class="details"]//a[contains(@class, "t")]')
+        title_links = result.xpath('.//span[@class="details"]//a[contains(@class, "t")]')  # noqa
         title = ''.join(title_links[0].xpath('.//text()'))
         img_src = link.xpath('.//img')[0].attrib['src']
-        results.append({'url': url, 'title': title, 'img_src': img_src, 'template': 'images.html'})
+        results.append({'url': url,
+                        'title': title,
+                        'img_src': img_src,
+                        'template': 'images.html'})
     return results

searx/engines/duckduckgo.py  (+7, -5)

 search_url = url + 'd.js?{query}&p=1&s=0'
 locale = 'us-en'
 
+
 def request(query, params):
-    params['url'] = search_url.format(query=urlencode({'q': query, 'l': locale}))
+    q = urlencode({'q': query,
+                   'l': locale})
+    params['url'] = search_url.format(query=q)
     return params
 
 
...
     for r in search_res:
         if not r.get('t'):
             continue
-        results.append({'title': r['t']
-                       ,'content': html_to_text(r['a'])
-                       ,'url': r['u']
-                       })
+        results.append({'title': r['t'],
+                       'content': html_to_text(r['a']),
+                       'url': r['u']})
     return results

searx/engines/duckduckgo_definitions.py  (+6, -6)

 
 url = 'http://api.duckduckgo.com/?{query}&format=json&pretty=0&no_redirect=1'
 
+
 def request(query, params):
-    params['url'] =  url.format(query=urlencode({'q': query}))
+    params['url'] = url.format(query=urlencode({'q': query}))
     return params
 
 
...
     results = []
     if 'Definition' in search_res:
         if search_res.get('AbstractURL'):
-            res = {'title'    : search_res.get('Heading', '')
-                  ,'content'  : search_res.get('Definition', '')
-                  ,'url'      : search_res.get('AbstractURL', '')
-                  ,'class'   : 'definition_result'
-                  }
+            res = {'title': search_res.get('Heading', ''),
+                   'content': search_res.get('Definition', ''),
+                   'url': search_res.get('AbstractURL', ''),
+                   'class': 'definition_result'}
             results.append(res)
 
     return results

searx/engines/filecrop.py  (+17, -8)

 from HTMLParser import HTMLParser
 
 url = 'http://www.filecrop.com/'
-search_url = url + '/search.php?{query}&size_i=0&size_f=100000000&engine_r=1&engine_d=1&engine_e=1&engine_4=1&engine_m=1'
+search_url = url + '/search.php?{query}&size_i=0&size_f=100000000&engine_r=1&engine_d=1&engine_e=1&engine_4=1&engine_m=1'  # noqa
+
 
 class FilecropResultParser(HTMLParser):
     def __init__(self):
...
     def handle_starttag(self, tag, attrs):
 
         if tag == 'tr':
-            if ('bgcolor', '#edeff5') in attrs or ('bgcolor', '#ffffff') in attrs:
+            if ('bgcolor', '#edeff5') in attrs or\
+               ('bgcolor', '#ffffff') in attrs:
                 self.__start_processing = True
 
         if not self.__start_processing:
             return
 
         if tag == 'label':
-            self.result['title'] = [attr[1] for attr in attrs if attr[0] == 'title'][0]
-        elif tag == 'a' and ('rel', 'nofollow') in attrs and ('class', 'sourcelink') in attrs:
+            self.result['title'] = [attr[1] for attr in attrs
+                                    if attr[0] == 'title'][0]
+        elif tag == 'a' and ('rel', 'nofollow') in attrs\
+                and ('class', 'sourcelink') in attrs:
             if 'content' in self.result:
-                self.result['content'] += [attr[1] for attr in attrs if attr[0] == 'title'][0]
+                self.result['content'] += [attr[1] for attr in attrs
+                                           if attr[0] == 'title'][0]
             else:
-                self.result['content'] = [attr[1] for attr in attrs if attr[0] == 'title'][0]
+                self.result['content'] = [attr[1] for attr in attrs
+                                          if attr[0] == 'title'][0]
             self.result['content'] += ' '
         elif tag == 'a':
-            self.result['url'] = url + [attr[1] for attr in attrs if attr[0] == 'href'][0]
+            self.result['url'] = url + [attr[1] for attr in attrs
+                                        if attr[0] == 'href'][0]
 
     def handle_endtag(self, tag):
         if self.__start_processing is False:
...
 
         self.data_counter += 1
 
+
 def request(query, params):
-    params['url'] = search_url.format(query=urlencode({'w' :query}))
+    params['url'] = search_url.format(query=urlencode({'w': query}))
     return params
 
+
 def response(resp):
     parser = FilecropResultParser()
     parser.feed(resp.text)

searx/engines/flickr.py  (+8, -2)

 
 url = 'https://secure.flickr.com/'
 search_url = url+'search/?{query}'
+results_xpath = '//div[@id="thumbnails"]//a[@class="rapidnofollow photo-click" and @data-track="photo-click"]'  # noqa
+
 
 def request(query, params):
     params['url'] = search_url.format(query=urlencode({'q': query}))
     return params
 
+
 def response(resp):
     global base_url
     results = []
     dom = html.fromstring(resp.text)
-    for result in dom.xpath('//div[@id="thumbnails"]//a[@class="rapidnofollow photo-click" and @data-track="photo-click"]'):
+    for result in dom.xpath(results_xpath):
         href = urljoin(url, result.attrib.get('href'))
         img = result.xpath('.//img')[0]
         title = img.attrib.get('alt', '')
         img_src = img.attrib.get('data-defer-src')
         if not img_src:
             continue
-        results.append({'url': href, 'title': title, 'img_src': img_src, 'template': 'images.html'})
+        results.append({'url': href,
+                        'title': title,
+                        'img_src': img_src,
+                        'template': 'images.html'})
     return results

searx/engines/github.py  (+5, -2)

 
 categories = ['it']
 
-search_url = 'https://api.github.com/search/repositories?sort=stars&order=desc&{query}'
+search_url = 'https://api.github.com/search/repositories?sort=stars&order=desc&{query}'  # noqa
+
+accept_header = 'application/vnd.github.preview.text-match+json'
+
 
 def request(query, params):
     global search_url
     params['url'] = search_url.format(query=urlencode({'q': query}))
-    params['headers']['Accept'] = 'application/vnd.github.preview.text-match+json'
+    params['headers']['Accept'] = accept_header
     return params
 
 

searx/engines/google_images.py  (+8, -2)

 categories = ['images']
 
 url = 'https://ajax.googleapis.com/'
-search_url = url + 'ajax/services/search/images?v=1.0&start=0&rsz=large&safe=off&filter=off&{query}'
+search_url = url + 'ajax/services/search/images?v=1.0&start=0&rsz=large&safe=off&filter=off&{query}'  # noqa
+
 
 def request(query, params):
     params['url'] = search_url.format(query=urlencode({'q': query}))
     return params
 
+
 def response(resp):
     results = []
     search_res = loads(resp.text)
...
         title = result['title']
         if not result['url']:
             continue
-        results.append({'url': href, 'title': title, 'content': '', 'img_src': result['url'], 'template': 'images.html'})
+        results.append({'url': href,
+                        'title': title,
+                        'content': '',
+                        'img_src': result['url'],
+                        'template': 'images.html'})
     return results

searx/engines/json_engine.py  (+14, -6)

 from json import loads
 from collections import Iterable
 
-search_url    = None
-url_query     = None
+search_url = None
+url_query = None
 content_query = None
-title_query   = None
+title_query = None
 #suggestion_xpath = ''
 
+
 def iterate(iterable):
     if type(iterable) == dict:
         it = iterable.iteritems()
...
     for index, value in it:
         yield str(index), value
 
+
 def is_iterable(obj):
-    if type(obj) == str: return False
-    if type(obj) == unicode: return False
+    if type(obj) == str:
+        return False
+    if type(obj) == unicode:
+        return False
     return isinstance(obj, Iterable)
 
+
 def parse(query):
     q = []
     for part in query.split('/'):
...
             q.append(part)
     return q
 
+
 def do_query(data, q):
     ret = []
     if not len(q):
...
 
     qkey = q[0]
 
-    for key,value in iterate(data):
+    for key, value in iterate(data):
 
         if len(q) == 1:
             if key == qkey:
...
                 ret.extend(do_query(value, q))
     return ret
 
+
 def query(data, query_string):
     q = parse(query_string)
 
     return do_query(data, q)
 
+
 def request(query, params):
     query = urlencode({'q': query})[2:]
     params['url'] = search_url.format(query=query)

searx/engines/mediawiki.py  (+4, -4)

 
 url = 'https://en.wikipedia.org/'
 
+search_url = url + 'w/api.php?action=query&list=search&{query}&srprop=timestamp&format=json'  # noqa
+
 number_of_results = 10
 
+
 def request(query, params):
-    search_url = url + 'w/api.php?action=query&list=search&{query}&srprop=timestamp&format=json'
     params['url'] = search_url.format(query=urlencode({'srsearch': query}))
     return params
 
...
 def response(resp):
     search_results = loads(resp.text)
     res = search_results.get('query', {}).get('search', [])
-
-    return [{'url': url + 'wiki/' + quote(result['title'].replace(' ', '_').encode('utf-8')),
+    return [{'url': url + 'wiki/' + quote(result['title'].replace(' ', '_').encode('utf-8')),  # noqa
         'title': result['title']} for result in res[:int(number_of_results)]]
-

searx/engines/piratebay.py  (+18, -9)

 
 url = 'https://thepiratebay.se/'
 search_url = url + 'search/{search_term}/0/99/{search_type}'
-search_types = {'videos': '200'
-               ,'music' : '100'
-               ,'files' : '0'
-               }
+search_types = {'videos': '200',
+                'music': '100',
+                'files': '0'}
+
+magnet_xpath = './/a[@title="Download this torrent using magnet"]'
+content_xpath = './/font[@class="detDesc"]//text()'
+
 
 def request(query, params):
-    params['url'] = search_url.format(search_term=quote(query), search_type=search_types.get(params['category']))
+    search_type = search_types.get(params['category'])
+    params['url'] = search_url.format(search_term=quote(query),
+                                      search_type=search_type)
     return params
 
 
...
         link = result.xpath('.//div[@class="detName"]//a')[0]
         href = urljoin(url, link.attrib.get('href'))
         title = ' '.join(link.xpath('.//text()'))
-        content = escape(' '.join(result.xpath('.//font[@class="detDesc"]//text()')))
+        content = escape(' '.join(result.xpath(content_xpath)))
         seed, leech = result.xpath('.//td[@align="right"]/text()')[:2]
-        magnetlink = result.xpath('.//a[@title="Download this torrent using magnet"]')[0]
-        results.append({'url': href, 'title': title, 'content': content,
-                        'seed': seed, 'leech': leech, 'magnetlink': magnetlink.attrib['href'],
+        magnetlink = result.xpath(magnet_xpath)[0]
+        results.append({'url': href,
+                        'title': title,
+                        'content': content,
+                        'seed': seed,
+                        'leech': leech,
+                        'magnetlink': magnetlink.attrib['href'],
                         'template': 'torrent.html'})
     return results

searx/engines/soundcloud.py  (+5, -2)

 
 guest_client_id = 'b45b1aa10f1ac2941910a7f0d10f8e28'
 url = 'https://api.soundcloud.com/'
-search_url = url + 'search?{query}&facet=model&limit=20&offset=0&linked_partitioning=1&client_id='+guest_client_id
+search_url = url + 'search?{query}&facet=model&limit=20&offset=0&linked_partitioning=1&client_id='+guest_client_id  # noqa
+
 
 def request(query, params):
     global search_url
...
         if result['kind'] in ('track', 'playlist'):
             title = result['title']
             content = result['description']
-            results.append({'url': result['permalink_url'], 'title': title, 'content': content})
+            results.append({'url': result['permalink_url'],
+                            'title': title,
+                            'content': content})
     return results

searx/engines/stackoverflow.py  (+3, -1)

 
 url = 'http://stackoverflow.com/'
 search_url = url+'search?'
+result_xpath = './/div[@class="excerpt"]//text()'
+
 
 def request(query, params):
     params['url'] = search_url + urlencode({'q': query})
...
         link = result.xpath('.//div[@class="result-link"]//a')[0]
         href = urljoin(url, link.attrib.get('href'))
         title = escape(' '.join(link.xpath('.//text()')))
-        content = escape(' '.join(result.xpath('.//div[@class="excerpt"]//text()')))
+        content = escape(' '.join(result.xpath(result_xpath)))
         results.append({'url': href, 'title': title, 'content': content})
     return results

searx/engines/startpage.py  (+2, -4)

 from urllib import urlencode
 from lxml import html
-from urlparse import urlparse
-from cgi import escape
 
 base_url = 'https://startpage.com/'
 search_url = base_url+'do/search'
 
+
 def request(query, params):
     global search_url
     query = urlencode({'q': query})[2:]
...
     results = []
     dom = html.fromstring(resp.content)
     # ads xpath //div[@id="results"]/div[@id="sponsored"]//div[@class="result"]
-    # not ads : div[@class="result"] are the direct childs of div[@id="results"]
+    # not ads: div[@class="result"] are the direct childs of div[@id="results"]
     for result in dom.xpath('//div[@id="results"]/div[@class="result"]'):
         link = result.xpath('.//h3/a')[0]
         url = link.attrib.get('href')
-        parsed_url = urlparse(url)
         title = link.text_content()
         content = result.xpath('./p[@class="desc"]')[0].text_content()
         results.append({'url': url, 'title': title, 'content': content})

searx/engines/twitter.py  (+8, -3)

 
 base_url = 'https://twitter.com/'
 search_url = base_url+'search?'
+title_xpath = './/span[@class="username js-action-profile-name"]//text()'
+content_xpath = './/p[@class="js-tweet-text tweet-text"]//text()'
+
 
 def request(query, params):
     global search_url
...
     for tweet in dom.xpath('//li[@data-item-type="tweet"]'):
         link = tweet.xpath('.//small[@class="time"]//a')[0]
         url = urljoin(base_url, link.attrib.get('href'))
-        title = ''.join(tweet.xpath('.//span[@class="username js-action-profile-name"]//text()'))
-        content = escape(''.join(tweet.xpath('.//p[@class="js-tweet-text tweet-text"]//text()')))
-        results.append({'url': url, 'title': title, 'content': content})
+        title = ''.join(tweet.xpath(title_xpath))
+        content = escape(''.join(tweet.xpath(content_xpath)))
+        results.append({'url': url,
+                        'title': title,
+                        'content': content})
     return results

searx/engines/vimeo.py  (+15, -11)

 
 base_url = 'http://vimeo.com'
 search_url = base_url + '/search?{query}'
-url_xpath     = None
+url_xpath = None
 content_xpath = None
-title_xpath   = None
+title_xpath = None
 results_xpath = ''
+content_tpl = '<a href="{0}">  <img src="{2}"/> </a>'
 
-# the cookie set by vimeo contains all the following values, but only __utma seems to be requiered
+# the cookie set by vimeo contains all the following values,
+# but only __utma seems to be requiered
 cookie = {
     #'vuid':'918282893.1027205400'
     # 'ab_bs':'%7B%223%22%3A279%7D'
-     '__utma':'00000000.000#0000000.0000000000.0000000000.0000000000.0'
+     '__utma': '00000000.000#0000000.0000000000.0000000000.0000000000.0'
     # '__utmb':'18302654.1.10.1388942090'
     #, '__utmc':'18302654'
-    #, '__utmz':'18#302654.1388942090.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none)'
+    #, '__utmz':'18#302654.1388942090.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none)'  # noqa
     #, '__utml':'search'
 }
 
+
 def request(query, params):
-    params['url'] = search_url.format(query=urlencode({'q' :query}))
+    params['url'] = search_url.format(query=urlencode({'q': query}))
     params['cookies'] = cookie
     return params
 
+
 def response(resp):
     results = []
     dom = html.fromstring(resp.text)
...
         url = base_url + result.xpath(url_xpath)[0]
         title = p.unescape(extract_text(result.xpath(title_xpath)))
         thumbnail = extract_text(result.xpath(content_xpath)[0])
-        content = '<a href="{0}">  <img src="{2}"/> </a>'.format(url, title, thumbnail)
-        results.append({'url': url
-                        , 'title': title
-                        , 'content': content
-                        , 'template':'videos.html'
-                        , 'thumbnail': thumbnail})
+        results.append({'url': url,
+                        'title': title,
+                        'content': content_tpl.format(url, title, thumbnail),
+                        'template': 'videos.html',
+                        'thumbnail': thumbnail})
     return results

searx/engines/xpath.py  (+15, -11)

 from lxml import html
 from urllib import urlencode, unquote
 from urlparse import urlparse, urljoin
-from cgi import escape
 from lxml.etree import _ElementStringResult
 
-search_url    = None
-url_xpath     = None
+search_url = None
+url_xpath = None
 content_xpath = None
-title_xpath   = None
+title_xpath = None
 suggestion_xpath = ''
 results_xpath = ''
 
+
 '''
 if xpath_results is list, extract the text from each result and concat the list
-if xpath_results is a xml element, extract all the text node from it ( text_content() method from lxml )
+if xpath_results is a xml element, extract all the text node from it
+   ( text_content() method from lxml )
 if xpath_results is a string element, then it's already done
 '''
+
+
 def extract_text(xpath_results):
     if type(xpath_results) == list:
         # it's list of result : concat everything using recursive call
...
         url += '/'
 
     # FIXME : hack for yahoo
-    if parsed_url.hostname == 'search.yahoo.com' and parsed_url.path.startswith('/r'):
+    if parsed_url.hostname == 'search.yahoo.com'\
+       and parsed_url.path.startswith('/r'):
         p = parsed_url.path
         mark = p.find('/**')
         if mark != -1:
...
     if results_xpath:
         for result in dom.xpath(results_xpath):
             url = extract_url(result.xpath(url_xpath))
-            title = extract_text(result.xpath(title_xpath)[0 ])
+            title = extract_text(result.xpath(title_xpath)[0])
             content = extract_text(result.xpath(content_xpath)[0])
             results.append({'url': url, 'title': title, 'content': content})
     else:
         for url, title, content in zip(
-            map(extract_url, dom.xpath(url_xpath)), \
-            map(extract_text, dom.xpath(title_xpath)), \
-            map(extract_text, dom.xpath(content_xpath)), \
-                ):
+            map(extract_url, dom.xpath(url_xpath)),
+            map(extract_text, dom.xpath(title_xpath)),
+            map(extract_text, dom.xpath(content_xpath))
+        ):
             results.append({'url': url, 'title': title, 'content': content})
 
     if not suggestion_xpath:

searx/engines/yacy.py  (+4, -2)

 url = 'http://localhost:8090'
 search_url = '/yacysearch.json?{query}&maximumRecords=10'
 
+
 def request(query, params):
-    params['url'] = url + search_url.format(query=urlencode({'query':query}))
+    params['url'] = url + search_url.format(query=urlencode({'query': query}))
     return params
 
+
 def response(resp):
     raw_search_results = loads(resp.text)
 
...
         tmp_result['content'] = ''
 
         if len(result['description']):
-            tmp_result['content'] += result['description'] +"<br/>"
+            tmp_result['content'] += result['description'] + "<br/>"
 
         if len(result['pubDate']):
             tmp_result['content'] += result['pubDate'] + "<br/>"

searx/engines/youtube.py  (+7, -7)

 
 search_url = 'https://gdata.youtube.com/feeds/api/videos?alt=json&{query}'
 
+
 def request(query, params):
     params['url'] = search_url.format(query=urlencode({'q': query}))
     return params
...
         thumbnail = ''
         if len(result['media$group']['media$thumbnail']):
             thumbnail = result['media$group']['media$thumbnail'][0]['url']
-            content += '<a href="{0}" title="{0}" ><img src="{1}" /></a>'.format(url, thumbnail)
+            content += '<a href="{0}" title="{0}" ><img src="{1}" /></a>'.format(url, thumbnail)  # noqa
         if len(content):
             content += '<br />' + result['content']['$t']
         else:
             content = result['content']['$t']
 
-        results.append({'url': url
-                        , 'title': title
-                        , 'content': content
-                        , 'template':'videos.html'
-                        , 'thumbnail':thumbnail})
+        results.append({'url': url,
+                        'title': title,
+                        'content': content,
+                        'template': 'videos.html',
+                        'thumbnail': thumbnail})
 
     return results
-

searx/utils.py  (+15, -5)

 from HTMLParser import HTMLParser
 #import htmlentitydefs
 import csv
-import codecs
+from codecs import getincrementalencoder
 import cStringIO
 import re
 
 
 def gen_useragent():
     # TODO
-    return "Mozilla/5.0 (X11; Linux x86_64; rv:26.0) Gecko/20100101 Firefox/26.0"
+    ua = "Mozilla/5.0 (X11; Linux x86_64; rv:26.0) Gecko/20100101 Firefox/26.0"
+    return ua
 
 
 def highlight_content(content, query):
...
         self.result.append(d)
 
     def handle_charref(self, number):
-        codepoint = int(number[1:], 16) if number[0] in (u'x', u'X') else int(number)
+        if number[0] in (u'x', u'X'):
+            codepoint = int(number[1:], 16)
+        else:
+            codepoint = int(number)
         self.result.append(unichr(codepoint))
 
     def handle_entityref(self, name):
...
         self.queue = cStringIO.StringIO()
         self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
         self.stream = f
-        self.encoder = codecs.getincrementalencoder(encoding)()
+        self.encoder = getincrementalencoder(encoding)()
 
     def writerow(self, row):
-        self.writer.writerow([(s.encode("utf-8").strip() if type(s) == str or type(s) == unicode else str(s)) for s in row])
+        unicode_row = []
+        for col in row:
+            if type(col) == str or type(col) == unicode:
+                unicode_row.append(col.encode('utf-8').strip())
+            else:
+                unicode_row.append(col)
+        self.writer.writerow(unicode_row)
         # Fetch UTF-8 output from the queue ...
         data = self.queue.getvalue()
         data = data.decode("utf-8")

searx/webapp.py  (+19, -10)

 '''
 
 from searx import settings
-from flask import Flask, request, render_template, url_for, Response, make_response, redirect
+from flask import Flask, request, render_template
+from flask import url_for, Response, make_response, redirect
 from searx.engines import search, categories, engines, get_engines_stats
 import json
 import cStringIO
...
 def render(template_name, **kwargs):
     global categories
     kwargs['categories'] = ['general']
-    kwargs['categories'].extend(x for x in sorted(categories.keys()) if x != 'general')
+    kwargs['categories'].extend(x for x in
+                                sorted(categories.keys()) if x != 'general')
     if not 'selected_categories' in kwargs:
         kwargs['selected_categories'] = []
         cookie_categories = request.cookies.get('categories', '').split(',')
...
                     continue
                 selected_categories.append(category)
         if not len(selected_categories):
-            cookie_categories = request.cookies.get('categories', '').split(',')
+            cookie_categories = request.cookies.get('categories', '')
+            cookie_categories = cookie_categories.split(',')
             for ccateg in cookie_categories:
                 if ccateg in categories:
                     selected_categories.append(ccateg)
...
             selected_categories = ['general']
 
         for categ in selected_categories:
-            selected_engines.extend({'category': categ, 'name': x.name} for x in categories[categ])
+            selected_engines.extend({'category': categ,
+                                     'name': x.name}
+                                    for x in categories[categ])
 
     results, suggestions = search(query, request, selected_engines)
 
...
                 result['content'] = html_to_text(result['content']).strip()
             result['title'] = html_to_text(result['title']).strip()
         if len(result['url']) > 74:
-            result['pretty_url'] = result['url'][:35] + '[..]' + result['url'][-35:]
+            url_parts = result['url'][:35], result['url'][-35:]
+            result['pretty_url'] = '{0}[...]{1}'.format(*url_parts)
         else:
             result['pretty_url'] = result['url']
 
...
                 result['favicon'] = engine
 
     if request_data.get('format') == 'json':
-        return Response(json.dumps({'query': query, 'results': results}), mimetype='application/json')
+        return Response(json.dumps({'query': query, 'results': results}),
+                        mimetype='application/json')
     elif request_data.get('format') == 'csv':
         csv = UnicodeWriter(cStringIO.StringIO())
         keys = ('title', 'url', 'content', 'host', 'engine', 'score')
...
                 csv.writerow([row.get(key, '') for key in keys])
         csv.stream.seek(0)
         response = Response(csv.stream.read(), mimetype='application/csv')
-        response.headers.add('Content-Disposition', 'attachment;Filename=searx_-_{0}.csv'.format('_'.join(query.split())))
+        content_disp = 'attachment;Filename=searx_-_{0}.csv'.format(query)
+        response.headers.add('Content-Disposition', content_disp)
         return response
     elif request_data.get('format') == 'rss':
         response_rss = render(
...
     base_url = get_base_url()
     ret = opensearch_xml.format(method=method, host=base_url)
     resp = Response(response=ret,
-                status=200,
-                mimetype="application/xml")
+                    status=200,
+                    mimetype="application/xml")
     return resp
 
 
 @app.route('/favicon.ico')
 def favicon():
     return send_from_directory(os.path.join(app.root_path, 'static/img'),
-                               'favicon.png', mimetype='image/vnd.microsoft.icon')
+                               'favicon.png',
+                               mimetype='image/vnd.microsoft.icon')
 
 
 def run():