Merge pull request #152 from pointhi/search_engines

[enh] add photon engine, and fix pep8 errors
Adam Tauber, 10 years ago
parent commit 813247b37a

searx/engines/bing_news.py (+8, -3)

@@ -57,12 +57,16 @@ def response(resp):
         link = result.xpath('.//div[@class="newstitle"]/a')[0]
         url = link.attrib.get('href')
         title = ' '.join(link.xpath('.//text()'))
-        contentXPath = result.xpath('.//div[@class="sn_txt"]/div//span[@class="sn_snip"]//text()')
+        contentXPath = result.xpath('.//div[@class="sn_txt"]/div'
+                                    '//span[@class="sn_snip"]//text()')
         if contentXPath is not None:
             content = escape(' '.join(contentXPath))
 
         # parse publishedDate
-        publishedDateXPath = result.xpath('.//div[@class="sn_txt"]/div//span[contains(@class,"sn_ST")]//span[contains(@class,"sn_tm")]//text()')
+        publishedDateXPath = result.xpath('.//div[@class="sn_txt"]/div'
+                                          '//span[contains(@class,"sn_ST")]'
+                                          '//span[contains(@class,"sn_tm")]'
+                                          '//text()')
         if publishedDateXPath is not None:
             publishedDate = escape(' '.join(publishedDateXPath))
 
@@ -74,7 +78,8 @@ def response(resp):
             timeNumbers = re.findall(r'\d+', publishedDate)
             publishedDate = datetime.now()\
                 - timedelta(hours=int(timeNumbers[0]))
-        elif re.match("^[0-9]+ hour(s|), [0-9]+ minute(s|) ago$", publishedDate):
+        elif re.match("^[0-9]+ hour(s|),"
+                      " [0-9]+ minute(s|) ago$", publishedDate):
             timeNumbers = re.findall(r'\d+', publishedDate)
             publishedDate = datetime.now()\
                 - timedelta(hours=int(timeNumbers[0]))\
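
These wraps rely on Python's implicit concatenation of adjacent string literals, so each split XPath expression still evaluates to exactly the same single string. A minimal sketch:

xpath = ('.//div[@class="sn_txt"]/div'
         '//span[@class="sn_snip"]//text()')
# adjacent literals are joined at compile time:
assert xpath == './/div[@class="sn_txt"]/div//span[@class="sn_snip"]//text()'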

searx/engines/faroo.py (+11, -4)

@@ -22,10 +22,17 @@ api_key = None
 
 # search-url
 url = 'http://www.faroo.com/'
-search_url = url + 'api?{query}&start={offset}&length={number_of_results}&l={language}&src={categorie}&i=false&f=json&key={api_key}'
+search_url = url + 'api?{query}'\
+                      '&start={offset}'\
+                      '&length={number_of_results}'\
+                      '&l={language}'\
+                      '&src={categorie}'\
+                      '&i=false'\
+                      '&f=json'\
+                      '&key={api_key}'  # noqa
 
 search_category = {'general': 'web',
-                'news': 'news'}
+                   'news': 'news'}
 
 
 # do search-request
@@ -80,8 +87,8 @@ def response(resp):
     # parse results
     for result in search_res['results']:
         if result['news']:
-            # timestamp (how many milliseconds have passed between now and the beginning of 1970)
-            publishedDate = datetime.datetime.fromtimestamp(result['date']/1000.0)
+            # timestamp (milliseconds since 1970)
+            publishedDate = datetime.datetime.fromtimestamp(result['date']/1000.0)  # noqa
 
             # append news result
             results.append({'url': result['url'],
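
The # noqa markers are what let the remaining over-length lines pass: flake8 (and pep8-style checkers that honor the marker) skips checks on a line carrying that comment, which is the pragmatic choice where a URL template or timestamp expression cannot be wrapped cleanly.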

searx/engines/google_images.py (+1, -1)

@@ -9,7 +9,7 @@
 # @stable      yes (but deprecated)
 # @parse       url, title, img_src
 
-from urllib import urlencode,unquote
+from urllib import urlencode, unquote
 from json import loads
 
 # engine dependent config

searx/engines/kickass.py (+4, -4)

@@ -1,8 +1,8 @@
 ## Kickass Torrent (Videos, Music, Files)
-# 
+#
 # @website     https://kickass.so
 # @provide-api no (nothing found)
-# 
+#
 # @using-api   no
 # @results     HTML (using search portal)
 # @stable      yes (HTML can change)
@@ -13,7 +13,6 @@ from cgi import escape
 from urllib import quote
 from lxml import html
 from operator import itemgetter
-from dateutil import parser
 
 # engine dependent config
 categories = ['videos', 'music', 'files']
@@ -33,7 +32,8 @@ def request(query, params):
     params['url'] = search_url.format(search_term=quote(query),
                                       pageno=params['pageno'])
 
-    # FIX: SSLError: hostname 'kickass.so' doesn't match either of '*.kickass.to', 'kickass.to'
+    # FIX: SSLError: hostname 'kickass.so'
+    # doesn't match either of '*.kickass.to', 'kickass.to'
     params['verify'] = False
 
     return params

searx/engines/mediawiki.py (+7, -4)

@@ -28,15 +28,17 @@ search_url = base_url + 'w/api.php?action=query'\
                                  '&srprop=timestamp'\
                                  '&format=json'\
                                  '&sroffset={offset}'\
-                                 '&srlimit={limit}'
+                                 '&srlimit={limit}'     # noqa
 
 
 # do search-request
 def request(query, params):
     offset = (params['pageno'] - 1) * number_of_results
+
     string_args = dict(query=urlencode({'srsearch': query}),
-                        offset=offset,
-                        limit=number_of_results)
+                       offset=offset,
+                       limit=number_of_results)
+
     format_strings = list(Formatter().parse(base_url))
 
     if params['language'] == 'all':
@@ -67,7 +69,8 @@ def response(resp):
 
     # parse results
     for result in search_results['query']['search']:
-        url = base_url.format(language=resp.search_params['language']) + 'wiki/' + quote(result['title'].replace(' ', '_').encode('utf-8'))
+        url = base_url.format(language=resp.search_params['language']) +\
+            'wiki/' + quote(result['title'].replace(' ', '_').encode('utf-8'))
 
         # append result
         results.append({'url': url,

searx/engines/openstreetmap.py (+9, -5)

@@ -9,20 +9,24 @@
 # @parse       url, title
 
 from json import loads
+from searx.utils import searx_useragent
 
 # engine dependent config
 categories = ['map']
 paging = False
 
 # search-url
-url = 'https://nominatim.openstreetmap.org/search/{query}?format=json&polygon_geojson=1&addressdetails=1'
-
+base_url = 'https://nominatim.openstreetmap.org/'
+search_string = 'search/{query}?format=json&polygon_geojson=1&addressdetails=1'
 result_base_url = 'https://openstreetmap.org/{osm_type}/{osm_id}'
 
 
 # do search-request
 def request(query, params):
-    params['url'] = url.format(query=query)
+    params['url'] = base_url + search_string.format(query=query)
+
+    # using searx User-Agent
+    params['headers']['User-Agent'] = searx_useragent()
 
     return params
 
@@ -68,8 +72,8 @@ def response(resp):
             address.update({'house_number': address_raw.get('house_number'),
                            'road': address_raw.get('road'),
                            'locality': address_raw.get('city',
-                                       address_raw.get('town',
-                                       address_raw.get('village'))),
+                                       address_raw.get('town',          # noqa
+                                       address_raw.get('village'))),    # noqa
                            'postcode': address_raw.get('postcode'),
                            'country': address_raw.get('country'),
                            'country_code': address_raw.get('country_code')})
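
Sending searx's own User-Agent here is more than style: Nominatim's usage policy asks clients to identify themselves, and requests arriving with a generic library User-Agent risk being throttled or blocked.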

searx/engines/photon.py (+128, -0)

@@ -0,0 +1,128 @@
+## Photon (Map)
+#
+# @website     https://photon.komoot.de
+# @provide-api yes (https://photon.komoot.de/)
+#
+# @using-api   yes
+# @results     JSON
+# @stable      yes
+# @parse       url, title
+
+from urllib import urlencode
+from json import loads
+from searx.utils import searx_useragent
+
+# engine dependent config
+categories = ['map']
+paging = False
+language_support = True
+number_of_results = 10
+
+# search-url
+base_url = 'https://photon.komoot.de/'
+search_string = 'api/?{query}&limit={limit}'
+result_base_url = 'https://openstreetmap.org/{osm_type}/{osm_id}'
+
+
+# do search-request
+def request(query, params):
+    params['url'] = base_url +\
+        search_string.format(query=urlencode({'q': query}),
+                             limit=number_of_results)
+
+    if params['language'] != 'all':
+        params['url'] = params['url'] +\
+            "&lang=" + params['language'].replace('_', '-')
+
+    # using searx User-Agent
+    params['headers']['User-Agent'] = searx_useragent()
+
+    # FIX: SSLError: SSL3_GET_SERVER_CERTIFICATE:certificate verify failed
+    params['verify'] = False
+
+    return params
+
+
+# get response from search-request
+def response(resp):
+    results = []
+    json = loads(resp.text)
+
+    # parse results
+    for r in json.get('features', {}):
+
+        properties = r.get('properties')
+
+        if not properties:
+            continue
+
+        # get title
+        title = properties['name']
+
+        # get osm-type
+        if properties.get('osm_type') == 'N':
+            osm_type = 'node'
+        elif properties.get('osm_type') == 'W':
+            osm_type = 'way'
+        elif properties.get('osm_type') == 'R':
+            osm_type = 'relation'
+        else:
+            # continue if invalid osm-type
+            continue
+
+        url = result_base_url.format(osm_type=osm_type,
+                                     osm_id=properties.get('osm_id'))
+
+        osm = {'type': osm_type,
+               'id': properties.get('osm_id')}
+
+        geojson = r.get('geometry')
+
+        if properties.get('extent'):
+            boundingbox = [properties.get('extent')[3],
+                           properties.get('extent')[1],
+                           properties.get('extent')[0],
+                           properties.get('extent')[2]]
+        else:
+            # TODO: better boundingbox calculation
+            boundingbox = [geojson['coordinates'][1],
+                           geojson['coordinates'][1],
+                           geojson['coordinates'][0],
+                           geojson['coordinates'][0]]
+
+        # address calculation
+        address = {}
+
+        # get name
+        if properties.get('osm_key') == 'amenity' or\
+           properties.get('osm_key') == 'shop' or\
+           properties.get('osm_key') == 'tourism' or\
+           properties.get('osm_key') == 'leisure':
+            address = {'name': properties.get('name')}
+
+        # add rest of address data, if something is already found
+        if address.get('name'):
+            address.update({'house_number': properties.get('housenumber'),
+                           'road': properties.get('street'),
+                           'locality': properties.get('city',
+                                       properties.get('town',           # noqa
+                                       properties.get('village'))),     # noqa
+                           'postcode': properties.get('postcode'),
+                           'country': properties.get('country')})
+        else:
+            address = None
+
+        # append result
+        results.append({'template': 'map.html',
+                        'title': title,
+                        'content': '',
+                        'longitude': geojson['coordinates'][0],
+                        'latitude': geojson['coordinates'][1],
+                        'boundingbox': boundingbox,
+                        'geojson': geojson,
+                        'address': address,
+                        'osm': osm,
+                        'url': url})
+
+    # return results
+    return results
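
The response() parser above implies the shape of a Photon GeoJSON feature. A minimal feature it would accept, with hypothetical sample values, looks roughly like this (the extent ordering [west, north, east, south] is an assumption read off the boundingbox indexing, which then yields the same [south, north, west, east] boundingbox that Nominatim returns):

feature = {
    'properties': {
        'name': 'Brandenburger Tor',   # hypothetical sample values
        'osm_type': 'N',               # N/W/R -> node/way/relation
        'osm_id': 518071791,
        'osm_key': 'tourism',
        'extent': [13.3776, 52.5166, 13.3780, 52.5162],
    },
    'geometry': {'type': 'Point', 'coordinates': [13.3778, 52.5164]},
}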

searx/engines/soundcloud.py (+6, -1)

@@ -20,7 +20,12 @@ guest_client_id = 'b45b1aa10f1ac2941910a7f0d10f8e28'
 
 # search-url
 url = 'https://api.soundcloud.com/'
-search_url = url + 'search?{query}&facet=model&limit=20&offset={offset}&linked_partitioning=1&client_id={client_id}'
+search_url = url + 'search?{query}'\
+                         '&facet=model'\
+                         '&limit=20'\
+                         '&offset={offset}'\
+                         '&linked_partitioning=1'\
+                         '&client_id={client_id}'   # noqa
 
 
 # do search-request

searx/engines/yacy.py (+17, -12)

@@ -24,7 +24,11 @@ number_of_results = 5
 
 # search-url
 base_url = 'http://localhost:8090'
-search_url = '/yacysearch.json?{query}&startRecord={offset}&maximumRecords={limit}&contentdom={search_type}&resource=global'
+search_url = '/yacysearch.json?{query}'\
+                             '&startRecord={offset}'\
+                             '&maximumRecords={limit}'\
+                             '&contentdom={search_type}'\
+                             '&resource=global'             # noqa
 
 # yacy specific type-definitions
 search_types = {'general': 'text',
@@ -39,10 +43,11 @@ def request(query, params):
     offset = (params['pageno'] - 1) * number_of_results
     search_type = search_types.get(params['category'], '0')
 
-    params['url'] = base_url + search_url.format(query=urlencode({'query': query}),
-                                                 offset=offset,
-                                                 limit=number_of_results,
-                                                 search_type=search_type)
+    params['url'] = base_url +\
+        search_url.format(query=urlencode({'query': query}),
+                          offset=offset,
+                          limit=number_of_results,
+                          search_type=search_type)
 
     # add language tag if specified
     if params['language'] != 'all':
@@ -70,19 +75,19 @@ def response(resp):
 
             # append result
             results.append({'url': result['link'],
-                        'title': result['title'],
-                        'content': result['description'],
-                        'publishedDate': publishedDate})
+                            'title': result['title'],
+                            'content': result['description'],
+                            'publishedDate': publishedDate})
 
     elif resp.search_params['category'] == 'images':
         # parse image results
         for result in search_results:
             # append result
             results.append({'url': result['url'],
-                        'title': result['title'],
-                        'content': '',
-                        'img_src': result['image'],
-                        'template': 'images.html'})
+                            'title': result['title'],
+                            'content': '',
+                            'img_src': result['image'],
+                            'template': 'images.html'})
 
     #TODO parse video, audio and file results
 

searx/engines/yahoo.py (+5, -4)

@@ -20,7 +20,8 @@ paging = True
 language_support = True
 
 # search-url
-search_url = 'https://search.yahoo.com/search?{query}&b={offset}&fl=1&vl=lang_{lang}'
+base_url = 'https://search.yahoo.com/'
+search_url = 'search?{query}&b={offset}&fl=1&vl=lang_{lang}'
 
 # specific xpath variables
 results_xpath = '//div[@class="res"]'
@@ -57,9 +58,9 @@ def request(query, params):
     else:
         language = params['language'].split('_')[0]
 
-    params['url'] = search_url.format(offset=offset,
-                                      query=urlencode({'p': query}),
-                                      lang=language)
+    params['url'] = base_url + search_url.format(offset=offset,
+                                                 query=urlencode({'p': query}),
+                                                 lang=language)
 
     # TODO required?
     params['cookies']['sB'] = 'fl=1&vl=lang_{lang}&sh=1&rw=new&v=1'\

searx/settings.yml (+4, -0)

@@ -95,6 +95,10 @@ engines:
     engine : openstreetmap
     shortcut : osm
 
+  - name : photon
+    engine : photon
+    shortcut : ph
+
 #  - name : piratebay
 #    engine : piratebay
 #    shortcut : tpb
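
With this entry in place, the new engine answers map-category searches and can also be addressed directly via its shortcut, e.g. a query of "!ph berlin" (assuming searx's usual bang syntax, as with the "osm" shortcut above).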