|
@@ -8,6 +8,7 @@
|
8
|
8
|
# @stable no (HTML can change)
|
9
|
9
|
# @parse url, title, content, suggestion
|
10
|
10
|
|
|
11
|
+import re
|
11
|
12
|
from urllib import urlencode
|
12
|
13
|
from urlparse import urlparse, parse_qsl
|
13
|
14
|
from lxml import html
|
|
@@ -78,15 +79,22 @@ country_to_hostname = {
|
78
|
79
|
'TW': 'www.google.com.tw' # Taiwan
|
79
|
80
|
}
|
80
|
81
|
|
|
82
|
+# osm
|
|
83
|
+url_map = 'https://www.openstreetmap.org/'\
|
|
84
|
+ + '?lat={latitude}&lon={longitude}&zoom={zoom}&layers=M'
|
|
85
|
+
|
81
|
86
|
# search-url
|
82
|
87
|
search_path = '/search'
|
83
|
|
-maps_path = '/maps/'
|
84
|
|
-redirect_path = '/url'
|
85
|
|
-images_path = '/images'
|
86
|
88
|
search_url = ('https://{hostname}' +
|
87
|
89
|
search_path +
|
88
|
90
|
'?{query}&start={offset}&gbv=1')
|
89
|
91
|
|
|
92
|
+# other URLs
|
|
93
|
+map_hostname_start = 'maps.google.'
|
|
94
|
+maps_path = '/maps'
|
|
95
|
+redirect_path = '/url'
|
|
96
|
+images_path = '/images'
|
|
97
|
+
|
90
|
98
|
# specific xpath variables
|
91
|
99
|
results_xpath = '//li[@class="g"]'
|
92
|
100
|
url_xpath = './/h3/a/@href'
|
|
@@ -95,10 +103,32 @@ content_xpath = './/span[@class="st"]'
|
95
|
103
|
content_misc_xpath = './/div[@class="f slp"]'
|
96
|
104
|
suggestion_xpath = '//p[@class="_Bmc"]'
|
97
|
105
|
|
|
106
|
+# map : detail location
|
|
107
|
+map_address_xpath = './/div[@class="s"]//table//td[2]/span/text()'
|
|
108
|
+map_phone_xpath = './/div[@class="s"]//table//td[2]/span/span'
|
|
109
|
+map_website_url_xpath = 'h3[2]/a/@href'
|
|
110
|
+map_website_title_xpath = 'h3[2]'
|
|
111
|
+
|
|
112
|
+# map : near the location
|
|
113
|
+map_near = 'table[@class="ts"]//tr'
|
|
114
|
+map_near_title = './/h4'
|
|
115
|
+map_near_url = './/h4/a/@href'
|
|
116
|
+map_near_phone = './/span[@class="nobr"]'
|
|
117
|
+
|
|
118
|
+# images
|
98
|
119
|
images_xpath = './/div/a'
|
99
|
120
|
image_url_xpath = './@href'
|
100
|
121
|
image_img_src_xpath = './img/@src'
|
101
|
122
|
|
|
123
|
+# property names
|
|
124
|
+# FIXME : no translation
|
|
125
|
+property_address = "Address"
|
|
126
|
+property_phone = "Phone number"
|
|
127
|
+property_location = "Location"
|
|
128
|
+property_website = "Web site"
|
|
129
|
+property_gplus_website = "Google plus"
|
|
130
|
+
|
|
131
|
+# cookies
|
102
|
132
|
pref_cookie = ''
|
103
|
133
|
nid_cookie = {}
|
104
|
134
|
|
|
@@ -122,6 +152,11 @@ def get_google_nid_cookie(google_hostname):
|
122
|
152
|
|
123
|
153
|
# remove google-specific tracking-url
|
124
|
154
|
def parse_url(url_string, google_hostname):
|
|
155
|
+ # sanity check
|
|
156
|
+ if url_string is None:
|
|
157
|
+ return url_string
|
|
158
|
+
|
|
159
|
+ # normal case
|
125
|
160
|
parsed_url = urlparse(url_string)
|
126
|
161
|
if (parsed_url.netloc in [google_hostname, '']
|
127
|
162
|
and parsed_url.path == redirect_path):
|
|
@@ -131,6 +166,19 @@ def parse_url(url_string, google_hostname):
|
131
|
166
|
return url_string
|
132
|
167
|
|
133
|
168
|
|
|
169
|
# URL : get label
def url_get_label(url_string):
    """Return the human-readable label to show for *url_string*.

    Google+ profile links get the dedicated "Google plus" label; any
    other URL is labeled as a generic web site.  ``None`` is passed
    through unchanged.
    """
    # sanity check
    if url_string is None:
        return url_string

    # normal case
    netloc = urlparse(url_string).netloc
    if netloc == 'plus.google.com':
        return property_gplus_website
    return property_website
|
|
180
|
+
|
|
181
|
+
|
134
|
182
|
# returns extract_text on the first result selected by the xpath or None
|
135
|
183
|
def extract_text_from_dom(result, xpath):
|
136
|
184
|
r = result.xpath(xpath)
|
|
@@ -151,7 +199,7 @@ def request(query, params):
|
151
|
199
|
if len(language_array) == 2:
|
152
|
200
|
country = language_array[1]
|
153
|
201
|
else:
|
154
|
|
- country = ' '
|
|
202
|
+ country = 'US'
|
155
|
203
|
language = language_array[0] + ',' + language_array[0] + '-' + country
|
156
|
204
|
|
157
|
205
|
if use_locale_domain:
|
|
@@ -196,21 +244,32 @@ def response(resp):
|
196
|
244
|
try:
|
197
|
245
|
url = parse_url(extract_url(result.xpath(url_xpath), google_url), google_hostname)
|
198
|
246
|
parsed_url = urlparse(url, google_hostname)
|
199
|
|
- if (parsed_url.netloc == google_hostname
|
200
|
|
- and (parsed_url.path == search_path
|
201
|
|
- or parsed_url.path.startswith(maps_path))):
|
202
|
|
- # remove the link to google news and google maps
|
203
|
|
- # FIXME : sometimes the URL is https://maps.google.*/maps
|
204
|
|
- # no consequence, the result trigger an exception after which is ignored
|
205
|
|
- continue
|
|
247
|
+
|
|
248
|
+ # map result
|
|
249
|
+ if ((parsed_url.netloc == google_hostname and parsed_url.path.startswith(maps_path))
|
|
250
|
+ or (parsed_url.netloc.startswith(map_hostname_start))):
|
|
251
|
+ x = result.xpath(map_near)
|
|
252
|
+ if len(x) > 0:
|
|
253
|
+ # map : near the location
|
|
254
|
+ results = results + parse_map_near(parsed_url, x, google_hostname)
|
|
255
|
+ else:
|
|
256
|
+ # map : detail about a location
|
|
257
|
+ results = results + parse_map_detail(parsed_url, result, google_hostname)
|
|
258
|
+
|
|
259
|
+ # google news
|
|
260
|
+ elif (parsed_url.netloc == google_hostname
|
|
261
|
+ and parsed_url.path == search_path):
|
|
262
|
+ # skipping news results
|
|
263
|
+ pass
|
206
|
264
|
|
207
|
265
|
# images result
|
208
|
|
- if (parsed_url.netloc == google_hostname
|
209
|
|
- and parsed_url.path == images_path):
|
|
266
|
+ elif (parsed_url.netloc == google_hostname
|
|
267
|
+ and parsed_url.path == images_path):
|
210
|
268
|
# only thumbnail image provided,
|
211
|
269
|
# so skipping image results
|
212
|
270
|
# results = results + parse_images(result, google_hostname)
|
213
|
271
|
pass
|
|
272
|
+
|
214
|
273
|
else:
|
215
|
274
|
# normal result
|
216
|
275
|
content = extract_text_from_dom(result, content_xpath)
|
|
@@ -223,7 +282,7 @@ def response(resp):
|
223
|
282
|
results.append({'url': url,
|
224
|
283
|
'title': title,
|
225
|
284
|
'content': content})
|
226
|
|
- except Exception:
|
|
285
|
+ except:
|
227
|
286
|
continue
|
228
|
287
|
|
229
|
288
|
# parse suggestion
|
|
@@ -249,3 +308,96 @@ def parse_images(result, google_hostname):
|
249
|
308
|
'template': 'images.html'})
|
250
|
309
|
|
251
|
310
|
return results
|
|
311
|
+
|
|
312
|
+
|
|
313
|
def parse_map_near(parsed_url, x, google_hostname):
    """Build plain results for the places listed near a map location.

    *x* is the list of DOM rows matched by ``map_near``; each row yields
    one result with title, de-tracked URL and (when present) the phone
    number as content.
    """
    results = []

    for row in x:
        phone = extract_text_from_dom(row, map_near_phone)
        if phone is not None:
            phone = property_phone + ": " + phone
        results.append({
            'url': parse_url(extract_text_from_dom(row, map_near_url), google_hostname),
            'title': extract_text_from_dom(row, map_near_title),
            'content': phone,
        })

    return results
|
|
327
|
+
|
|
328
|
+
|
|
329
|
def parse_map_detail(parsed_url, result, google_hostname):
    """Parse a google maps "detail" result into searx results.

    Returns a list containing an optional infobox (only when the place
    advertises a web site) and a ``map.html`` result, or an empty list
    when no geolocation could be extracted from the URL.
    """
    results = []

    # try to parse the geoloc: either the "/@lat,lon,zoom" path form
    # or the "?ll=lat,lon&z=zoom" query form
    # (raw strings: '\.' / '\=' / '\&' are escape sequences otherwise)
    m = re.search(r'@([0-9\.]+),([0-9\.]+),([0-9]+)', parsed_url.path)
    if m is None:
        m = re.search(r'll\=([0-9\.]+),([0-9\.]+)\&z\=([0-9]+)', parsed_url.query)

    if m is not None:
        # geoloc found
        lon = float(m.group(2))
        lat = float(m.group(1))
        zoom = int(m.group(3))

        # TODO : map zoom to dlon / dlat
        dlon = 0.000001
        dlat = 0.000001

        boundingbox = [round(lat - dlat, 7), round(lat + dlat, 7), round(lon - dlon, 7), round(lon + dlon, 7)]
        map_url = url_map\
            .replace('{latitude}', str(lat))\
            .replace('{longitude}', str(lon))\
            .replace('{zoom}', str(zoom + 2))

        geojson = {u'type': u'Point',
                   u'coordinates': [lon, lat]
                   }

        # attributes (address / phone, skipped when missing)
        attributes = []
        add_attributes(attributes, property_address, extract_text_from_dom(result, map_address_xpath))
        add_attributes(attributes, property_phone, extract_text_from_dom(result, map_phone_xpath))

        # title / content / url
        website_title = extract_text_from_dom(result, map_website_title_xpath)
        content = extract_text_from_dom(result, content_xpath)
        website_url = parse_url(extract_text_from_dom(result, map_website_url_xpath), google_hostname)

        # add an infobox if there is a website
        if website_url is not None:
            results.append({'infobox': website_title,
                            'id': website_url,
                            'content': content,
                            'attributes': attributes,
                            'urls': [
                                {'title': url_get_label(website_url), 'url': website_url},
                                {'title': property_location, 'url': map_url}
                            ]
                            })

        # useful because the user can see the map directly inside searx
        results.append({'template': 'map.html',
                        'title': website_title,
                        'content': (content + '<br />' if content is not None else '')
                        + attributes_to_html(attributes),
                        'longitude': lon,
                        'latitude': lat,
                        'boundingbox': boundingbox,
                        'geojson': geojson,
                        'url': website_url if website_url is not None else map_url
                        })
    return results
|
|
391
|
+
|
|
392
|
+
|
|
393
|
def add_attributes(attributes, name, value):
    """Append a ``{'label': name, 'value': value}`` entry to *attributes*,
    skipping missing (``None``) or empty values."""
    if value is None or len(value) == 0:
        return
    attributes.append({'label': name, 'value': value})
|
|
396
|
+
|
|
397
|
+
|
|
398
|
def attributes_to_html(attributes):
    """Render the attribute list as a Bootstrap striped HTML table.

    NOTE(review): labels/values are inserted without HTML escaping —
    they come from lxml text extraction; confirm they cannot carry markup.
    """
    rows = ''.join(
        '<tr><th>' + a.get('label') + '</th><td>' + a.get('value') + '</td></tr>'
        for a in attributes
    )
    return '<table class="table table-striped">' + rows + '</table>'
|