Преглед на файлове

[enh] google engine : avoid some "sorry google" responses by adding another cookie: NID. This cookie is specific to each hostname.

This allows sending requests to google.* (according to the search language).
Before this commit, requests in languages other than English were sent to www.google.com, which was redirected to www.google.*
The PREF cookie is still used on the www.google.com domain.
Alexandre Flament преди 9 години
родител
ревизия
39ff21237c
променени са 2 файла, в които са добавени 150 реда и са изтрити 21 реда
  1. 126
    16
      searx/engines/google.py
  2. 24
    5
      searx/tests/engines/test_google.py

+ 126
- 16
searx/engines/google.py Целия файл

@@ -14,18 +14,76 @@ from lxml import html
14 14
 from searx.poolrequests import get
15 15
 from searx.engines.xpath import extract_text, extract_url
16 16
 
17
+
17 18
 # engine dependent config
18 19
 categories = ['general']
19 20
 paging = True
20 21
 language_support = True
22
+use_locale_domain = True
23
+
24
+# based on https://en.wikipedia.org/wiki/List_of_Google_domains and tests
25
+default_hostname = 'www.google.com'
26
+
27
+country_to_hostname = {
28
+    'BG': 'www.google.bg',  # Bulgaria
29
+    'CZ': 'www.google.cz',  # Czech Republic
30
+    'DE': 'www.google.de',  # Germany
31
+    'DK': 'www.google.dk',  # Denmark
32
+    'AT': 'www.google.at',  # Austria
33
+    'CH': 'www.google.ch',  # Switzerland
34
+    'GR': 'www.google.gr',  # Greece
35
+    'AU': 'www.google.com.au',  # Australia
36
+    'CA': 'www.google.ca',  # Canada
37
+    'GB': 'www.google.co.uk',  # United Kingdom
38
+    'ID': 'www.google.co.id',  # Indonesia
39
+    'IE': 'www.google.ie',  # Ireland
40
+    'IN': 'www.google.co.in',  # India
41
+    'MY': 'www.google.com.my',  # Malaysia
42
+    'NZ': 'www.google.co.nz',  # New Zealand
43
+    'PH': 'www.google.com.ph',  # Philippines
44
+    'SG': 'www.google.com.sg',  # Singapore
45
+    # 'US': 'www.google.us',  # United States, redirect to .com
46
+    'ZA': 'www.google.co.za',  # South Africa
47
+    'AR': 'www.google.com.ar',  # Argentina
48
+    'CL': 'www.google.cl',  # Chile
49
+    'ES': 'www.google.es',  # Spain
50
+    'MX': 'www.google.com.mx',  # Mexico
51
+    'EE': 'www.google.ee',  # Estonia
52
+    'FI': 'www.google.fi',  # Finland
53
+    'BE': 'www.google.be',  # Belgium
54
+    'FR': 'www.google.fr',  # France
55
+    'IL': 'www.google.co.il',  # Israel
56
+    'HR': 'www.google.hr',  # Croatia
57
+    'HU': 'www.google.hu',  # Hungary
58
+    'IT': 'www.google.it',  # Italy
59
+    'JP': 'www.google.co.jp',  # Japan
60
+    'KR': 'www.google.co.kr',  # South Korea
61
+    'LT': 'www.google.lt',  # Lithuania
62
+    'LV': 'www.google.lv',  # Latvia
63
+    'NO': 'www.google.no',  # Norway
64
+    'NL': 'www.google.nl',  # Netherlands
65
+    'PL': 'www.google.pl',  # Poland
66
+    'BR': 'www.google.com.br',  # Brazil
67
+    'PT': 'www.google.pt',  # Portugal
68
+    'RO': 'www.google.ro',  # Romania
69
+    'RU': 'www.google.ru',  # Russia
70
+    'SK': 'www.google.sk',  # Slovakia
71
+    'SL': 'www.google.si',  # Slovenia (SL -> si)
72
+    'SE': 'www.google.se',  # Sweden
73
+    'TH': 'www.google.co.th',  # Thailand
74
+    'TR': 'www.google.com.tr',  # Turkey
75
+    'UA': 'www.google.com.ua',  # Ukraine
76
+    # 'CN': 'www.google.cn',  # China, only from china ?
77
+    'HK': 'www.google.com.hk',  # Hong kong
78
+    'TW': 'www.google.com.tw'  # Taiwan
79
+}
21 80
 
22 81
 # search-url
23
-google_hostname = 'www.google.com'
24 82
 search_path = '/search'
83
+maps_path = '/maps/'
25 84
 redirect_path = '/url'
26 85
 images_path = '/images'
27
-search_url = ('https://' +
28
-              google_hostname +
86
+search_url = ('https://{hostname}' +
29 87
               search_path +
30 88
               '?{query}&start={offset}&gbv=1')
31 89
 
@@ -34,6 +92,7 @@ results_xpath = '//li[@class="g"]'
34 92
 url_xpath = './/h3/a/@href'
35 93
 title_xpath = './/h3'
36 94
 content_xpath = './/span[@class="st"]'
95
+content_misc_xpath = './/div[@class="f slp"]'
37 96
 suggestion_xpath = '//p[@class="_Bmc"]'
38 97
 
39 98
 images_xpath = './/div/a'
@@ -41,6 +100,7 @@ image_url_xpath = './@href'
41 100
 image_img_src_xpath = './img/@src'
42 101
 
43 102
 pref_cookie = ''
103
+nid_cookie = {}
44 104
 
45 105
 
46 106
 # see https://support.google.com/websearch/answer/873?hl=en
@@ -52,8 +112,16 @@ def get_google_pref_cookie():
52 112
     return pref_cookie
53 113
 
54 114
 
115
+def get_google_nid_cookie(google_hostname):
116
+    global nid_cookie
117
+    if google_hostname not in nid_cookie:
118
+        resp = get('https://' + google_hostname)
119
+        nid_cookie[google_hostname] = resp.cookies.get("NID", None)
120
+    return nid_cookie[google_hostname]
121
+
122
+
55 123
 # remove google-specific tracking-url
56
-def parse_url(url_string):
124
+def parse_url(url_string, google_hostname):
57 125
     parsed_url = urlparse(url_string)
58 126
     if (parsed_url.netloc in [google_hostname, '']
59 127
             and parsed_url.path == redirect_path):
@@ -63,21 +131,45 @@ def parse_url(url_string):
63 131
         return url_string
64 132
 
65 133
 
134
+# returns extract_text on the first result selected by the xpath or None
135
+def extract_text_from_dom(result, xpath):
136
+    r = result.xpath(xpath)
137
+    if len(r) > 0:
138
+        return extract_text(r[0])
139
+    return None
140
+
141
+
66 142
 # do search-request
67 143
 def request(query, params):
68 144
     offset = (params['pageno'] - 1) * 10
69 145
 
70 146
     if params['language'] == 'all':
71 147
         language = 'en'
148
+        country = 'US'
72 149
     else:
73
-        language = params['language'].replace('_', '-').lower()
150
+        language_array = params['language'].lower().split('_')
151
+        if len(language_array) == 2:
152
+            country = language_array[1]
153
+        else:
154
+            country = '  '
155
+        language = language_array[0] + ',' + language_array[0] + '-' + country
156
+
157
+    if use_locale_domain:
158
+        google_hostname = country_to_hostname.get(country.upper(), default_hostname)
159
+    else:
160
+        google_hostname = default_hostname
74 161
 
75 162
     params['url'] = search_url.format(offset=offset,
76
-                                      query=urlencode({'q': query}))
163
+                                      query=urlencode({'q': query}),
164
+                                      hostname=google_hostname)
77 165
 
78 166
     params['headers']['Accept-Language'] = language
79
-    if language.startswith('en'):
167
+    params['headers']['Accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'
168
+    if google_hostname == default_hostname:
80 169
         params['cookies']['PREF'] = get_google_pref_cookie()
170
+    params['cookies']['NID'] = get_google_nid_cookie(google_hostname)
171
+
172
+    params['google_hostname'] = google_hostname
81 173
 
82 174
     return params
83 175
 
@@ -86,17 +178,30 @@ def request(query, params):
86 178
 def response(resp):
87 179
     results = []
88 180
 
181
+    # detect google sorry
182
+    resp_url = urlparse(resp.url)
183
+    if resp_url.netloc == 'sorry.google.com' or resp_url.path == '/sorry/IndexRedirect':
184
+        raise RuntimeWarning('sorry.google.com')
185
+
186
+    # which hostname ?
187
+    google_hostname = resp.search_params.get('google_hostname')
188
+    google_url = "https://" + google_hostname
189
+
190
+    # convert the text to dom
89 191
     dom = html.fromstring(resp.text)
90 192
 
91 193
     # parse results
92 194
     for result in dom.xpath(results_xpath):
93 195
         title = extract_text(result.xpath(title_xpath)[0])
94 196
         try:
95
-            url = parse_url(extract_url(result.xpath(url_xpath), search_url))
96
-            parsed_url = urlparse(url)
197
+            url = parse_url(extract_url(result.xpath(url_xpath), google_url), google_hostname)
198
+            parsed_url = urlparse(url, google_hostname)
97 199
             if (parsed_url.netloc == google_hostname
98
-                    and parsed_url.path == search_path):
99
-                # remove the link to google news
200
+                and (parsed_url.path == search_path
201
+                     or parsed_url.path.startswith(maps_path))):
202
+                # remove the link to google news and google maps
203
+                # FIXME : sometimes the URL is https://maps.google.*/maps
204
+                # no consequence, the result trigger an exception after which is ignored
100 205
                 continue
101 206
 
102 207
             # images result
@@ -104,16 +209,21 @@ def response(resp):
104 209
                     and parsed_url.path == images_path):
105 210
                 # only thumbnail image provided,
106 211
                 # so skipping image results
107
-                # results = results + parse_images(result)
212
+                # results = results + parse_images(result, google_hostname)
108 213
                 pass
109 214
             else:
110 215
                 # normal result
111
-                content = extract_text(result.xpath(content_xpath)[0])
216
+                content = extract_text_from_dom(result, content_xpath)
217
+                if content is None:
218
+                    continue
219
+                content_misc = extract_text_from_dom(result, content_misc_xpath)
220
+                if content_misc is not None:
221
+                    content = content_misc + "<br />" + content
112 222
                 # append result
113 223
                 results.append({'url': url,
114 224
                                 'title': title,
115 225
                                 'content': content})
116
-        except:
226
+        except Exception:
117 227
             continue
118 228
 
119 229
     # parse suggestion
@@ -125,10 +235,10 @@ def response(resp):
125 235
     return results
126 236
 
127 237
 
128
-def parse_images(result):
238
+def parse_images(result, google_hostname):
129 239
     results = []
130 240
     for image in result.xpath(images_xpath):
131
-        url = parse_url(extract_text(image.xpath(image_url_xpath)[0]))
241
+        url = parse_url(extract_text(image.xpath(image_url_xpath)[0]), google_hostname)
132 242
         img_src = extract_text(image.xpath(image_img_src_xpath)[0])
133 243
 
134 244
         # append result

+ 24
- 5
searx/tests/engines/test_google.py Целия файл

@@ -8,6 +8,12 @@ from searx.testing import SearxTestCase
8 8
 
9 9
 class TestGoogleEngine(SearxTestCase):
10 10
 
11
+    def mock_response(self, text):
12
+        response = mock.Mock(text=text, url='https://www.google.com/search?q=test&start=0&gbv=1')
13
+        response.search_params = mock.Mock()
14
+        response.search_params.get = mock.Mock(return_value='www.google.com')
15
+        return response
16
+
11 17
     def test_request(self):
12 18
         query = 'test_query'
13 19
         dicto = defaultdict(dict)
@@ -16,14 +22,17 @@ class TestGoogleEngine(SearxTestCase):
16 22
         params = google.request(query, dicto)
17 23
         self.assertIn('url', params)
18 24
         self.assertIn(query, params['url'])
19
-        self.assertIn('google.com', params['url'])
25
+        self.assertIn('google.fr', params['url'])
20 26
         self.assertNotIn('PREF', params['cookies'])
27
+        self.assertIn('NID', params['cookies'])
21 28
         self.assertIn('fr', params['headers']['Accept-Language'])
22 29
 
23 30
         dicto['language'] = 'all'
24 31
         params = google.request(query, dicto)
32
+        self.assertIn('google.com', params['url'])
25 33
         self.assertIn('en', params['headers']['Accept-Language'])
26 34
         self.assertIn('PREF', params['cookies'])
35
+        self.assertIn('NID', params['cookies'])
27 36
 
28 37
     def test_response(self):
29 38
         self.assertRaises(AttributeError, google.response, None)
@@ -31,7 +40,7 @@ class TestGoogleEngine(SearxTestCase):
31 40
         self.assertRaises(AttributeError, google.response, '')
32 41
         self.assertRaises(AttributeError, google.response, '[]')
33 42
 
34
-        response = mock.Mock(text='<html></html>')
43
+        response = self.mock_response('<html></html>')
35 44
         self.assertEqual(google.response(response), [])
36 45
 
37 46
         html = """
@@ -124,7 +133,7 @@ class TestGoogleEngine(SearxTestCase):
124 133
             </a>
125 134
         </p>
126 135
         """
127
-        response = mock.Mock(text=html)
136
+        response = self.mock_response(html)
128 137
         results = google.response(response)
129 138
         self.assertEqual(type(results), list)
130 139
         self.assertEqual(len(results), 2)
@@ -137,11 +146,21 @@ class TestGoogleEngine(SearxTestCase):
137 146
         <li class="b_algo" u="0|5109|4755453613245655|UAGjXgIrPH5yh-o5oNHRx_3Zta87f_QO">
138 147
         </li>
139 148
         """
140
-        response = mock.Mock(text=html)
149
+        response = self.mock_response(html)
141 150
         results = google.response(response)
142 151
         self.assertEqual(type(results), list)
143 152
         self.assertEqual(len(results), 0)
144 153
 
154
+        response = mock.Mock(text='<html></html>', url='https://sorry.google.com')
155
+        response.search_params = mock.Mock()
156
+        response.search_params.get = mock.Mock(return_value='www.google.com')
157
+        self.assertRaises(RuntimeWarning, google.response, response)
158
+
159
+        response = mock.Mock(text='<html></html>', url='https://www.google.com/sorry/IndexRedirect')
160
+        response.search_params = mock.Mock()
161
+        response.search_params.get = mock.Mock(return_value='www.google.com')
162
+        self.assertRaises(RuntimeWarning, google.response, response)
163
+
145 164
     def test_parse_images(self):
146 165
         html = """
147 166
         <li>
@@ -154,7 +173,7 @@ class TestGoogleEngine(SearxTestCase):
154 173
         </li>
155 174
         """
156 175
         dom = lxml.html.fromstring(html)
157
-        results = google.parse_images(dom)
176
+        results = google.parse_images(dom, 'www.google.com')
158 177
         self.assertEqual(type(results), list)
159 178
         self.assertEqual(len(results), 1)
160 179
         self.assertEqual(results[0]['url'], 'http://this.is.the.url/')