Merge pull request #99 from dalf/master

[enh] stick results from the same category and template and [fix] rewrite the google engine
Adam Tauber, 10 years ago
parent commit 090254feca
2 changed files with 120 additions and 26 deletions:
  1. searx/engines/google.py  +82 -24
  2. searx/search.py  +38 -2

searx/engines/google.py  +82 -24

@@ -1,15 +1,17 @@
 ## Google (Web)
 #
 # @website     https://www.google.com
-# @provide-api yes (https://developers.google.com/web-search/docs/), deprecated!
+# @provide-api yes (https://developers.google.com/custom-search/)
 #
-# @using-api   yes
-# @results     JSON
-# @stable      yes (but deprecated)
-# @parse       url, title, content
+# @using-api   no
+# @results     HTML
+# @stable      no (HTML can change)
+# @parse       url, title, content, suggestion
 
 from urllib import urlencode
-from json import loads
+from urlparse import unquote,urlparse,parse_qsl
+from lxml import html
+from searx.engines.xpath import extract_text, extract_url
 
 # engine dependent config
 categories = ['general']
@@ -17,21 +19,45 @@ paging = True
 language_support = True
 
 # search-url
-url = 'https://ajax.googleapis.com/'
-search_url = url + 'ajax/services/search/web?v=2.0&start={offset}&rsz=large&safe=off&filter=off&{query}&hl={language}'  # noqa
+google_hostname = 'www.google.com'
+search_path = '/search'
+redirect_path = '/url'
+images_path = '/images'
+search_url = 'https://' + google_hostname + search_path + '?{query}&start={offset}&gbv=1'
 
+# specific xpath variables
+results_xpath= '//li[@class="g"]'
+url_xpath = './/h3/a/@href'
+title_xpath = './/h3'
+content_xpath = './/span[@class="st"]'
+suggestion_xpath = '//p[@class="_Bmc"]'
+
+images_xpath = './/div/a'
+image_url_xpath = './@href'
+image_img_src_xpath = './img/@src'
+
+# remove google-specific tracking-url
+def parse_url(url_string):
+    parsed_url = urlparse(url_string)
+    if parsed_url.netloc in [google_hostname, ''] and parsed_url.path==redirect_path:
+        query = dict(parse_qsl(parsed_url.query))
+        return query['q']
+    else:
+        return url_string
 
 # do search-request
 def request(query, params):
-    offset = (params['pageno'] - 1) * 8
+    offset = (params['pageno'] - 1) * 10
 
-    language = 'en-US'
-    if params['language'] != 'all':
-        language = params['language'].replace('_', '-')
+    if params['language'] == 'all':
+        language = 'en'
+    else:
+        language = params['language'].replace('_','-').lower()
 
     params['url'] = search_url.format(offset=offset,
-                                      query=urlencode({'q': query}),
-                                      language=language)
+                                      query=urlencode({'q': query}))
+
+    params['headers']['Accept-Language'] = language
 
     return params
 
@@ -40,18 +66,50 @@ def request(query, params):
 def response(resp):
     results = []
 
-    search_res = loads(resp.text)
-
-    # return empty array if there are no results
-    if not search_res.get('responseData', {}).get('results'):
-        return []
+    dom = html.fromstring(resp.text)
 
     # parse results
-    for result in search_res['responseData']['results']:
-        # append result
-        results.append({'url': result['unescapedUrl'],
-                        'title': result['titleNoFormatting'],
-                        'content': result['content']})
+    for result in dom.xpath(results_xpath):
+        title = extract_text(result.xpath(title_xpath)[0])
+        try:
+            url = parse_url(extract_url(result.xpath(url_xpath), search_url))
+            parsed_url = urlparse(url)
+            if parsed_url.netloc==google_hostname and parsed_url.path==search_path:
+                # remove the link to google news
+                continue
+
+            if parsed_url.netloc==google_hostname and parsed_url.path==images_path:
+                # images result
+                results = results + parse_images(result)
+            else:
+                # normal result
+                content = extract_text(result.xpath(content_xpath)[0])
+                # append result
+                results.append({'url': url,
+                                'title': title,
+                                'content': content})
+        except:
+            continue
+
+    # parse suggestion
+    for suggestion in dom.xpath(suggestion_xpath):
+        # append suggestion
+        results.append({'suggestion': extract_text(suggestion)})
 
     # return results
     return results
+
+def parse_images(result):
+    results = []
+    for image in result.xpath(images_xpath):
+        url = parse_url(extract_text(image.xpath(image_url_xpath)[0]))
+        img_src = extract_text(image.xpath(image_img_src_xpath)[0])
+
+        # append result
+        results.append({'url': url,
+                        'title': '',
+                        'content': '',
+                        'img_src': img_src,
+                        'template': 'images.html'})
+
+    return results
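
As a rough illustration of the new redirect handling (not part of the commit): Google wraps result links as /url?q=<target>&..., and parse_url returns the q parameter so a result points at the target site directly. The snippet below is a minimal, standalone Python 2 sketch mirroring the helper added above; the example URLs are invented.

# Standalone sketch (Python 2) of the redirect unwrapping, not part of the diff.
from urlparse import urlparse, parse_qsl

google_hostname = 'www.google.com'
redirect_path = '/url'

def parse_url(url_string):
    parsed_url = urlparse(url_string)
    if parsed_url.netloc in [google_hostname, ''] and parsed_url.path == redirect_path:
        # the real target is carried in the "q" query parameter
        return dict(parse_qsl(parsed_url.query))['q']
    # anything that is not a /url redirect is returned unchanged
    return url_string

# made-up example URLs:
print parse_url('/url?q=https://example.org/page&sa=U&ei=abc')  # https://example.org/page
print parse_url('https://example.org/direct')                   # unchanged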

searx/search.py  +38 -2

@@ -49,7 +49,8 @@ def score_results(results):
     flat_len = len(flat_res)
     engines_len = len(results)
     results = []
-    # deduplication + scoring
+
+    # pass 1: deduplication + scoring
     for i, res in enumerate(flat_res):
 
         res['parsed_url'] = urlparse(res['url'])
@@ -90,7 +91,42 @@ def score_results(results):
         else:
             res['score'] = score
             results.append(res)
-    return sorted(results, key=itemgetter('score'), reverse=True)
+    results = sorted(results, key=itemgetter('score'), reverse=True)
+
+    # pass 2 : group results by category and template
+    gresults = []
+    categoryPositions = {}
+
+    for i, res in enumerate(results):
+        # FIXME : handle more than one category per engine
+        category = engines[res['engine']].categories[0] + ':' + '' if 'template' not in res else res['template']
+
+        current = None if category not in categoryPositions else categoryPositions[category]
+
+        # group with previous results using the same category if the group can accept more result and is not too far from the current position
+        if current != None and (current['count'] > 0) and (len(gresults) - current['index'] < 20):
+            # group with the previous results using the same category with this one
+            index = current['index']
+            gresults.insert(index, res)
+
+            # update every index after the current one (including the current one)
+            for k in categoryPositions:
+                v = categoryPositions[k]['index']
+                if v >= index:
+                    categoryPositions[k]['index'] = v+1
+
+            # update this category
+            current['count'] -= 1
+
+        else:
+            # same category
+            gresults.append(res)
+
+            # update categoryIndex
+            categoryPositions[category] = { 'index' : len(gresults), 'count' : 8 }
+
+    # return gresults
+    return gresults
 
 
 class Search(object):
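
To illustrate the new second pass (not part of the commit), here is a simplified, self-contained Python 2 sketch of the grouping step run on invented data. The names group_results, group_size, max_distance and the sample dicts are made up for the example; the constants 8 and 20 mirror the hard-coded group size and distance limit above, and in search.py the category key is derived from the engine's first category plus the result template.

# Simplified sketch of "pass 2" on invented data, not part of the diff.
def group_results(results, group_size=8, max_distance=20):
    gresults = []
    positions = {}  # category -> {'index': insert position, 'count': remaining slots}

    for res in results:
        current = positions.get(res['category'])

        # stick to earlier results of the same category if that group still
        # has room and is not too far behind the current end of the list
        if current is not None and current['count'] > 0 \
                and len(gresults) - current['index'] < max_distance:
            index = current['index']
            gresults.insert(index, res)
            # shift every recorded insert position at or after this index
            for pos in positions.values():
                if pos['index'] >= index:
                    pos['index'] += 1
            current['count'] -= 1
        else:
            # open (or reopen) a group for this category at the end
            gresults.append(res)
            positions[res['category']] = {'index': len(gresults),
                                          'count': group_size}

    return gresults

# made-up, already score-sorted input with interleaved categories
sample = [{'id': 1, 'category': 'general:'},
          {'id': 2, 'category': 'images:images.html'},
          {'id': 3, 'category': 'general:'},
          {'id': 4, 'category': 'images:images.html'}]
print [r['id'] for r in group_results(sample)]  # -> [1, 3, 2, 4]

The effect is that results sharing a category and template end up adjacent, while each group keeps the position of its first (highest-scored) member.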