
update bing engines and fix bing_news

Thomas Pointhuber, 10 years ago
commit 4b1e0423a0
2 changed files with 92 additions and 23 deletions
  1. searx/engines/bing.py (+39 −6)
  2. searx/engines/bing_news.py (+53 −17)

searx/engines/bing.py (+39 −6)

+## Bing (Web)
+#
+# @website     https://www.bing.com
+# @provide-api yes (http://datamarket.azure.com/dataset/bing/search), max. 5000 query/month
+#
+# @using-api   no (because of query limit)
+# @results     HTML (using search portal)
+# @stable      no (HTML can change)
+# @parse       url, title, content
+#
+# @todo        publishedDate
+
 from urllib import urlencode
 from cgi import escape
 from lxml import html
 
-base_url = 'http://www.bing.com/'
-search_string = 'search?{query}&first={offset}'
+# engine dependent config
+categories = ['general']
 paging = True
 language_support = True
 
+# search-url
+base_url = 'https://www.bing.com/'
+search_string = 'search?{query}&first={offset}'
 
+# do search-request
 def request(query, params):
     offset = (params['pageno'] - 1) * 10 + 1
+
     if params['language'] == 'all':
         language = 'en-US'
     else:
         language = params['language'].replace('_', '-')
+
     search_path = search_string.format(
         query=urlencode({'q': query, 'setmkt': language}),
         offset=offset)
 
     params['cookies']['SRCHHPGUSR'] = \
         'NEWWND=0&NRSLT=-1&SRCHLANG=' + language.split('-')[0]
-    #if params['category'] == 'images':
-    #    params['url'] = base_url + 'images/' + search_path
+
     params['url'] = base_url + search_path
     return params
 
 
+# get response from search-request
 def response(resp):
     results = []
+
     dom = html.fromstring(resp.content)
+
+    # parse results
     for result in dom.xpath('//div[@class="sa_cc"]'):
         link = result.xpath('.//h3/a')[0]
         url = link.attrib.get('href')
         title = ' '.join(link.xpath('.//text()'))
         content = escape(' '.join(result.xpath('.//p//text()')))
-        results.append({'url': url, 'title': title, 'content': content})
 
+        # append result
+        results.append({'url': url,
+                        'title': title,
+                        'content': content})
+
+    # return results if something is found
     if results:
         return results
 
+    # parse results again if nothing is found yet
     for result in dom.xpath('//li[@class="b_algo"]'):
         link = result.xpath('.//h2/a')[0]
         url = link.attrib.get('href')
         title = ' '.join(link.xpath('.//text()'))
         content = escape(' '.join(result.xpath('.//p//text()')))
-        results.append({'url': url, 'title': title, 'content': content})
+
+        # append result
+        results.append({'url': url,
+                        'title': title,
+                        'content': content})
+
+    # return results
     return results
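
For reference, the paging and market handling shared by both engines can be exercised on its own. The sketch below (Python 2, matching the engines' urllib import) reproduces the URL construction from request() for page 2 of a German-locale query; the params dict and the query string are made-up driver values, while the offset and setmkt formulas are copied from the diff:

    from urllib import urlencode

    base_url = 'https://www.bing.com/'
    search_string = 'search?{query}&first={offset}'

    # hypothetical stand-in for the params dict searx hands to request()
    params = {'pageno': 2, 'language': 'de_DE', 'cookies': {}}

    offset = (params['pageno'] - 1) * 10 + 1          # page 2 -> first=11
    language = params['language'].replace('_', '-')   # de_DE -> de-DE

    url = base_url + search_string.format(
        query=urlencode({'q': 'test query', 'setmkt': language}),
        offset=offset)

    print url
    # e.g. https://www.bing.com/search?q=test+query&setmkt=de-DE&first=11
    # (query-parameter order may vary with dict ordering)

Note that request() also pins the search language via the SRCHLANG field of the SRCHHPGUSR cookie, using only the language part of the market code (language.split('-')[0]).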

searx/engines/bing_news.py (+53 −17)

+## Bing (News)
+#
+# @website     https://www.bing.com/news
+# @provide-api yes (http://datamarket.azure.com/dataset/bing/search), max. 5000 query/month
+#
+# @using-api   no (because of query limit)
+# @results     HTML (using search portal)
+# @stable      no (HTML can change)
+# @parse       url, title, content, publishedDate
+
 from urllib import urlencode
 from cgi import escape
 from lxml import html
+from datetime import datetime, timedelta
+from dateutil import parser
+import re
 
+# engine dependent config
 categories = ['news']
-
-base_url = 'http://www.bing.com/'
-search_string = 'news/search?{query}&first={offset}'
 paging = True
 language_support = True
 
+# search-url
+base_url = 'https://www.bing.com/'
+search_string = 'news/search?{query}&first={offset}'
 
+# do search-request
 def request(query, params):
     offset = (params['pageno'] - 1) * 10 + 1
+
     if params['language'] == 'all':
         language = 'en-US'
     else:
         language = params['language'].replace('_', '-')
+
     search_path = search_string.format(
         query=urlencode({'q': query, 'setmkt': language}),
         offset=offset)
 
     params['cookies']['SRCHHPGUSR'] = \
         'NEWWND=0&NRSLT=-1&SRCHLANG=' + language.split('-')[0]
-    #if params['category'] == 'images':
-    #    params['url'] = base_url + 'images/' + search_path
+
     params['url'] = base_url + search_path
     return params
 
 
+# get response from search-request
 def response(resp):
     results = []
+
     dom = html.fromstring(resp.content)
-    for result in dom.xpath('//div[@class="sa_cc"]'):
-        link = result.xpath('.//h3/a')[0]
+
+    # parse results
+    for result in dom.xpath('//div[@class="sn_r"]'):
+        link = result.xpath('.//div[@class="newstitle"]/a')[0]
         url = link.attrib.get('href')
         title = ' '.join(link.xpath('.//text()'))
-        content = escape(' '.join(result.xpath('.//p//text()')))
-        results.append({'url': url, 'title': title, 'content': content})
+        content = escape(' '.join(result.xpath('.//div[@class="sn_txt"]/div//span[@class="sn_snip"]//text()')))
+
+        # parse publishedDate
+        publishedDate = escape(' '.join(result.xpath('.//div[@class="sn_txt"]/div//span[@class="sn_ST"]//span[@class="sn_tm"]//text()')))
 
-    if results:
-        return results
+        if re.match("^[0-9]+ minute(s|) ago$", publishedDate):
+            timeNumbers = re.findall(r'\d+', publishedDate)
+            publishedDate = datetime.now()\
+                - timedelta(minutes=int(timeNumbers[0]))
+        elif re.match("^[0-9]+ hour(s|) ago$", publishedDate):
+            timeNumbers = re.findall(r'\d+', publishedDate)
+            publishedDate = datetime.now()\
+                - timedelta(hours=int(timeNumbers[0]))
+        elif re.match("^[0-9]+ hour(s|), [0-9]+ minute(s|) ago$", publishedDate):
+            timeNumbers = re.findall(r'\d+', publishedDate)
+            publishedDate = datetime.now()\
+                - timedelta(hours=int(timeNumbers[0]))\
+                - timedelta(minutes=int(timeNumbers[1]))
+        else:
+            publishedDate = parser.parse(publishedDate)
 
-    for result in dom.xpath('//li[@class="b_algo"]'):
-        link = result.xpath('.//h2/a')[0]
-        url = link.attrib.get('href')
-        title = ' '.join(link.xpath('.//text()'))
-        content = escape(' '.join(result.xpath('.//p//text()')))
-        results.append({'url': url, 'title': title, 'content': content})
+        # append result
+        results.append({'url': url,
+                        'title': title,
+                        'publishedDate': publishedDate,
+                        'content': content})
+
+    # return results
     return results
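
The publishedDate handling is the substance of the bing_news fix, so it is worth seeing in isolation. The sketch below (Python 2, like the engine) replays the branch logic added to response() against sample strings instead of scraped sn_tm text; the helper name parse_published and the sample inputs are illustrative only, while the regexes and the timedelta arithmetic come from the diff:

    import re
    from datetime import datetime, timedelta
    from dateutil import parser


    def parse_published(text):
        # relative timestamps: "N minute(s) ago", "N hour(s) ago",
        # "N hour(s), M minute(s) ago"
        if re.match("^[0-9]+ minute(s|) ago$", text):
            numbers = re.findall(r'\d+', text)
            return datetime.now() - timedelta(minutes=int(numbers[0]))
        elif re.match("^[0-9]+ hour(s|) ago$", text):
            numbers = re.findall(r'\d+', text)
            return datetime.now() - timedelta(hours=int(numbers[0]))
        elif re.match("^[0-9]+ hour(s|), [0-9]+ minute(s|) ago$", text):
            numbers = re.findall(r'\d+', text)
            return datetime.now() - timedelta(hours=int(numbers[0]),
                                              minutes=int(numbers[1]))
        # anything else (e.g. an absolute date) is handed to dateutil
        return parser.parse(text)


    for sample in ['1 minute ago', '5 hours ago', '2 hours, 30 minutes ago']:
        print parse_published(sample)

Two details worth noting: the "N hours, M minutes ago" form is still reachable even though it is tested after the plain hour form, because the $-anchored hour regex cannot match it; and dateutil's parser.parse() raises ValueError on strings it cannot interpret, so an unexpected sn_tm format would, as the diff stands, abort parsing of the whole results page.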