
Merge pull request #167 from Cqoicebordel/minor-fixes

Minor fixes

Adam Tauber, 10 years ago
Commit 3b672039aa

2 changed files with 10 additions and 15 deletions
  1. searx/engines/digg.py (+3, -0)
  2. searx/engines/vimeo.py (+7, -15)

searx/engines/digg.py (+3, -0)

@@ -44,6 +44,9 @@
 
     search_result = loads(resp.text)
 
+    if search_result['html'] == '':
+        return results
+
     dom = html.fromstring(search_result['html'])
 
     # parse results
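
The new guard in digg.py matters because lxml refuses to parse an empty document: html.fromstring('') raises lxml.etree.ParserError ("Document is empty"), so a Digg reply whose JSON carries an empty html field would crash the engine before it could report zero results. A minimal sketch of the failure mode and the fix, using a hypothetical empty reply in place of a real Digg response:

from json import loads
from lxml import html


def response(resp_text):
    # shaped like the engine's response() callback
    results = []
    search_result = loads(resp_text)

    # html.fromstring('') raises lxml.etree.ParserError
    # ("Document is empty"), so bail out before parsing.
    if search_result['html'] == '':
        return results

    dom = html.fromstring(search_result['html'])
    # ... the real engine walks dom and fills results here ...
    return results


print(response('{"html": ""}'))  # -> [] instead of a ParserError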

searx/engines/vimeo.py (+7, -15)

@@ -13,24 +13,23 @@
 # @todo        set content-parameter with correct data
 
 from urllib import urlencode
-from HTMLParser import HTMLParser
 from lxml import html
-from searx.engines.xpath import extract_text
 from dateutil import parser
+from cgi import escape
 
 # engine dependent config
 categories = ['videos']
 paging = True
 
 # search-url
-base_url = 'https://vimeo.com'
+base_url = 'http://vimeo.com'
 search_url = base_url + '/search/page:{pageno}?{query}'
 
 # specific xpath variables
+results_xpath = '//div[@id="browse_content"]/ol/li'
 url_xpath = './a/@href'
+title_xpath = './a/div[@class="data"]/p[@class="title"]'
 content_xpath = './a/img/@src'
-title_xpath = './a/div[@class="data"]/p[@class="title"]/text()'
-results_xpath = '//div[@id="browse_content"]/ol/li'
 publishedDate_xpath = './/p[@class="meta"]//attribute::datetime'
 
 
@@ -39,10 +38,6 @@
     params['url'] = search_url.format(pageno=params['pageno'],
                                       query=urlencode({'q': query}))
 
-    # TODO required?
-    params['cookies']['__utma'] =\
-        '00000000.000#0000000.0000000000.0000000000.0000000000.0'
-
    return params
 
 
@@ -52,15 +47,12 @@
 
     dom = html.fromstring(resp.text)
 
-    p = HTMLParser()
-
     # parse results
     for result in dom.xpath(results_xpath):
         url = base_url + result.xpath(url_xpath)[0]
-        title = p.unescape(extract_text(result.xpath(title_xpath)))
-        thumbnail = extract_text(result.xpath(content_xpath)[0])
-        publishedDate = parser.parse(extract_text(
-            result.xpath(publishedDate_xpath)[0]))
+        title = escape(html.tostring(result.xpath(title_xpath)[0], method='text', encoding='UTF-8').decode("utf-8"))
+        thumbnail = result.xpath(content_xpath)[0]
+        publishedDate = parser.parse(result.xpath(publishedDate_xpath)[0])
 
         # append result
         results.append({'url': url,
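
The vimeo.py rewrite can drop HTMLParser.unescape because lxml already resolves HTML entities while parsing, and html.tostring(node, method='text') serializes the text of the node together with all of its descendants, which is why title_xpath loses its trailing /text(); cgi.escape then re-encodes &, < and > so the title is safe to embed in result pages. A minimal sketch of that pipeline against hypothetical markup (not Vimeo's real result HTML); cgi.escape is the Python 2-era helper the patch uses, removed in Python 3.8 in favour of html.escape:

from cgi import escape  # Python 2-era; use html.escape on Python 3
from lxml import html

# Hypothetical stand-in for one Vimeo result's title node.
node = html.fromstring('<p class="title">Fish &amp; <b>Chips</b></p>')

# method='text' collects text from the node and its descendants,
# entities already resolved; escape() re-encodes &, < and >.
title = escape(html.tostring(node, method='text',
                             encoding='UTF-8').decode('utf-8'))

print(title)  # Fish &amp; Chips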