|
@@ -4,6 +4,7 @@ from urllib import urlencode
|
4
|
4
|
from lxml import html
|
5
|
5
|
from searx.engines.xpath import extract_text, extract_url
|
6
|
6
|
from searx.engines.yahoo import parse_url
|
|
7
|
+from datetime import datetime
|
7
|
8
|
|
8
|
9
|
# Engine configuration for the Yahoo News XPath-scraping engine.

# Result categories this engine contributes to.
categories = ['news']

# Search URL template: {query} is the urlencoded query string,
# {offset} is Yahoo's 1-based result offset ("b" parameter) for paging.
search_url = 'http://news.search.yahoo.com/search?{query}&b={offset}'

# XPath selectors used to extract fields from the result page.
results_xpath = '//div[@class="res"]'          # one node per search result
url_xpath = './/h3/a/@href'                    # result link (relative to a result node)
title_xpath = './/h3/a'                        # result title text
content_xpath = './/div[@class="abstr"]'       # result abstract/snippet
publishedDate_xpath = './/span[@class="timestamp"]'  # e.g. "Feb 20 04:02am" — no year
suggestion_xpath = '//div[@id="satat"]//a'     # "also try" query suggestions

# The engine supports pagination via the offset above.
paging = True
|
|
@@ -37,7 +39,10 @@ def response(resp):
|
37
|
39
|
url = parse_url(extract_url(result.xpath(url_xpath), search_url))
|
38
|
40
|
title = extract_text(result.xpath(title_xpath)[0])
|
39
|
41
|
content = extract_text(result.xpath(content_xpath)[0])
|
40
|
|
- results.append({'url': url, 'title': title, 'content': content})
|
|
42
|
+# Feb 20 04:02am
|
|
43
|
+ publishedDate = datetime.strptime(extract_text(result.xpath(publishedDate_xpath)[0]),"%b %d %H:%M%p")
|
|
44
|
+ #publishedDate.replace(year=2014)
|
|
45
|
+ results.append({'url': url, 'title': title, 'content': content,'publishedDate':publishedDate})
|
41
|
46
|
|
42
|
47
|
if not suggestion_xpath:
|
43
|
48
|
return results
|