@@ -56,10 +56,14 @@ def response(resp):
         link = result.xpath('.//div[@class="newstitle"]/a')[0]
         url = link.attrib.get('href')
         title = ' '.join(link.xpath('.//text()'))
-        content = escape(' '.join(result.xpath('.//div[@class="sn_txt"]/div//span[@class="sn_snip"]//text()')))
-
+        contentXPath = result.xpath('.//div[@class="sn_txt"]/div//span[@class="sn_snip"]//text()')
+        if contentXPath != None:
+            content = escape(' '.join(contentXPath))
+
         # parse publishedDate
-        publishedDate = escape(' '.join(result.xpath('.//div[@class="sn_txt"]/div//span[@class="sn_ST"]//span[@class="sn_tm"]//text()')))
+        publishedDateXPath = result.xpath('.//div[@class="sn_txt"]/div//span[contains(@class,"sn_ST")]//span[contains(@class,"sn_tm")]//text()')
+        if publishedDateXPath != None:
+            publishedDate = escape(' '.join(publishedDateXPath))
 
         if re.match("^[0-9]+ minute(s|) ago$", publishedDate):
             timeNumbers = re.findall(r'\d+', publishedDate)
@@ -74,9 +78,18 @@ def response(resp):
             publishedDate = datetime.now()\
                 - timedelta(hours=int(timeNumbers[0]))\
                 - timedelta(minutes=int(timeNumbers[1]))
+        elif re.match("^[0-9]+ day(s|) ago$", publishedDate):
+            timeNumbers = re.findall(r'\d+', publishedDate)
+            publishedDate = datetime.now()\
+                - timedelta(days=int(timeNumbers[0]))
         else:
-            publishedDate = parser.parse(publishedDate)
-
+            try:
+                # FIXME use params['language'] to parse either mm/dd or dd/mm
+                publishedDate = parser.parse(publishedDate, dayfirst=False)
+            except TypeError:
+                # FIXME
+                publishedDate = datetime.now()
+
         # append result
         results.append({'url': url,
                         'title': title,
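
For reference while reviewing, here is a minimal standalone sketch of the relative-date handling this change introduces. The helper name parse_bing_date is hypothetical and not part of bing_news.py; it only mirrors the minutes/days branches and the dateutil fallback from the diff, and its except clause is broader than the TypeError-only catch above.

# Hypothetical helper, not part of the engine: mirrors the relative-date
# branches and the dateutil fallback added in the diff above.
import re
from datetime import datetime, timedelta
from dateutil import parser


def parse_bing_date(text):
    # "N minute(s) ago" -> now minus N minutes
    if re.match(r"^[0-9]+ minute(s|) ago$", text):
        return datetime.now() - timedelta(minutes=int(re.findall(r'\d+', text)[0]))
    # "N day(s) ago" -> now minus N days (the branch this change adds)
    if re.match(r"^[0-9]+ day(s|) ago$", text):
        return datetime.now() - timedelta(days=int(re.findall(r'\d+', text)[0]))
    # Anything else: let dateutil guess (mm/dd assumed), fall back to "now"
    try:
        return parser.parse(text, dayfirst=False)
    except (TypeError, ValueError):
        return datetime.now()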