
showing publishedDate for news

Thomas Pointhuber, 11 years ago
commit b88146d669

searx/engines/google_news.py  +9 -0

@@ -2,6 +2,7 @@
 
 from urllib import urlencode
 from json import loads
+from datetime import datetime, timedelta
 
 categories = ['news']
 
@@ -31,7 +32,15 @@ def response(resp):
         return []
 
     for result in search_res['responseData']['results']:
+# S.149 (159), library.pdf
+# datetime.strptime("Mon, 10 Mar 2014 16:26:15 -0700", "%a, %d %b %Y %H:%M:%S %z")
+#        publishedDate = parse(result['publishedDate'])
+        publishedDate = datetime.strptime(str.join(' ',result['publishedDate'].split(None)[0:5]), "%a, %d %b %Y %H:%M:%S")
+        #utc_offset = timedelta(result['publishedDate'].split(None)[5])  # local = utc + offset
+        #publishedDate = publishedDate + utc_offset
+
         results.append({'url': result['unescapedUrl'],
                         'title': result['titleNoFormatting'],
+                        'publishedDate': publishedDate,
                         'content': result['content']})
     return results
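
A note on the date handling above (not part of the commit): Python 2's strptime has no working %z directive, which is why the patch keeps only the first five whitespace-separated fields and drops the trailing "-0700" offset. A minimal sketch of parsing the full RFC 2822 timestamp with email.utils.parsedate_tz and normalizing to UTC instead; the helper name parse_rfc2822 is illustrative, not from the codebase:

from datetime import datetime, timedelta
from email.utils import parsedate_tz

def parse_rfc2822(value):
    # e.g. "Mon, 10 Mar 2014 16:26:15 -0700"
    parts = parsedate_tz(value)        # 10-tuple; last item is the UTC offset in seconds, or None
    if parts is None:
        return None
    published = datetime(*parts[:6])   # naive datetime in the string's own local time
    if parts[9] is not None:
        published -= timedelta(seconds=parts[9])   # local time minus offset = UTC
    return published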

searx/engines/yahoo_news.py  +6 -1

@@ -4,6 +4,7 @@ from urllib import urlencode
 from lxml import html
 from searx.engines.xpath import extract_text, extract_url
 from searx.engines.yahoo import parse_url
+from datetime import datetime
 
 categories = ['news']
 search_url = 'http://news.search.yahoo.com/search?{query}&b={offset}'
@@ -11,6 +12,7 @@ results_xpath = '//div[@class="res"]'
 url_xpath = './/h3/a/@href'
 title_xpath = './/h3/a'
 content_xpath = './/div[@class="abstr"]'
+publishedDate_xpath = './/span[@class="timestamp"]'
 suggestion_xpath = '//div[@id="satat"]//a'
 
 paging = True
@@ -37,7 +39,10 @@ def response(resp):
         url = parse_url(extract_url(result.xpath(url_xpath), search_url))
         title = extract_text(result.xpath(title_xpath)[0])
         content = extract_text(result.xpath(content_xpath)[0])
-        results.append({'url': url, 'title': title, 'content': content})
+# Feb 20 04:02am
+        publishedDate = datetime.strptime(extract_text(result.xpath(publishedDate_xpath)[0]),"%b %d %H:%M%p")
+        #publishedDate.replace(year=2014)
+        results.append({'url': url, 'title': title, 'content': content,'publishedDate':publishedDate})
 
     if not suggestion_xpath:
         return results
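
A note on the Yahoo timestamp (not part of the commit): "Feb 20 04:02am" carries no year, so strptime fills in 1900, and the commented-out replace(year=2014) would have no effect as written because datetime.replace returns a new object rather than mutating in place. A sketch of one way to fill in the year, assuming that a same-format date landing in the future belongs to the previous year; parse_yahoo_timestamp is an illustrative name:

from datetime import datetime

def parse_yahoo_timestamp(text, now=None):
    # e.g. "Feb 20 04:02am"
    now = now or datetime.now()
    published = datetime.strptime(text, "%b %d %H:%M%p")
    # note: with %H the %p marker is accepted but does not adjust the hour,
    # mirroring the format string used in the commit
    published = published.replace(year=now.year)   # replace() returns a new datetime
    if published > now:                            # e.g. a December article seen in January
        published = published.replace(year=now.year - 1)
    return published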

searx/templates/result_templates/default.html  +1 -0

@@ -6,6 +6,7 @@
 
   <div>
     <h3 class="result_title"><a href="{{ result.url }}">{{ result.title|safe }}</a></h3>
+    {% if result.publishedDate %}<p class="published_date">{{ result.publishedDate }}</p>{% endif %}
     <p class="content">{% if result.content %}{{ result.content|safe }}<br />{% endif %}</p>
     <p class="url">{{ result.pretty_url }}</p>
   </div>

searx/webapp.py  +12 -0

@@ -26,6 +26,7 @@ import json
 import cStringIO
 import os
 
+from datetime import datetime, timedelta
 from itertools import chain
 from flask import (
     Flask, request, render_template, url_for, Response, make_response,
@@ -156,6 +157,17 @@ def index():
             if engine in favicons:
                 result['favicon'] = engine
 
+        # TODO, check if timezone is calculated right
+        if 'publishedDate' in result:
+            if result['publishedDate'].date() == datetime.now().date():
+                timedifference = datetime.now()-result['publishedDate']
+                if timedifference.seconds < 60*60:
+                    result['publishedDate'] = '{0:d} minutes ago'.format(timedifference.seconds/60)
+                else:
+                    result['publishedDate'] = '{0:d} hours ago'.format(timedifference.seconds/60/60)
+            else:
+                result['publishedDate'] = result['publishedDate'].strftime('%d.%m.%Y')
+
     if search.request_data.get('format') == 'json':
         return Response(json.dumps({'query': search.query,
                                     'results': search.results}),
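
A note on the relative-time block above (not part of the commit): the '{0:d}' format relies on '/' being integer division, which holds on this Python 2 codebase (note the cStringIO import) but would raise on Python 3, and timedelta.seconds only covers the sub-day remainder (harmless here because the branch is limited to same-day results). A sketch of the same formatting as a standalone helper using total_seconds() and explicit integer division; humanize_published_date is an illustrative name:

from datetime import datetime

def humanize_published_date(published, now=None):
    now = now or datetime.now()
    if published.date() != now.date():
        return published.strftime('%d.%m.%Y')       # not from today: show the absolute date
    minutes = int((now - published).total_seconds() // 60)
    if minutes < 60:
        return '{0:d} minutes ago'.format(minutes)
    return '{0:d} hours ago'.format(minutes // 60)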