@@ -1,4 +1,12 @@
-#!/usr/bin/env python
+## Yahoo (News)
+#
+# @website     https://news.yahoo.com
+# @provide-api yes (https://developer.yahoo.com/boss/search/), $0.80/1000 queries
+#
+# @using-api   no (because pricing)
+# @results     HTML (using search portal)
+# @stable      no (HTML can change)
+# @parse       url, title, content, publishedDate
 
 from urllib import urlencode
 from lxml import html
@@ -8,8 +16,15 @@ from datetime import datetime, timedelta
 import re
 from dateutil import parser
 
+# engine dependent config
 categories = ['news']
-search_url = 'http://news.search.yahoo.com/search?{query}&b={offset}'
+paging = True
+language_support = True
+
+# search-url
+search_url = 'https://news.search.yahoo.com/search?{query}&b={offset}&fl=1&vl=lang_{lang}'
+
+# specific xpath variables
 results_xpath = '//div[@class="res"]'
 url_xpath = './/h3/a/@href'
 title_xpath = './/h3/a'
@@ -17,30 +32,39 @@ content_xpath = './/div[@class="abstr"]'
 publishedDate_xpath = './/span[@class="timestamp"]'
 suggestion_xpath = '//div[@id="satat"]//a'
 
-paging = True
-
 
+# do search-request
 def request(query, params):
     offset = (params['pageno'] - 1) * 10 + 1
+
     if params['language'] == 'all':
         language = 'en'
     else:
         language = params['language'].split('_')[0]
+
     params['url'] = search_url.format(offset=offset,
-                                      query=urlencode({'p': query}))
+                                      query=urlencode({'p': query}),
+                                      lang=language)
+
+    # TODO required?
     params['cookies']['sB'] = 'fl=1&vl=lang_{lang}&sh=1&rw=new&v=1'\
         .format(lang=language)
     return params
 
 
+# get response from search-request
 def response(resp):
     results = []
+
     dom = html.fromstring(resp.text)
 
+    # parse results
     for result in dom.xpath(results_xpath):
         url = parse_url(extract_url(result.xpath(url_xpath), search_url))
         title = extract_text(result.xpath(title_xpath)[0])
         content = extract_text(result.xpath(content_xpath)[0])
+
+        # parse publishedDate
         publishedDate = extract_text(result.xpath(publishedDate_xpath)[0])
 
         if re.match("^[0-9]+ minute(s|) ago$", publishedDate):
@@ -58,15 +82,11 @@ def response(resp):
         if publishedDate.year == 1900:
             publishedDate = publishedDate.replace(year=datetime.now().year)
 
+        # append result
         results.append({'url': url,
                         'title': title,
                         'content': content,
                         'publishedDate': publishedDate})
 
-    if not suggestion_xpath:
-        return results
-
-    for suggestion in dom.xpath(suggestion_xpath):
-        results.append({'suggestion': extract_text(suggestion)})
-
+    # return results
     return results
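
For context, a minimal sketch (not part of the patch) of how the reworked `request()` is expected to fill the new `search_url` template, assuming the usual searx `params` dict with `pageno`, `language` and `cookies` keys; the query and language values below are purely illustrative.

```python
# Illustrative only: reproduces the URL construction done by request() above.
from urllib import urlencode  # Python 2, matching the engine's import

search_url = 'https://news.search.yahoo.com/search?{query}&b={offset}&fl=1&vl=lang_{lang}'

# hypothetical example inputs
params = {'pageno': 2, 'language': 'de_DE', 'cookies': {}}
query = 'solar eclipse'

offset = (params['pageno'] - 1) * 10 + 1   # page 2 -> b=11
if params['language'] == 'all':
    language = 'en'
else:
    language = params['language'].split('_')[0]

params['url'] = search_url.format(query=urlencode({'p': query}),
                                  offset=offset,
                                  lang=language)
print params['url']
# https://news.search.yahoo.com/search?p=solar+eclipse&b=11&fl=1&vl=lang_de
```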