
[fix] yahoo url extraction

Adam Tauber, 11 years ago
parent commit 0fc481f47e
2 changed files with 7 additions and 9 deletions
  1. searx/engines/yahoo.py (+5 / -4)
  2. searx/engines/yahoo_news.py (+2 / -5)

searx/engines/yahoo.py (+5 / -4)

@@ -15,6 +15,10 @@ suggestion_xpath = '//div[@id="satat"]//a'
 
 paging = True
 
+def parse_url(url_string):
+    start = url_string.find('http', url_string.find('/RU=')+1)
+    end = min(url_string.rfind('/RS'), url_string.rfind('/RK'))
+    return unquote(url_string[start:end])
 
 def request(query, params):
     offset = (params['pageno'] - 1) * 10 + 1
@@ -34,10 +38,7 @@ def response(resp):
     dom = html.fromstring(resp.text)
 
     for result in dom.xpath(results_xpath):
-        url_string = extract_url(result.xpath(url_xpath), search_url)
-        start = url_string.find('http', url_string.find('/RU=')+1)
-        end = url_string.rfind('/RS')
-        url = unquote(url_string[start:end])
+        url = parse_url(extract_url(result.xpath(url_xpath), search_url))
         title = extract_text(result.xpath(title_xpath)[0])
         content = extract_text(result.xpath(content_xpath)[0])
         results.append({'url': url, 'title': title, 'content': content})
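
The new parse_url helper unwraps Yahoo's redirect links, which carry the real target as a percent-encoded string between '/RU=' and the trailing '/RK=...' / '/RS=...' parts; taking the earlier of those two markers via min() is what this commit changes. Below is a minimal standalone sketch of the helper with a made-up redirect URL of that shape (Python 2, like the engines themselves):

# Standalone sketch of the new helper. The wrapped URL below is a made-up
# example of Yahoo's '/RU=<encoded target>/RK=.../RS=...' redirect format.
from urllib import unquote  # Python 2 stdlib


def parse_url(url_string):
    # the real target is percent-encoded and starts right after '/RU='
    start = url_string.find('http', url_string.find('/RU=') + 1)
    # it ends at whichever of '/RS' or '/RK' appears first
    end = min(url_string.rfind('/RS'), url_string.rfind('/RK'))
    return unquote(url_string[start:end])


wrapped = ('http://r.search.yahoo.com/_ylt=A0'
           '/RU=http%3A%2F%2Fexample.org%2Fpage/RK=0/RS=abcdef-')
assert parse_url(wrapped) == 'http://example.org/page'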

searx/engines/yahoo_news.py (+2 / -5)

@@ -1,9 +1,9 @@
 #!/usr/bin/env python
 
 from urllib import urlencode
-from urlparse import unquote
 from lxml import html
 from searx.engines.xpath import extract_text, extract_url
+from searx.engines.yahoo import parse_url
 
 categories = ['news']
 search_url = 'http://news.search.yahoo.com/search?{query}&b={offset}'
@@ -34,10 +34,7 @@ def response(resp):
     dom = html.fromstring(resp.text)
 
     for result in dom.xpath(results_xpath):
-        url_string = extract_url(result.xpath(url_xpath), search_url)
-        start = url_string.find('http', url_string.find('/RU=')+1)
-        end = url_string.rfind('/RS')
-        url = unquote(url_string[start:end])
+        url = parse_url(extract_url(result.xpath(url_xpath), search_url))
         title = extract_text(result.xpath(title_xpath)[0])
         content = extract_text(result.xpath(content_xpath)[0])
         results.append({'url': url, 'title': title, 'content': content})
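
With the helper now exported from searx.engines.yahoo, the news engine shares the same unwrapping logic instead of carrying its own copy. A quick reuse sketch, assuming a searx checkout on PYTHONPATH and the same made-up redirect URL shape as above:

# Reuse sketch: import the shared helper rather than re-implementing it.
# Assumes searx is importable; the URL is again a made-up example.
from searx.engines.yahoo import parse_url

wrapped = ('http://news.search.yahoo.com/_ylt=A0'
           '/RU=http%3A%2F%2Fexample.org%2Fstory/RK=0/RS=abcdef-')
assert parse_url(wrapped) == 'http://example.org/story'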