
Merge branch 'master' of https://github.com/asciimoo/searx

pw3t committed 11 years ago
parent commit 0d93ad2018

engines.cfg_sample  +9 -7

@@ -5,7 +5,7 @@ number_of_results = 1
 
 [bing]
 engine = bing
-language = en-us
+locale = en-US
 
 [cc]
 engine=currency_convert
@@ -20,6 +20,7 @@ engine = duckduckgo_definitions
 
 [duckduckgo]
 engine = duckduckgo
+locale = en-us
 
 [flickr]
 engine = flickr
@@ -63,17 +64,17 @@ categories = social media
 [urbandictionary]
 engine        = xpath
 search_url    = http://www.urbandictionary.com/define.php?term={query}
-url_xpath     = //div[@id="entries"]//div[@class="word"]//a
-title_xpath   = //div[@id="entries"]//div[@class="word"]//span//text()
-content_xpath = //div[@id="entries"]//div[@class="text"]//div[@class="definition"]//text()
+url_xpath     = //div[@id="entries"]//div[@class="word"]/a/@href
+title_xpath   = //div[@id="entries"]//div[@class="word"]/span
+content_xpath = //div[@id="entries"]//div[@class="text"]/div[@class="definition"]
 
 [yahoo]
 engine           = xpath
 search_url       = http://search.yahoo.com/search?p={query}
 results_xpath    = //div[@class="res"]
-url_xpath        = .//span[@class="url"]//text()
-content_xpath    = .//div[@class="abstr"]//text()
-title_xpath      = .//h3/a//text()
+url_xpath        = .//h3/a/@href
+title_xpath      = .//h3/a
+content_xpath    = .//div[@class="abstr"]
 suggestion_xpath = //div[@id="satat"]//a
 
 [youtube]
@@ -82,5 +83,6 @@ categories = videos
 
 [dailymotion]
 engine = dailymotion
+locale = en_US
 categories = videos
 
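Note: each key in an [engine] section is copied onto the corresponding engine module as a module-level attribute, which is why the language → locale rename here has to match the variable renames in bing.py and dailymotion.py below. A rough sketch of that mechanism (ConfigParser-based; not necessarily the project's exact loader):

    from ConfigParser import SafeConfigParser  # Python 2
    from importlib import import_module

    config = SafeConfigParser()
    config.read('engines.cfg')

    for section in config.sections():                 # e.g. 'bing'
        module_name = config.get(section, 'engine')   # engine = bing
        module = import_module('searx.engines.' + module_name)
        for key, value in config.items(section):      # e.g. locale = en-US
            setattr(module, key, value)               # overrides the module default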

searx/engines/__init__.py  +1 -1

@@ -261,7 +261,7 @@ def get_engines_stats():
 
     for engine in errors:
         if max_errors:
-            engine['percentage'] = int(engine['avg']/max_errors*100)
+            engine['percentage'] = int(float(engine['avg'])/max_errors*100)
         else:
             engine['percentage'] = 0
 
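The float() cast matters because this code runs on Python 2, where dividing two ints floors the result: any error average below max_errors came out as 0, so every percentage was 0 or 100. A minimal demonstration:

    # Python 2 semantics: int / int truncates
    avg, max_errors = 3, 10
    print(int(avg / max_errors * 100))         # 0  (3 / 10 == 0 before the multiply)
    print(int(float(avg) / max_errors * 100))  # 30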

searx/engines/bing.py  +2 -2

@@ -4,11 +4,11 @@ from cgi import escape
 
 base_url = 'http://www.bing.com/'
 search_string = 'search?{query}'
-language = 'en-us' # see http://msdn.microsoft.com/en-us/library/dd251064.aspx
+locale = 'en-US' # see http://msdn.microsoft.com/en-us/library/dd251064.aspx
 
 
 def request(query, params):
-    search_path = search_string.format(query=urlencode({'q': query, 'setmkt': language}))
+    search_path = search_string.format(query=urlencode({'q': query, 'setmkt': locale}))
     #if params['category'] == 'images':
     #    params['url'] = base_url + 'images/' + search_path
     params['url'] = base_url + search_path
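setmkt is Bing's market parameter, so the renamed locale value feeds straight into the query string. A sketch of the URL request() now builds (parameter order from urlencode may vary):

    from urllib import urlencode  # Python 2, as in the codebase

    locale = 'en-US'
    search_path = 'search?{query}'.format(query=urlencode({'q': 'searx', 'setmkt': locale}))
    print('http://www.bing.com/' + search_path)
    # e.g. http://www.bing.com/search?q=searx&setmkt=en-US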

searx/engines/dailymotion.py  +9 -3

@@ -1,16 +1,17 @@
 from urllib import urlencode
+from lxml import html
 from json import loads
 from cgi import escape
 
 categories = ['videos']
-localization = 'en'
+locale = 'en_US'
 
 # see http://www.dailymotion.com/doc/api/obj-video.html
 search_url = 'https://api.dailymotion.com/videos?fields=title,description,duration,url,thumbnail_360_url&sort=relevance&limit=25&page=1&{query}'
 
 def request(query, params):
     global search_url
-    params['url'] = search_url.format(query=urlencode({'search': query, 'localization': localization }))
+    params['url'] = search_url.format(query=urlencode({'search': query, 'localization': locale }))
     return params
 
 
@@ -27,6 +28,11 @@ def response(resp):
         else:
             content = ''
         if res['description']:
-            content += escape(res['description'][:500])
+            description = text_content_from_html(res['description'])
+            content += description[:500]
         results.append({'url': url, 'title': title, 'content': content})
     return results
+
+def text_content_from_html(html_string):
+    desc_html = html.fragment_fromstring(html_string, create_parent=True)
+    return desc_html.text_content()
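Dailymotion descriptions can contain HTML markup, and escaping them whole kept that markup visible as literal text; fragment_fromstring(..., create_parent=True) wraps the fragment in a synthetic parent element so mixed text and markup parse cleanly, and text_content() then strips the tags. A minimal check:

    from lxml import html

    def text_content_from_html(html_string):
        desc_html = html.fragment_fromstring(html_string, create_parent=True)
        return desc_html.text_content()

    print(text_content_from_html('Watch this <b>great</b> clip &amp; more'))
    # -> Watch this great clip & more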

searx/engines/duckduckgo.py  +3 -2

@@ -3,10 +3,11 @@ from urllib import urlencode
 from searx.utils import html_to_text
 
 url = 'https://duckduckgo.com/'
-search_url = url + 'd.js?{query}&l=us-en&p=1&s=0'
+search_url = url + 'd.js?{query}&p=1&s=0'
+locale = 'us-en'
 
 def request(query, params):
-    params['url'] = search_url.format(query=urlencode({'q': query}))
+    params['url'] = search_url.format(query=urlencode({'q': query, 'l': locale}))
     return params
 
 

searx/engines/duckduckgo_definitions.py  +1 -1

@@ -1,7 +1,7 @@
 import json
 from urllib import urlencode
 
-url = 'http://api.duckduckgo.com/?{query}&format=json&pretty=0'
+url = 'http://api.duckduckgo.com/?{query}&format=json&pretty=0&no_redirect=1'
 
 def request(query, params):
     params['url'] =  url.format(query=urlencode({'q': query}))
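no_redirect=1 asks the DuckDuckGo API to report !bang targets in its JSON Redirect field rather than answering with an HTTP redirect. The request this module now issues, sketched:

    from urllib import urlencode  # Python 2

    url = 'http://api.duckduckgo.com/?{query}&format=json&pretty=0&no_redirect=1'
    print(url.format(query=urlencode({'q': '!w searx'})))
    # -> http://api.duckduckgo.com/?q=%21w+searx&format=json&pretty=0&no_redirect=1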

searx/engines/filecrop.py  +4 -6

@@ -1,6 +1,4 @@
-from json import loads
 from urllib import urlencode
-from searx.utils import html_to_text
 from HTMLParser import HTMLParser
 
 url = 'http://www.filecrop.com/'
@@ -10,7 +8,7 @@ class FilecropResultParser(HTMLParser):
     def __init__(self):
         HTMLParser.__init__(self)
         self.__start_processing = False
-        
+
         self.results = []
         self.result = {}
 
@@ -22,7 +20,7 @@ class FilecropResultParser(HTMLParser):
         if tag == 'tr':
             if ('bgcolor', '#edeff5') in attrs or ('bgcolor', '#ffffff') in attrs:
                 self.__start_processing = True
-                
+
         if not self.__start_processing:
             return
 
@@ -50,7 +48,7 @@ class FilecropResultParser(HTMLParser):
                 self.data_counter = 0
                 self.results.append(self.result)
                 self.result = {}
-                                
+
     def handle_data(self, data):
         if not self.__start_processing:
             return
@@ -59,7 +57,7 @@ class FilecropResultParser(HTMLParser):
             self.result['content'] += data + ' '
         else:
             self.result['content'] = data + ' '
-        
+
         self.data_counter += 1
 
 def request(query, params):

searx/engines/flickr.py  +0 -0


searx/engines/google_images.py  +0 -0


searx/engines/startpage.py  +5 -6

@@ -19,14 +19,13 @@ def response(resp):
     global base_url
     results = []
     dom = html.fromstring(resp.content)
-    for result in dom.xpath('//div[@class="result"]'):
+    # ads xpath: //div[@id="results"]/div[@id="sponsored"]//div[@class="result"]
+    # not ads: div[@class="result"] elements that are direct children of div[@id="results"]
+    for result in dom.xpath('//div[@id="results"]/div[@class="result"]'):
         link = result.xpath('.//h3/a')[0]
         url = link.attrib.get('href')
         parsed_url = urlparse(url)
-        # TODO better google link detection
-        if parsed_url.netloc.find('www.google.com') >= 0:
-            continue
-        title = ' '.join(link.xpath('.//text()'))
-        content = escape(' '.join(result.xpath('.//p[@class="desc"]//text()')))
+        title = link.text_content()
+        content = result.xpath('./p[@class="desc"]')[0].text_content()
         results.append({'url': url, 'title': title, 'content': content})
     return results
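The tightened XPath is what drops the ads: sponsored results sit inside a nested #sponsored container, so requiring result divs to be direct children of #results skips them, replacing the old google-link heuristic. An illustration with hypothetical markup:

    from lxml import html

    page = html.fromstring('''
    <div id="results">
      <div id="sponsored"><div class="result">ad</div></div>
      <div class="result">organic</div>
    </div>''')

    print([r.text for r in page.xpath('//div[@class="result"]')])
    # -> ['ad', 'organic']
    print([r.text for r in page.xpath('//div[@id="results"]/div[@class="result"]')])
    # -> ['organic']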

searx/engines/xpath.py  +56 -20

@@ -1,5 +1,5 @@
 from lxml import html
-from urllib import urlencode
+from urllib import urlencode, unquote
 from urlparse import urlparse, urljoin
 from cgi import escape
 from lxml.etree import _ElementStringResult
@@ -11,32 +11,64 @@ title_xpath   = None
 suggestion_xpath = ''
 results_xpath = ''
 
-def extract_url(xpath_results):
-    url = ''
-    parsed_search_url = urlparse(search_url)
+'''
+if xpath_results is a list, extract the text from each result and concatenate them
+if xpath_results is an xml element, extract all its text nodes (text_content() method from lxml)
+if xpath_results is a string, it is already the text
+'''
+def extract_text(xpath_results):
     if type(xpath_results) == list:
+        # it's a list of results: concatenate everything using a recursive call
         if not len(xpath_results):
             raise Exception('Empty url resultset')
-        if type(xpath_results[0]) == _ElementStringResult:
-            url = ''.join(xpath_results)
-            if url.startswith('//'):
-                url = parsed_search_url.scheme+url
-            elif url.startswith('/'):
-                url = urljoin(search_url, url)
-        #TODO
-        else:
-            url = xpath_results[0].attrib.get('href')
+        result = ''
+        for e in xpath_results:
+            result = result + extract_text(e)
+        return result
+    elif type(xpath_results) == _ElementStringResult:
+        # it's a string
+        return ''.join(xpath_results)
     else:
-        url = xpath_results.attrib.get('href')
-    if not url.startswith('http://') and not url.startswith('https://'):
-        url = 'http://'+url
+        # it's an element
+        return xpath_results.text_content()
+
+
+def extract_url(xpath_results):
+    url = extract_text(xpath_results)
+
+    if url.startswith('//'):
+        # add http or https to protocol-relative urls like //example.com/
+        parsed_search_url = urlparse(search_url)
+        url = parsed_search_url.scheme+url
+    elif url.startswith('/'):
+        # make urls relative to the search engine absolute
+        url = urljoin(search_url, url)
+
+    # normalize url
+    url = normalize_url(url)
+
+    return url
+
+
+def normalize_url(url):
     parsed_url = urlparse(url)
+
+    # add a / at the end of the url if there is no path
     if not parsed_url.netloc:
         raise Exception('Cannot parse url')
     if not parsed_url.path:
         url += '/'
+
+    # FIXME: hack for yahoo
+    if parsed_url.hostname == 'search.yahoo.com' and parsed_url.path.startswith('/r'):
+        p = parsed_url.path
+        mark = p.find('/**')
+        if mark != -1:
+            return unquote(p[mark+3:]).decode('utf-8')
+
     return url
 
+
 def request(query, params):
     query = urlencode({'q': query})[2:]
     params['url'] = search_url.format(query=query)
@@ -50,15 +82,19 @@ def response(resp):
     if results_xpath:
         for result in dom.xpath(results_xpath):
             url = extract_url(result.xpath(url_xpath))
-            title = ' '.join(result.xpath(title_xpath))
-            content = escape(' '.join(result.xpath(content_xpath)))
+            title = extract_text(result.xpath(title_xpath)[0])
+            content = extract_text(result.xpath(content_xpath)[0])
             results.append({'url': url, 'title': title, 'content': content})
     else:
-        for content, url, title in zip(dom.xpath(content_xpath), map(extract_url, dom.xpath(url_xpath)), dom.xpath(title_xpath)):
+        for url, title, content in zip(
+                map(extract_url, dom.xpath(url_xpath)),
+                map(extract_text, dom.xpath(title_xpath)),
+                map(extract_text, dom.xpath(content_xpath)),
+                ):
             results.append({'url': url, 'title': title, 'content': content})
 
     if not suggestion_xpath:
         return results
     for suggestion in dom.xpath(suggestion_xpath):
-        results.append({'suggestion': escape(''.join(suggestion.xpath('.//text()')))})
+        results.append({'suggestion': extract_text(suggestion)})
     return results
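extract_text unifies the three shapes an XPath query can return, which is what lets engines.cfg point title_xpath at an element, an attribute, or a text() match interchangeably. The three shapes, shown with lxml on hypothetical markup:

    from lxml import html

    dom = html.fromstring('<div><h3><a href="/x">Hello <b>world</b></a></h3></div>')

    dom.xpath('.//h3/a')[0]        # element: text_content() yields 'Hello world'
    dom.xpath('.//h3/a/@href')[0]  # _ElementStringResult: returned as a plain string, '/x'
    dom.xpath('.//h3/a//text()')   # list: each item extracted recursively, then concatenated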

searx/engines/yacy.py  +5 -5

@@ -1,5 +1,5 @@
 from json import loads
-from urllib import urlencode, quote
+from urllib import urlencode
 
 url = 'http://localhost:8090'
 search_url = '/yacysearch.json?{query}&maximumRecords=10'
@@ -10,7 +10,7 @@ def request(query, params):
 
 def response(resp):
     raw_search_results = loads(resp.text)
-    
+
     if not len(raw_search_results):
         return []
 
@@ -22,10 +22,10 @@ def response(resp):
         tmp_result = {}
         tmp_result['title'] = result['title']
         tmp_result['url'] = result['link']
-        tmp_result['content'] = '' 
-        
+        tmp_result['content'] = ''
+
         if len(result['description']):
-            tmp_result['content'] += result['description'] +"<br/>" 
+            tmp_result['content'] += result['description'] +"<br/>"
 
         if len(result['pubDate']):
             tmp_result['content'] += result['pubDate'] + "<br/>"

searx/templates/about.html  +1 -1

@@ -37,7 +37,7 @@
 <p>It's ok if you don't trust us regarding the logs, <a href="https://github.com/asciimoo/searx">take the code</a> and run it yourself! decentralize!</p>
 <h3>How to add to firefox?</h3>
 <p><a href="#" onclick="window.external.AddSearchProvider(window.location.protocol + '//' + window.location.host + '/opensearch.xml')">Install</a> searx as a search engine on any version of Firefox! (javascript required)</p>
-<h2 id="faq">Developer FAQ</h2>
+<h2 id="dev_faq">Developer FAQ</h2>
 <h3>New engines?</h3>
 <p><ul>
     <li>Edit your engines.cfg, see <a href="https://raw.github.com/asciimoo/searx/master/engines.cfg_sample">sample config</a></li>

searx/webapp.py  +2 -1

@@ -152,7 +152,8 @@ def preferences():
                 selected_categories.append(category)
         if selected_categories:
             resp = make_response(redirect('/'))
-            resp.set_cookie('categories', ','.join(selected_categories))
+            # cookie max age: 4 weeks
+            resp.set_cookie('categories', ','.join(selected_categories), max_age=60*60*24*7*4)
             return resp
     return render('preferences.html')
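set_cookie's max_age is given in seconds; without it the categories cookie was session-only, so preferences were lost when the browser closed. The arithmetic behind the new value:

    max_age = 60 * 60 * 24 * 7 * 4    # sec/min * min/hr * hr/day * day/wk * weeks
    print(max_age)                    # 2419200 seconds
    print(max_age // (60 * 60 * 24))  # 28 days = 4 weeks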