Browse source

Merge pull request #526 from ukwt/anime

Add a few search engines
Adam Tauber 9 years ago
parent
commit
85c0351dca

+ 53
- 0
searx/engines/fdroid.py Visa fil

@@ -0,0 +1,53 @@
1
+"""
2
+ F-Droid (a repository of FOSS applications for Android)
3
+
4
+ @website      https://f-droid.org/
5
+ @provide-api  no
6
+ @using-api    no
7
+ @results      HTML
8
+ @stable       no (HTML can change)
9
+ @parse        url, title, content
10
+"""
11
+
12
+from cgi import escape
13
+from urllib import urlencode
14
+from searx.engines.xpath import extract_text
15
+from lxml import html
16
+
17
+# engine dependent config
18
+categories = ['files']
19
+paging = True
20
+
21
+# search-url
22
+base_url = 'https://f-droid.org/'
23
+search_url = base_url + 'repository/browse/?{query}'
24
+
25
+
26
# do search-request
def request(query, params):
    """Fill in the f-droid browse URL for *query* and the requested page."""
    search_args = urlencode({'fdfilter': query,
                             'fdpage': params['pageno']})
    params['url'] = search_url.format(query=search_args)
    return params
32
+
33
+
34
# get response from search-request
def response(resp):
    """Parse an f-droid repository browse page into a list of result dicts."""
    dom = html.fromstring(resp.text)
    results = []

    # each application is rendered in a div with id "appheader", which is
    # wrapped by the anchor linking to the application page
    for app in dom.xpath('//div[@id="appheader"]'):
        link = app.xpath('./ancestor::a/@href')[0]
        app_title = app.xpath('./p/span/text()')[0]
        icon_src = app.xpath('.//img/@src')[0]

        # the description paragraph repeats the title; drop its first
        # occurrence so only the actual description text remains
        description = extract_text(app.xpath('./p')[0])
        description = escape(description.replace(app_title, '', 1).strip())

        results.append({'url': link,
                        'title': app_title,
                        'content': description,
                        'img_src': icon_src})

    return results

+ 6
- 6
searx/engines/google.py Visa fil

@@ -46,11 +46,11 @@ country_to_hostname = {
46 46
     'NZ': 'www.google.co.nz',  # New Zealand
47 47
     'PH': 'www.google.com.ph',  # Philippines
48 48
     'SG': 'www.google.com.sg',  # Singapore
49
-    # 'US': 'www.google.us',  # United State, redirect to .com
49
+    # 'US': 'www.google.us',  # United States, redirect to .com
50 50
     'ZA': 'www.google.co.za',  # South Africa
51 51
     'AR': 'www.google.com.ar',  # Argentina
52 52
     'CL': 'www.google.cl',  # Chile
53
-    'ES': 'www.google.es',  # Span
53
+    'ES': 'www.google.es',  # Spain
54 54
     'MX': 'www.google.com.mx',  # Mexico
55 55
     'EE': 'www.google.ee',  # Estonia
56 56
     'FI': 'www.google.fi',  # Finland
@@ -61,7 +61,7 @@ country_to_hostname = {
61 61
     'HU': 'www.google.hu',  # Hungary
62 62
     'IT': 'www.google.it',  # Italy
63 63
     'JP': 'www.google.co.jp',  # Japan
64
-    'KR': 'www.google.co.kr',  # South Korean
64
+    'KR': 'www.google.co.kr',  # South Korea
65 65
     'LT': 'www.google.lt',  # Lithuania
66 66
     'LV': 'www.google.lv',  # Latvia
67 67
     'NO': 'www.google.no',  # Norway
@@ -76,9 +76,9 @@ country_to_hostname = {
76 76
     'SE': 'www.google.se',  # Sweden
77 77
     'TH': 'www.google.co.th',  # Thailand
78 78
     'TR': 'www.google.com.tr',  # Turkey
79
-    'UA': 'www.google.com.ua',  # Ikraine
80
-    # 'CN': 'www.google.cn',  # China, only from china ?
81
-    'HK': 'www.google.com.hk',  # Hong kong
79
+    'UA': 'www.google.com.ua',  # Ukraine
80
+    # 'CN': 'www.google.cn',  # China, only from China ?
81
+    'HK': 'www.google.com.hk',  # Hong Kong
82 82
     'TW': 'www.google.com.tw'  # Taiwan
83 83
 }
84 84
 

+ 119
- 0
searx/engines/nyaa.py Visa fil

@@ -0,0 +1,119 @@
1
+"""
2
+ Nyaa.se (Anime Bittorrent tracker)
3
+
4
+ @website      http://www.nyaa.se/
5
+ @provide-api  no
6
+ @using-api    no
7
+ @results      HTML
8
+ @stable       no (HTML can change)
9
+ @parse        url, title, content, seed, leech, torrentfile
10
+"""
11
+
12
+from cgi import escape
13
+from urllib import urlencode
14
+from lxml import html
15
+from searx.engines.xpath import extract_text
16
+
17
+# engine dependent config
18
+categories = ['files', 'images', 'videos', 'music']
19
+paging = True
20
+
21
+# search-url
22
+base_url = 'http://www.nyaa.se/'
23
+search_url = base_url + '?page=search&{query}&offset={offset}'
24
+
25
+# xpath queries
26
+xpath_results = '//table[@class="tlist"]//tr[contains(@class, "tlistrow")]'
27
+xpath_category = './/td[@class="tlisticon"]/a'
28
+xpath_title = './/td[@class="tlistname"]/a'
29
+xpath_torrent_file = './/td[@class="tlistdownload"]/a'
30
+xpath_filesize = './/td[@class="tlistsize"]/text()'
31
+xpath_seeds = './/td[@class="tlistsn"]/text()'
32
+xpath_leeches = './/td[@class="tlistln"]/text()'
33
+xpath_downloads = './/td[@class="tlistdn"]/text()'
34
+
35
+
36
# convert a variable to integer or return 0 if it's not a number
def int_or_zero(num):
    """Coerce *num* (a digit string, or a list holding one) to int, else 0."""
    if isinstance(num, list):
        # an empty xpath result list means "no value found"
        if not num:
            return 0
        num = num[0]
    return int(num) if num.isdigit() else 0
45
+
46
+
47
# get multiplier to convert torrent size to bytes
def get_filesize_mul(suffix):
    """Return the byte multiplier for a size suffix such as 'MB' or 'GiB'.

    Decimal (KB, MB, ...) and binary (KiB, MiB, ...) suffixes are treated
    identically as powers of 1024, matching how the trackers report sizes.
    A plain 'B' (bytes) suffix is also accepted — the tokyotoshokan size
    regex can capture it.  An unknown suffix raises KeyError; callers wrap
    this function in try/except and simply omit the filesize field.
    """
    return {
        'B': 1,

        'KB': 1024,
        'MB': 1024 ** 2,
        'GB': 1024 ** 3,
        'TB': 1024 ** 4,

        'KIB': 1024,
        'MIB': 1024 ** 2,
        'GIB': 1024 ** 3,
        'TIB': 1024 ** 4
    }[str(suffix).upper()]
60
+
61
+
62
# do search-request
def request(query, params):
    """Build the nyaa.se search URL; the page number doubles as the offset."""
    params['url'] = search_url.format(
        query=urlencode({'term': query}),
        offset=params['pageno'])
    return params
67
+
68
+
69
# get response from search-request
def response(resp):
    """Parse a nyaa.se torrent listing page into torrent-template results."""
    dom = html.fromstring(resp.text)
    results = []

    for row in dom.xpath(xpath_results):
        # category in which our torrent belongs
        category = row.xpath(xpath_category)[0].attrib.get('title')

        # torrent title and link to its page
        page_a = row.xpath(xpath_title)[0]
        title = escape(extract_text(page_a))
        href = page_a.attrib.get('href')

        # link to the torrent file
        torrent_link = row.xpath(xpath_torrent_file)[0].attrib.get('href')

        # torrent size, e.g. "10 MiB"; omit the field if it cannot be parsed
        try:
            size_str, suffix = row.xpath(xpath_filesize)[0].split(' ')
            file_size = int(float(size_str) * get_filesize_mul(suffix))
        except Exception:
            file_size = None

        # seed / leech / download counters (0 when missing or non-numeric)
        seed = int_or_zero(row.xpath(xpath_seeds))
        leech = int_or_zero(row.xpath(xpath_leeches))
        downloads = int_or_zero(row.xpath(xpath_downloads))

        # content string contains all information not included into template
        content = escape(
            'Category: "{category}". Downloaded {downloads} times.'.format(
                category=category, downloads=downloads))

        results.append({'url': href,
                        'title': title,
                        'content': content,
                        'seed': seed,
                        'leech': leech,
                        'filesize': file_size,
                        'torrentfile': torrent_link,
                        'template': 'torrent.html'})

    return results

+ 77
- 0
searx/engines/reddit.py Visa fil

@@ -0,0 +1,77 @@
1
+"""
2
+ Reddit
3
+
4
+ @website      https://www.reddit.com/
5
+ @provide-api  yes (https://www.reddit.com/dev/api)
6
+
7
+ @using-api    yes
8
+ @results      JSON
9
+ @stable       yes
10
+ @parse        url, title, content, thumbnail, publishedDate
11
+"""
12
+
13
+import json
14
+from cgi import escape
15
+from urllib import urlencode
16
+from urlparse import urlparse
17
+from datetime import datetime
18
+
19
+# engine dependent config
20
+categories = ['general', 'images', 'news', 'social media']
21
+page_size = 25
22
+
23
+# search-url
24
+search_url = 'https://www.reddit.com/search.json?{query}'
25
+
26
+
27
# do search-request
def request(query, params):
    """Build the reddit search API request URL."""
    search_args = urlencode({'q': query,
                             'limit': page_size})
    params['url'] = search_url.format(query=search_args)

    return params
34
+
35
+
36
# get response from search-request
def response(resp):
    """Parse the reddit search API JSON answer.

    Posts whose thumbnail is a real URL become image results, all other
    posts become text results; image results are listed first.
    """
    search_results = json.loads(resp.text)

    # a missing 'data' key means an error answer or no results at all
    if 'data' not in search_results:
        return []

    img_results = []
    text_results = []

    # process results
    for post in search_results.get('data', {}).get('children', []):
        data = post['data']

        # extract post information
        params = {
            'url': data['url'],
            'title': data['title']
        }

        # if thumbnail field contains a valid URL, we need to change template
        thumbnail = data['thumbnail']
        url_info = urlparse(thumbnail)
        # a usable thumbnail URL has both a netloc and a path
        if url_info[1] != '' and url_info[2] != '':
            params['thumbnail_src'] = thumbnail
            params['template'] = 'images.html'
            img_results.append(params)
        else:
            body = escape(data['selftext'])
            if len(body) > 500:
                body = body[:500] + '...'
            params['content'] = body
            params['publishedDate'] = datetime.fromtimestamp(data['created_utc'])
            text_results.append(params)

    # show images first and text results second
    return img_results + text_results

+ 102
- 0
searx/engines/tokyotoshokan.py Visa fil

@@ -0,0 +1,102 @@
1
+"""
2
+ Tokyo Toshokan (A BitTorrent Library for Japanese Media)
3
+
4
+ @website      https://www.tokyotosho.info/
5
+ @provide-api  no
6
+ @using-api    no
7
+ @results      HTML
8
+ @stable       no (HTML can change)
9
+ @parse        url, title, publishedDate, seed, leech,
10
+               filesize, magnetlink, content
11
+"""
12
+
13
+import re
14
+from cgi import escape
15
+from urllib import urlencode
16
+from lxml import html
17
+from searx.engines.xpath import extract_text
18
+from datetime import datetime
19
+from searx.engines.nyaa import int_or_zero, get_filesize_mul
20
+
21
+# engine dependent config
22
+categories = ['files', 'videos', 'music']
23
+paging = True
24
+
25
+# search-url
26
+base_url = 'https://www.tokyotosho.info/'
27
+search_url = base_url + 'search.php?{query}'
28
+
29
+
30
# do search-request
def request(query, params):
    """Build the tokyotosho.info search request URL."""
    search_args = urlencode({'page': params['pageno'],
                             'terms': query})
    params['url'] = search_url.format(query=search_args)
    return params
36
+
37
+
38
# get response from search-request
def response(resp):
    """Parse a tokyotosho.info search page.

    Every torrent occupies two consecutive table rows: the first holds the
    title and (optionally) a magnet link, the second holds size, date,
    comment and seed/leech statistics.
    """
    dom = html.fromstring(resp.text)
    rows = dom.xpath('//table[@class="listing"]//tr[contains(@class, "category_0")]')

    # check if there are no results or page layout was changed so we cannot parse it
    # currently there are two rows for each result, so total count must be even
    if len(rows) == 0 or len(rows) % 2 != 0:
        return []

    # regular expression for parsing torrent size strings
    size_re = re.compile(r'Size:\s*([\d.]+)(TB|GB|MB|B)', re.IGNORECASE)

    results = []

    # processing the results, two rows at a time
    for i in xrange(0, len(rows), 2):
        # first row: title, page link and (maybe) a magnet link
        name_row = rows[i]

        links = name_row.xpath('./td[@class="desc-top"]/a')
        params = {
            'template': 'torrent.html',
            'url': links[-1].attrib.get('href'),
            'title': extract_text(links[-1])
        }
        # I have not yet seen any torrents without magnet links, but
        # it's better to be prepared to stumble upon one some day
        if len(links) == 2:
            magnet = links[0].attrib.get('href')
            if magnet.startswith('magnet'):
                # okay, we have a valid magnet link, let's add it to the result
                params['magnetlink'] = magnet

        # second row: a "key: value | key: value | ..." description string
        info_row = rows[i + 1]
        desc = extract_text(info_row.xpath('./td[@class="desc-bot"]')[0])
        for item in desc.split('|'):
            item = item.strip()
            if item.startswith('Size:'):
                try:
                    # e.g. ('1.228', 'GB')
                    size_str, suffix = size_re.match(item).groups()
                    params['filesize'] = int(get_filesize_mul(suffix) * float(size_str))
                except Exception:
                    # malformed size -> just omit the field
                    pass
            elif item.startswith('Date:'):
                try:
                    # e.g. "Date: 2016-02-21 21:44 UTC"
                    params['publishedDate'] = datetime.strptime(
                        item, 'Date: %Y-%m-%d %H:%M UTC')
                except Exception:
                    # malformed date -> just omit the field
                    pass
            elif item.startswith('Comment:'):
                params['content'] = item

        # seed/leech statistics; only trusted if the layout is unchanged
        stats = info_row.xpath('./td[@class="stats"]/span')
        if len(stats) == 3:
            params['seed'] = int_or_zero(extract_text(stats[0]))
            params['leech'] = int_or_zero(extract_text(stats[1]))

        results.append(params)

    return results

+ 93
- 0
searx/engines/torrentz.py Visa fil

@@ -0,0 +1,93 @@
1
+"""
2
+ Torrentz.eu (BitTorrent meta-search engine)
3
+
4
+ @website      https://torrentz.eu/
5
+ @provide-api  no
6
+
7
+ @using-api    no
8
+ @results      HTML
9
+ @stable       no (HTML can change, although unlikely,
10
+                   see https://torrentz.eu/torrentz.btsearch)
11
+ @parse        url, title, publishedDate, seed, leech, filesize, magnetlink
12
+"""
13
+
14
+import re
15
+from cgi import escape
16
+from urllib import urlencode
17
+from lxml import html
18
+from searx.engines.xpath import extract_text
19
+from datetime import datetime
20
+from searx.engines.nyaa import int_or_zero, get_filesize_mul
21
+
22
+# engine dependent config
23
+categories = ['files', 'videos', 'music']
24
+paging = True
25
+
26
+# search-url
27
+# https://torrentz.eu/search?f=EXAMPLE&p=6
28
+base_url = 'https://torrentz.eu/'
29
+search_url = base_url + 'search?{query}'
30
+
31
+
32
# do search-request
def request(query, params):
    """Build the torrentz.eu search request; their page numbering starts at 0."""
    search_args = urlencode({'q': query,
                             'p': params['pageno'] - 1})
    params['url'] = search_url.format(query=search_args)
    return params
38
+
39
+
40
# get response from search-request
def response(resp):
    """Parse a torrentz.eu results page into torrent-template results.

    Each result is a <dl> inside div.results; rows without exactly one
    link in the <dt> (ad/header rows) are skipped.  Size, magnet link and
    publication date are best-effort: parsing failures simply omit the
    corresponding field.
    """
    dom = html.fromstring(resp.text)
    results = []

    # a 40-hex-digit link path is the torrent's SHA1 info hash
    sha1_re = re.compile('[0-9a-fA-F]{40}')

    for result in dom.xpath('//div[@class="results"]/dl'):
        name_cell = result.xpath('./dt')[0]
        title = extract_text(name_cell)

        # skip rows that do not contain a link to a torrent
        links = name_cell.xpath('./a')
        if len(links) != 1:
            continue

        # extract url and remove a slash in the beginning
        link = links[0].attrib.get('href').lstrip('/')

        seed = result.xpath('./dd/span[@class="u"]/text()')[0].replace(',', '')
        leech = result.xpath('./dd/span[@class="d"]/text()')[0].replace(',', '')

        params = {
            'url': base_url + link,
            'title': title,
            'seed': int_or_zero(seed),
            'leech': int_or_zero(leech),
            'template': 'torrent.html'
        }

        # let's try to calculate the torrent size
        try:
            size_str = result.xpath('./dd/span[@class="s"]/text()')[0]
            size, suffix = size_str.split()
            # use float: torrentz reports fractional sizes such as "1.5 GB"
            params['filesize'] = int(float(size) * get_filesize_mul(suffix))
        except Exception:
            pass

        # does our link contain a valid SHA1 sum?
        if sha1_re.match(link):
            # add a magnet link to the result
            params['magnetlink'] = 'magnet:?xt=urn:btih:' + link

        # extract and convert creation date
        try:
            date_str = result.xpath('./dd/span[@class="a"]/span')[0].attrib.get('title')
            # Fri, 25 Mar 2016 16:29:01
            date = datetime.strptime(date_str, '%a, %d %b %Y %H:%M:%S')
            params['publishedDate'] = date
        except Exception:
            pass

        results.append(params)

    return results

+ 15
- 1
searx/engines/xpath.py Visa fil

@@ -11,6 +11,14 @@ title_xpath = None
11 11
 suggestion_xpath = ''
12 12
 results_xpath = ''
13 13
 
14
+# parameters for engines with paging support
15
+#
16
+# number of results on each page
17
+# (only needed if the site requires not a page number, but an offset)
18
+page_size = 1
19
+# number of the first page (usually 0 or 1)
20
+first_page_num = 1
21
+
14 22
 
15 23
 '''
16 24
 if xpath_results is list, extract the text from each result and concat the list
@@ -76,8 +84,14 @@ def normalize_url(url):
76 84
 
77 85
 def request(query, params):
78 86
     query = urlencode({'q': query})[2:]
79
-    params['url'] = search_url.format(query=query)
87
+
88
+    fp = {'query': query}
89
+    if paging and search_url.find('{pageno}') >= 0:
90
+        fp['pageno'] = (params['pageno'] + first_page_num - 1) * page_size
91
+
92
+    params['url'] = search_url.format(**fp)
80 93
     params['query'] = query
94
+
81 95
     return params
82 96
 
83 97
 

+ 88
- 0
searx/settings.yml Visa fil

@@ -60,6 +60,18 @@ engines:
60 60
     engine : bing_news
61 61
     shortcut : bin
62 62
 
63
+  - name : bitbucket
64
+    engine : xpath
65
+    paging : True
66
+    search_url : https://bitbucket.org/repo/all/{pageno}?name={query}
67
+    url_xpath : //article[@class="repo-summary"]//a[@class="repo-link"]/@href
68
+    title_xpath : //article[@class="repo-summary"]//a[@class="repo-link"]
69
+    content_xpath : //article[@class="repo-summary"]/p
70
+    categories : it
71
+    timeout : 4.0
72
+    disabled : True
73
+    shortcut : bb
74
+
63 75
   - name : btdigg
64 76
     engine : btdigg
65 77
     shortcut : bt
@@ -86,6 +98,19 @@ engines:
86 98
     engine : digg
87 99
     shortcut : dg
88 100
 
101
+  - name : erowid
102
+    engine : xpath
103
+    paging : True
104
+    first_page_num : 0
105
+    page_size : 30
106
+    search_url : https://www.erowid.org/search.php?q={query}&s={pageno}
107
+    url_xpath : //dl[@class="results-list"]/dt[@class="result-title"]/a/@href
108
+    title_xpath : //dl[@class="results-list"]/dt[@class="result-title"]/a/text()
109
+    content_xpath : //dl[@class="results-list"]/dd[@class="result-details"]
110
+    categories : general
111
+    shortcut : ew
112
+    disabled : True
113
+
89 114
   - name : wikidata
90 115
     engine : wikidata
91 116
     shortcut : wd
@@ -109,6 +134,11 @@ engines:
109 134
     shortcut : 1x
110 135
     disabled : True
111 136
 
137
+  - name : fdroid
138
+    engine : fdroid
139
+    shortcut : fd
140
+    disabled : True
141
+
112 142
   - name : flickr
113 143
     categories : images
114 144
     shortcut : fl
@@ -129,6 +159,18 @@ engines:
129 159
     shortcut : gb
130 160
     disabled: True
131 161
 
162
+  - name : gitlab
163
+    engine : xpath
164
+    paging : True
165
+    search_url : https://gitlab.com/search?page={pageno}&search={query}
166
+    url_xpath : //li[@class="project-row"]//a[@class="project"]/@href
167
+    title_xpath : //li[@class="project-row"]//span[contains(@class, "project-full-name")]
168
+    content_xpath : //li[@class="project-row"]//div[@class="description"]/p
169
+    categories : it
170
+    shortcut : gl
171
+    timeout : 5.0
172
+    disabled : True
173
+
132 174
   - name : github
133 175
     engine : github
134 176
     shortcut : gh
@@ -175,10 +217,38 @@ engines:
175 217
     shortcut : gps
176 218
     disabled : True
177 219
 
220
+  - name : geektimes
221
+    engine : xpath
222
+    paging : True
223
+    search_url : https://geektimes.ru/search/page{pageno}/?q={query}
224
+    url_xpath : //div[@class="search_results"]//a[@class="post_title"]/@href
225
+    title_xpath : //div[@class="search_results"]//a[@class="post_title"]
226
+    content_xpath : //div[@class="search_results"]//div[contains(@class, "content")]
227
+    categories : it
228
+    timeout : 4.0
229
+    disabled : True
230
+    shortcut : gt
231
+
232
+  - name : habrahabr
233
+    engine : xpath
234
+    paging : True
235
+    search_url : https://habrahabr.ru/search/page{pageno}/?q={query}
236
+    url_xpath : //div[@class="search_results"]//a[@class="post_title"]/@href
237
+    title_xpath : //div[@class="search_results"]//a[@class="post_title"]
238
+    content_xpath : //div[@class="search_results"]//div[contains(@class, "content")]
239
+    categories : it
240
+    timeout : 4.0
241
+    disabled : True
242
+    shortcut : habr
243
+
178 244
   - name : mixcloud
179 245
     engine : mixcloud
180 246
     shortcut : mc
181 247
 
248
+  - name : nyaa
249
+    engine : nyaa
250
+    shortcut : nt
251
+
182 252
   - name : openstreetmap
183 253
     engine : openstreetmap
184 254
     shortcut : osm
@@ -213,6 +283,13 @@ engines:
213 283
     shortcut : qws
214 284
     categories : social media
215 285
 
286
+  - name : reddit
287
+    engine : reddit
288
+    shortcut : re
289
+    page_size : 25
290
+    timeout : 10.0
291
+    disabled : True
292
+
216 293
   - name : kickass
217 294
     engine : kickass
218 295
     shortcut : ka
@@ -264,6 +341,17 @@ engines:
264 341
     shortcut : sw
265 342
     disabled : True
266 343
 
344
+  - name : tokyotoshokan
345
+    engine : tokyotoshokan
346
+    shortcut : tt
347
+    timeout : 6.0
348
+    disabled : True
349
+
350
+  - name : torrentz
351
+    engine : torrentz
352
+    timeout : 5.0
353
+    shortcut : to
354
+
267 355
   - name : twitter
268 356
     engine : twitter
269 357
     shortcut : tw

+ 49
- 0
tests/unit/engines/test_fdroid.py Visa fil

@@ -0,0 +1,49 @@
1
+import mock
2
+from collections import defaultdict
3
+from searx.engines import fdroid
4
+from searx.testing import SearxTestCase
5
+
6
+
7
+class TestFdroidEngine(SearxTestCase):
8
+
9
+    def test_request(self):
10
+        query = 'test_query'
11
+        dic = defaultdict(dict)
12
+        dic['pageno'] = 1
13
+        params = fdroid.request(query, dic)
14
+        self.assertTrue('url' in params)
15
+        self.assertTrue(query in params['url'])
16
+        self.assertTrue('f-droid.org' in params['url'])
17
+
18
+    def test_response(self):
19
+        resp = mock.Mock(text='<html></html>')
20
+        self.assertEqual(fdroid.response(resp), [])
21
+
22
+        html = """
23
+        <a href="https://google.com/qwerty">
24
+          <div id="appheader">
25
+            <div style="float:left;padding-right:10px;">
26
+              <img src="http://example.com/image.png"
27
+                   style="width:48px;border:none;">
28
+            </div>
29
+            <div style="float:right;">
30
+              <p>Details...</p>
31
+            </div>
32
+            <p style="color:#000000;">
33
+              <span style="font-size:20px;">Sample title</span>
34
+              <br>
35
+              Sample content
36
+            </p>
37
+          </div>
38
+        </a>
39
+        """
40
+
41
+        resp = mock.Mock(text=html)
42
+        results = fdroid.response(resp)
43
+
44
+        self.assertEqual(type(results), list)
45
+        self.assertEqual(len(results), 1)
46
+        self.assertEqual(results[0]['url'], 'https://google.com/qwerty')
47
+        self.assertEqual(results[0]['title'], 'Sample title')
48
+        self.assertEqual(results[0]['content'], 'Sample content')
49
+        self.assertEqual(results[0]['img_src'], 'http://example.com/image.png')

+ 66
- 0
tests/unit/engines/test_nyaa.py Visa fil

@@ -0,0 +1,66 @@
1
+from collections import defaultdict
2
+import mock
3
+from searx.engines import nyaa
4
+from searx.testing import SearxTestCase
5
+
6
+
7
+class TestNyaaEngine(SearxTestCase):
8
+
9
+    def test_request(self):
10
+        query = 'test_query'
11
+        dic = defaultdict(dict)
12
+        dic['pageno'] = 1
13
+        params = nyaa.request(query, dic)
14
+        self.assertTrue('url' in params)
15
+        self.assertTrue(query in params['url'])
16
+        self.assertTrue('nyaa.se' in params['url'])
17
+
18
+    def test_response(self):
19
+        resp = mock.Mock(text='<html></html>')
20
+        self.assertEqual(nyaa.response(resp), [])
21
+
22
+        html = """
23
+        <table class="tlist">
24
+          <tbody>
25
+            <tr class="trusted tlistrow">
26
+              <td class="tlisticon">
27
+                <a href="//www.nyaa.se" title="English-translated Anime">
28
+                   <img src="//files.nyaa.se" alt="English-translated Anime">
29
+                </a>
30
+              </td>
31
+              <td class="tlistname">
32
+                <a href="//www.nyaa.se/?page3">
33
+                  Sample torrent title
34
+                </a>
35
+              </td>
36
+              <td class="tlistdownload">
37
+                <a href="//www.nyaa.se/?page_dl" title="Download">
38
+                  <img src="//files.nyaa.se/www-dl.png" alt="DL">
39
+                </a>
40
+              </td>
41
+              <td class="tlistsize">10 MiB</td>
42
+              <td class="tlistsn">1</td>
43
+              <td class="tlistln">3</td>
44
+              <td class="tlistdn">666</td>
45
+              <td class="tlistmn">0</td>
46
+            </tr>
47
+          </tbody>
48
+        </table>
49
+        """
50
+
51
+        resp = mock.Mock(text=html)
52
+        results = nyaa.response(resp)
53
+
54
+        self.assertEqual(type(results), list)
55
+        self.assertEqual(len(results), 1)
56
+
57
+        r = results[0]
58
+        self.assertTrue(r['url'].find('www.nyaa.se/?page3') >= 0)
59
+        self.assertTrue(r['torrentfile'].find('www.nyaa.se/?page_dl') >= 0)
60
+        self.assertTrue(r['content'].find('English-translated Anime') >= 0)
61
+        self.assertTrue(r['content'].find('Downloaded 666 times.') >= 0)
62
+
63
+        self.assertEqual(r['title'], 'Sample torrent title')
64
+        self.assertEqual(r['seed'], 1)
65
+        self.assertEqual(r['leech'], 3)
66
+        self.assertEqual(r['filesize'], 10 * 1024 * 1024)

+ 67
- 0
tests/unit/engines/test_reddit.py Visa fil

@@ -0,0 +1,67 @@
1
+from collections import defaultdict
2
+import mock
3
+from searx.engines import reddit
4
+from searx.testing import SearxTestCase
5
+from datetime import datetime
6
+
7
+
8
+class TestRedditEngine(SearxTestCase):
9
+
10
+    def test_request(self):
11
+        query = 'test_query'
12
+        dic = defaultdict(dict)
13
+        params = reddit.request(query, dic)
14
+        self.assertTrue('url' in params)
15
+        self.assertTrue(query in params['url'])
16
+        self.assertTrue('reddit.com' in params['url'])
17
+
18
+    def test_response(self):
19
+        resp = mock.Mock(text='{}')
20
+        self.assertEqual(reddit.response(resp), [])
21
+
22
+        json = """
23
+        {
24
+            "kind": "Listing",
25
+            "data": {
26
+                "children": [{
27
+                    "data": {
28
+                        "url": "http://google.com/",
29
+                        "title": "Title number one",
30
+                        "selftext": "Sample",
31
+                        "created_utc": 1401219957.0,
32
+                        "thumbnail": "http://image.com/picture.jpg"
33
+                    }
34
+                }, {
35
+                    "data": {
36
+                        "url": "https://reddit.com/",
37
+                        "title": "Title number two",
38
+                        "selftext": "Dominus vobiscum",
39
+                        "created_utc": 1438792533.0,
40
+                        "thumbnail": "self"
41
+                    }
42
+                }]
43
+            }
44
+        }
45
+        """
46
+
47
+        resp = mock.Mock(text=json)
48
+        results = reddit.response(resp)
49
+
50
+        self.assertEqual(len(results), 2)
51
+        self.assertEqual(type(results), list)
52
+
53
+        # testing first result (picture)
54
+        r = results[0]
55
+        self.assertEqual(r['url'], 'http://google.com/')
56
+        self.assertEqual(r['title'], 'Title number one')
57
+        self.assertEqual(r['template'], 'images.html')
58
+        self.assertEqual(r['thumbnail_src'], 'http://image.com/picture.jpg')
59
+
60
+        # testing second result (self-post)
61
+        r = results[1]
62
+        self.assertEqual(r['url'], 'https://reddit.com/')
63
+        self.assertEqual(r['title'], 'Title number two')
64
+        self.assertEqual(r['content'], 'Dominus vobiscum')
65
+        created = datetime.fromtimestamp(1438792533.0)
66
+        self.assertEqual(r['publishedDate'], created)
67
+        self.assertTrue('thumbnail_src' not in r)

+ 110
- 0
tests/unit/engines/test_tokyotoshokan.py Visa fil

@@ -0,0 +1,110 @@
1
+import mock
2
+from collections import defaultdict
3
+from searx.engines import tokyotoshokan
4
+from searx.testing import SearxTestCase
5
+from datetime import datetime
6
+
7
+
8
+class TestTokyotoshokanEngine(SearxTestCase):
9
+
10
+    def test_request(self):
11
+        query = 'test_query'
12
+        dic = defaultdict(dict)
13
+        dic['pageno'] = 1
14
+        params = tokyotoshokan.request(query, dic)
15
+        self.assertTrue('url' in params)
16
+        self.assertTrue(query in params['url'])
17
+        self.assertTrue('tokyotosho.info' in params['url'])
18
+
19
+    def test_response(self):
20
+        resp = mock.Mock(text='<html></html>')
21
+        self.assertEqual(tokyotoshokan.response(resp), [])
22
+
23
+        html = """
24
+        <table class="listing">
25
+          <tbody>
26
+            <tr class="shade category_0">
27
+              <td rowspan="2">
28
+                <a href="/?cat=7"><span class="sprite_cat-raw"></span></a>
29
+              </td>
30
+              <td class="desc-top">
31
+                <a href="magnet:?xt=urn:btih:4c19eb46b5113685fbd2288ed2531b0b">
32
+                  <span class="sprite_magnet"></span>
33
+                </a>
34
+                <a rel="nofollow" type="application/x-bittorrent" href="http://www.nyaa.se/f">
35
+                  Koyomimonogatari
36
+                </a>
37
+              </td>
38
+              <td class="web"><a rel="nofollow" href="details.php?id=975700">Details</a></td>
39
+            </tr>
40
+            <tr class="shade category_0">
41
+              <td class="desc-bot">
42
+                Authorized: <span class="auth_ok">Yes</span>
43
+                Submitter: <a href="?username=Ohys">Ohys</a> |
44
+                Size: 10.5MB |
45
+                Date: 2016-03-26 16:41 UTC |
46
+                Comment: sample comment
47
+              </td>
48
+              <td style="color: #BBB; font-family: monospace" class="stats" align="right">
49
+                S: <span style="color: red">53</span>
50
+                L: <span style="color: red">18</span>
51
+                C: <span style="color: red">0</span>
52
+                ID: 975700
53
+              </td>
54
+            </tr>
55
+
56
+            <tr class="category_0">
57
+              <td rowspan="2">
58
+                <a href="/?cat=7"><span class="sprite_cat-raw"></span></a>
59
+              </td>
60
+              <td class="desc-top">
61
+                <a rel="nofollow" type="application/x-bittorrent" href="http://google.com/q">
62
+                  Owarimonogatari
63
+                </a>
64
+              </td>
65
+              <td class="web"><a rel="nofollow" href="details.php?id=975700">Details</a></td>
66
+            </tr>
67
+            <tr class="category_0">
68
+              <td class="desc-bot">
69
+                Submitter: <a href="?username=Ohys">Ohys</a> |
70
+                Size: 932.84EB |
71
+                Date: QWERTY-03-26 16:41 UTC
72
+              </td>
73
+              <td style="color: #BBB; font-family: monospace" class="stats" align="right">
74
+                S: <span style="color: red">0</span>
75
+              </td>
76
+            </tr>
77
+          </tbody>
78
+        </table>
79
+        """
80
+
81
+        resp = mock.Mock(text=html)
82
+        results = tokyotoshokan.response(resp)
83
+
84
+        self.assertEqual(type(results), list)
85
+        self.assertEqual(len(results), 2)
86
+
87
+        # testing the first result, which has correct format
88
+        # and should have all information fields filled
89
+        r = results[0]
90
+        self.assertEqual(r['url'], 'http://www.nyaa.se/f')
91
+        self.assertEqual(r['title'], 'Koyomimonogatari')
92
+        self.assertEqual(r['magnetlink'], 'magnet:?xt=urn:btih:4c19eb46b5113685fbd2288ed2531b0b')
93
+        self.assertEqual(r['filesize'], int(1024 * 1024 * 10.5))
94
+        self.assertEqual(r['publishedDate'], datetime(2016, 03, 26, 16, 41))
95
+        self.assertEqual(r['content'], 'Comment: sample comment')
96
+        self.assertEqual(r['seed'], 53)
97
+        self.assertEqual(r['leech'], 18)
98
+
99
+        # testing the second result, which does not include magnet link,
100
+        # seed & leech info, and has incorrect size & creation date
101
+        r = results[1]
102
+        self.assertEqual(r['url'], 'http://google.com/q')
103
+        self.assertEqual(r['title'], 'Owarimonogatari')
104
+
105
+        self.assertFalse('magnetlink' in r)
106
+        self.assertFalse('filesize' in r)
107
+        self.assertFalse('content' in r)
108
+        self.assertFalse('publishedDate' in r)
109
+        self.assertFalse('seed' in r)
110
+        self.assertFalse('leech' in r)

+ 91
- 0
tests/unit/engines/test_torrentz.py Visa fil

@@ -0,0 +1,91 @@
1
+import mock
2
+from collections import defaultdict
3
+from searx.engines import torrentz
4
+from searx.testing import SearxTestCase
5
+from datetime import datetime
6
+
7
+
8
+class TestTorrentzEngine(SearxTestCase):
9
+
10
+    def test_request(self):
11
+        query = 'test_query'
12
+        dic = defaultdict(dict)
13
+        dic['pageno'] = 1
14
+        params = torrentz.request(query, dic)
15
+        self.assertTrue('url' in params)
16
+        self.assertTrue(query in params['url'])
17
+        self.assertTrue('torrentz.eu' in params['url'])
18
+
19
+    def test_response(self):
20
+        resp = mock.Mock(text='<html></html>')
21
+        self.assertEqual(torrentz.response(resp), [])
22
+
23
+        html = """
24
+        <div class="results">
25
+          <dl>
26
+            <dt>
27
+              <a href="/4362e08b1d80e1820fb2550b752f9f3126fe76d6">
28
+                Completely valid info
29
+              </a>
30
+              books ebooks
31
+            </dt>
32
+            <dd>
33
+              <span class="v">1</span>
34
+              <span class="a">
35
+                <span title="Sun, 22 Nov 2015 03:01:42">4 months</span>
36
+              </span>
37
+              <span class="s">30 MB</span>
38
+              <span class="u">14</span>
39
+              <span class="d">1</span>
40
+            </dd>
41
+          </dl>
42
+
43
+          <dl>
44
+            <dt>
45
+              <a href="/poaskdpokaspod">
46
+                Invalid hash and date and filesize
47
+              </a>
48
+              books ebooks
49
+            </dt>
50
+            <dd>
51
+              <span class="v">1</span>
52
+              <span class="a">
53
+                <span title="Sun, 2124091j0j190gm42">4 months</span>
54
+              </span>
55
+              <span class="s">30MB</span>
56
+              <span class="u">5,555</span>
57
+              <span class="d">1,234,567</span>
58
+            </dd>
59
+          </dl>
60
+        </div>
61
+        """
62
+
63
+        resp = mock.Mock(text=html)
64
+        results = torrentz.response(resp)
65
+
66
+        self.assertEqual(type(results), list)
67
+        self.assertEqual(len(results), 2)
68
+
69
+        # testing against the first result
70
+        r = results[0]
71
+        self.assertEqual(r['url'], 'https://torrentz.eu/4362e08b1d80e1820fb2550b752f9f3126fe76d6')
72
+        self.assertEqual(r['title'], 'Completely valid info books ebooks')
73
+        # 22 Nov 2015 03:01:42
74
+        self.assertEqual(r['publishedDate'], datetime(2015, 11, 22, 3, 1, 42))
75
+        self.assertEqual(r['seed'], 14)
76
+        self.assertEqual(r['leech'], 1)
77
+        self.assertEqual(r['filesize'], 30 * 1024 * 1024)
78
+        self.assertEqual(r['magnetlink'], 'magnet:?xt=urn:btih:4362e08b1d80e1820fb2550b752f9f3126fe76d6')
79
+
80
+        # testing against the second result
81
+        r = results[1]
82
+        self.assertEqual(r['url'], 'https://torrentz.eu/poaskdpokaspod')
83
+        self.assertEqual(r['title'], 'Invalid hash and date and filesize books ebooks')
84
+        self.assertEqual(r['seed'], 5555)
85
+        self.assertEqual(r['leech'], 1234567)
86
+
87
+        # in the second result we have invalid hash, creation date & torrent size,
88
+        # so these tests should fail
89
+        self.assertFalse('magnetlink' in r)
90
+        self.assertFalse('filesize' in r)
91
+        self.assertFalse('publishedDate' in r)