@@ -1,43 +1,58 @@
+## Vimeo (Videos)
+#
+# @website https://vimeo.com/
+# @provide-api yes (http://developer.vimeo.com/api), they have a maximum count of queries/hour
+#
+# @using-api no (TODO, rewrite to api)
+# @results HTML (using search portal)
+# @stable no (HTML can change)
+# @parse url, title, publishedDate, thumbnail
+#
+# @todo rewrite to api
+# @todo set content-parameter with correct data
+
 from urllib import urlencode
 from HTMLParser import HTMLParser
 from lxml import html
 from searx.engines.xpath import extract_text
 from dateutil import parser
 
-base_url = 'http://vimeo.com'
-search_url = base_url + '/search?{query}'
-url_xpath = None
-content_xpath = None
-title_xpath = None
-results_xpath = ''
-content_tpl = '<a href="{0}"> <img src="{2}"/> </a>'
-publishedDate_xpath = './/p[@class="meta"]//attribute::datetime'
+# engine dependent config
+categories = ['videos']
+paging = True
 
-# the cookie set by vimeo contains all the following values,
-# but only __utma seems to be requiered
-cookie = {
-    #'vuid':'918282893.1027205400'
-    # 'ab_bs':'%7B%223%22%3A279%7D'
-    '__utma': '00000000.000#0000000.0000000000.0000000000.0000000000.0'
-    # '__utmb':'18302654.1.10.1388942090'
-    #, '__utmc':'18302654'
-    #, '__utmz':'18#302654.1388942090.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none)' # noqa
-    #, '__utml':'search'
-}
+# search-url
+base_url = 'https://vimeo.com'
+search_url = base_url + '/search/page:{pageno}?{query}'
+
+# specific xpath variables
+url_xpath = './a/@href'
+content_xpath = './a/img/@src'
+title_xpath = './a/div[@class="data"]/p[@class="title"]/text()'
+results_xpath = '//div[@id="browse_content"]/ol/li'
+publishedDate_xpath = './/p[@class="meta"]//attribute::datetime'
 
 
+# do search-request
 def request(query, params):
-    params['url'] = search_url.format(query=urlencode({'q': query}))
-    params['cookies'] = cookie
+    params['url'] = search_url.format(pageno=params['pageno'] ,
+                                      query=urlencode({'q': query}))
+
+    # TODO required?
+    params['cookies']['__utma'] = '00000000.000#0000000.0000000000.0000000000.0000000000.0'
+
     return params
 
 
+# get response from search-request
 def response(resp):
     results = []
+
     dom = html.fromstring(resp.text)
 
     p = HTMLParser()
 
+    # parse results
     for result in dom.xpath(results_xpath):
         url = base_url + result.xpath(url_xpath)[0]
         title = p.unescape(extract_text(result.xpath(title_xpath)))
@@ -45,10 +60,13 @@ def response(resp):
         publishedDate = parser.parse(extract_text(
             result.xpath(publishedDate_xpath)[0]))
 
+        # append result
         results.append({'url': url,
                         'title': title,
-                        'content': content_tpl.format(url, title, thumbnail),
+                        'content': '',
                         'template': 'videos.html',
                         'publishedDate': publishedDate,
                         'thumbnail': thumbnail})
+
+    # return results
     return results
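
For reference, a minimal sketch (not part of the patch above; the literal `params` values here are assumptions) of how the new paged `search_url` is assembled by `request`, runnable under the same Python 2 setup as the engine:

    from urllib import urlencode  # Python 2, as in the engine above

    base_url = 'https://vimeo.com'
    search_url = base_url + '/search/page:{pageno}?{query}'

    # hypothetical stand-in for the params dict searx would pass in
    params = {'pageno': 2, 'cookies': {}}

    url = search_url.format(pageno=params['pageno'],
                            query=urlencode({'q': 'test'}))
    # url == 'https://vimeo.com/search/page:2?q=test'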