## Deviantart (Images)
#
# @website     https://www.deviantart.com/
# @provide-api yes (https://www.deviantart.com/developers/) (RSS)
#
# @using-api   no (TODO, rewrite to api)
# @results     HTML
# @stable      no (HTML can change)
# @parse       url, title, thumbnail_src, img_src
#
# @todo        rewrite to api
import re
from urllib import urlencode
from urlparse import urljoin

from lxml import html

from searx.engines.xpath import extract_text
  17. # engine dependent config
  18. categories = ['images']
  19. paging = True
  20. # search-url
  21. base_url = 'https://www.deviantart.com/'
  22. search_url = base_url+'search?offset={offset}&{query}'
  23. # do search-request
  24. def request(query, params):
  25. offset = (params['pageno'] - 1) * 24
  26. params['url'] = search_url.format(offset=offset,
  27. query=urlencode({'q': query}))
  28. return params
  29. # get response from search-request
  30. def response(resp):
  31. results = []
  32. # return empty array if a redirection code is returned
  33. if resp.status_code == 302:
  34. return []
  35. dom = html.fromstring(resp.text)
  36. regex = re.compile('\/200H\/')
  37. # parse results
  38. for result in dom.xpath('//div[contains(@class, "tt-a tt-fh")]'):
  39. link = result.xpath('.//a[contains(@class, "thumb")]')[0]
  40. url = urljoin(base_url, link.attrib.get('href'))
  41. title_links = result.xpath('.//span[@class="details"]//a[contains(@class, "t")]')
  42. title = extract_text(title_links[0])
  43. thumbnail_src = link.xpath('.//img')[0].attrib.get('src')
  44. img_src = regex.sub('/', thumbnail_src)
  45. # append result
  46. results.append({'url': url,
  47. 'title': title,
  48. 'img_src': img_src,
  49. 'thumbnail_src': thumbnail_src,
  50. 'template': 'images.html'})
  51. # return results
  52. return results