# -*- coding: utf-8 -*-
"""
 Wikidata

 @website     https://wikidata.org
 @provide-api yes (https://wikidata.org/w/api.php)

 @using-api   partially (most things require scraping)
 @results     JSON, HTML
 @stable      no (html can change)
 @parse       url, infobox
"""

from searx import logger
from searx.poolrequests import get
from searx.engines.xpath import extract_text
from searx.engines.wikipedia import _fetch_supported_languages, supported_languages_url
from searx.url_utils import urlencode
from searx.utils import match_language

from json import loads
from lxml.html import fromstring

logger = logger.getChild('wikidata')
result_count = 1

# urls
wikidata_host = 'https://www.wikidata.org'
url_search = wikidata_host \
    + '/w/index.php?{query}'

wikidata_api = wikidata_host + '/w/api.php'
url_detail = wikidata_api\
    + '?action=parse&format=json&{query}'\
    + '&redirects=1&prop=text%7Cdisplaytitle%7Clanglinks%7Crevid'\
    + '&disableeditsection=1&disabletidy=1&preview=1&sectionpreview=1&disabletoc=1&utf8=1&formatversion=2'

url_map = 'https://www.openstreetmap.org/'\
    + '?lat={latitude}&lon={longitude}&zoom={zoom}&layers=M'
url_image = 'https://commons.wikimedia.org/wiki/Special:FilePath/{filename}?width=500&height=400'

# xpaths
wikidata_ids_xpath = '//ul[@class="mw-search-results"]/li//a/@href'
title_xpath = '//*[contains(@class,"wikibase-title-label")]'
description_xpath = '//div[contains(@class,"wikibase-entitytermsview-heading-description")]'
property_xpath = '//div[@id="{propertyid}"]'
label_xpath = './/div[contains(@class,"wikibase-statementgroupview-property-label")]/a'
url_xpath = './/a[contains(@class,"external free") or contains(@class, "wb-external-id")]'
wikilink_xpath = './/ul[contains(@class,"wikibase-sitelinklistview-listview")]'\
    + '/li[contains(@data-wb-siteid,"{wikiid}")]//a/@href'
property_row_xpath = './/div[contains(@class,"wikibase-statementview")]'
preferred_rank_xpath = './/span[contains(@class,"wikibase-rankselector-preferred")]'
value_xpath = './/div[contains(@class,"wikibase-statementview-mainsnak")]'\
    + '/*/div[contains(@class,"wikibase-snakview-value")]'
language_fallback_xpath = '//sup[contains(@class,"wb-language-fallback-indicator")]'
calendar_name_xpath = './/sup[contains(@class,"wb-calendar-name")]'


def request(query, params):
    params['url'] = url_search.format(
        query=urlencode({'search': query}))

    return params
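
# Illustrative sketch (not part of the engine): for the query "Douglas Adams",
# request() fills params['url'] with a full-text search URL roughly like
#
#     >>> url_search.format(query=urlencode({'search': 'Douglas Adams'}))
#     'https://www.wikidata.org/w/index.php?search=Douglas+Adams'
#
# The exact query string depends on searx's urlencode shim for py2/py3.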


def response(resp):
    results = []
    html = fromstring(resp.text)
    search_results = html.xpath(wikidata_ids_xpath)

    language = match_language(resp.search_params['language'], supported_languages).split('-')[0]

    # TODO: make requests asynchronous to avoid timeout when result_count > 1
    for search_result in search_results[:result_count]:
        wikidata_id = search_result.split('/')[-1]
        url = url_detail.format(query=urlencode({'page': wikidata_id, 'uselang': language}))
        htmlresponse = get(url)
        jsonresponse = loads(htmlresponse.text)
        results += getDetail(jsonresponse, wikidata_id, language, resp.search_params['language'])

    return results
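
# Illustrative sketch (not part of the engine): each matched entity id (e.g. Q42)
# is fetched again through the parse API, so the detail URL looks roughly like
#
#     >>> url_detail.format(query=urlencode({'page': 'Q42', 'uselang': 'en'}))
#     'https://www.wikidata.org/w/api.php?action=parse&format=json&page=Q42&uselang=en&redirects=1&...'
#
# (the remaining &prop=...&formatversion=2 parameters come verbatim from url_detail,
# and the order of the urlencoded pair is not guaranteed)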


def getDetail(jsonresponse, wikidata_id, language, locale):
    results = []
    urls = []
    attributes = []

    title = jsonresponse.get('parse', {}).get('displaytitle', {})
    result = jsonresponse.get('parse', {}).get('text', {})

    if not title or not result:
        return results

    title = fromstring(title)
    for elem in title.xpath(language_fallback_xpath):
        elem.getparent().remove(elem)
    title = extract_text(title.xpath(title_xpath))

    result = fromstring(result)
    for elem in result.xpath(language_fallback_xpath):
        elem.getparent().remove(elem)

    description = extract_text(result.xpath(description_xpath))

    # URLS
    # official website
    add_url(urls, result, 'P856', results=results)

    # wikipedia
    wikipedia_link_count = 0
    wikipedia_link = get_wikilink(result, language + 'wiki')
    if wikipedia_link:
        wikipedia_link_count += 1
        urls.append({'title': 'Wikipedia (' + language + ')',
                     'url': wikipedia_link})

    if language != 'en':
        wikipedia_en_link = get_wikilink(result, 'enwiki')
        if wikipedia_en_link:
            wikipedia_link_count += 1
            urls.append({'title': 'Wikipedia (en)',
                         'url': wikipedia_en_link})

    # TODO: get_wiki_firstlanguage
    # if wikipedia_link_count == 0:

    # more wikis
    add_url(urls, result, default_label='Wikivoyage (' + language + ')', link_type=language + 'wikivoyage')
    add_url(urls, result, default_label='Wikiquote (' + language + ')', link_type=language + 'wikiquote')
    add_url(urls, result, default_label='Wikimedia Commons', link_type='commonswiki')

    add_url(urls, result, 'P625', 'OpenStreetMap', link_type='geo')

    # musicbrainz
    add_url(urls, result, 'P434', 'MusicBrainz', 'http://musicbrainz.org/artist/')
    add_url(urls, result, 'P435', 'MusicBrainz', 'http://musicbrainz.org/work/')
    add_url(urls, result, 'P436', 'MusicBrainz', 'http://musicbrainz.org/release-group/')
    add_url(urls, result, 'P966', 'MusicBrainz', 'http://musicbrainz.org/label/')

    # IMDb
    add_url(urls, result, 'P345', 'IMDb', 'https://www.imdb.com/', link_type='imdb')
    # source code repository
    add_url(urls, result, 'P1324')
    # blog
    add_url(urls, result, 'P1581')
    # social media links
    add_url(urls, result, 'P2397', 'YouTube', 'https://www.youtube.com/channel/')
    add_url(urls, result, 'P1651', 'YouTube', 'https://www.youtube.com/watch?v=')
    add_url(urls, result, 'P2002', 'Twitter', 'https://twitter.com/')
    add_url(urls, result, 'P2013', 'Facebook', 'https://facebook.com/')
    add_url(urls, result, 'P2003', 'Instagram', 'https://instagram.com/')

    urls.append({'title': 'Wikidata',
                 'url': 'https://www.wikidata.org/wiki/'
                 + wikidata_id + '?uselang=' + language})

    # INFOBOX ATTRIBUTES (ROWS)

    # DATES
    # inception date
    add_attribute(attributes, result, 'P571', date=True)
    # dissolution date
    add_attribute(attributes, result, 'P576', date=True)
    # start date
    add_attribute(attributes, result, 'P580', date=True)
    # end date
    add_attribute(attributes, result, 'P582', date=True)
    # date of birth
    add_attribute(attributes, result, 'P569', date=True)
    # date of death
    add_attribute(attributes, result, 'P570', date=True)
    # date of spacecraft launch
    add_attribute(attributes, result, 'P619', date=True)
    # date of spacecraft landing
    add_attribute(attributes, result, 'P620', date=True)

    # nationality
    add_attribute(attributes, result, 'P27')
    # country of origin
    add_attribute(attributes, result, 'P495')
    # country
    add_attribute(attributes, result, 'P17')
    # headquarters
    add_attribute(attributes, result, 'Q180')

    # PLACES
    # capital
    add_attribute(attributes, result, 'P36', trim=True)
    # head of state
    add_attribute(attributes, result, 'P35', trim=True)
    # head of government
    add_attribute(attributes, result, 'P6', trim=True)
    # type of government
    add_attribute(attributes, result, 'P122')
    # official language
    add_attribute(attributes, result, 'P37')
    # population
    add_attribute(attributes, result, 'P1082', trim=True)
    # area
    add_attribute(attributes, result, 'P2046')
    # currency
    add_attribute(attributes, result, 'P38', trim=True)
    # height (building)
    add_attribute(attributes, result, 'P2048')

    # MEDIA
    # platform (videogames)
    add_attribute(attributes, result, 'P400')
    # author
    add_attribute(attributes, result, 'P50')
    # creator
    add_attribute(attributes, result, 'P170')
    # director
    add_attribute(attributes, result, 'P57')
    # performer
    add_attribute(attributes, result, 'P175')
    # developer
    add_attribute(attributes, result, 'P178')
    # producer
    add_attribute(attributes, result, 'P162')
    # manufacturer
    add_attribute(attributes, result, 'P176')
    # screenwriter
    add_attribute(attributes, result, 'P58')
    # production company
    add_attribute(attributes, result, 'P272')
    # record label
    add_attribute(attributes, result, 'P264')
    # publisher
    add_attribute(attributes, result, 'P123')
    # original network
    add_attribute(attributes, result, 'P449')
    # distributor
    add_attribute(attributes, result, 'P750')
    # composer
    add_attribute(attributes, result, 'P86')
    # publication date
    add_attribute(attributes, result, 'P577', date=True)
    # genre
    add_attribute(attributes, result, 'P136')
    # original language
    add_attribute(attributes, result, 'P364')
    # isbn
    add_attribute(attributes, result, 'Q33057')
    # software license
    add_attribute(attributes, result, 'P275')
    # programming language
    add_attribute(attributes, result, 'P277')
    # version
    add_attribute(attributes, result, 'P348', trim=True)
    # narrative location
    add_attribute(attributes, result, 'P840')

    # LANGUAGES
    # number of speakers
    add_attribute(attributes, result, 'P1098')
    # writing system
    add_attribute(attributes, result, 'P282')
    # regulatory body
    add_attribute(attributes, result, 'P1018')
    # language code
    add_attribute(attributes, result, 'P218')

    # OTHER
    # ceo
    add_attribute(attributes, result, 'P169', trim=True)
    # founder
    add_attribute(attributes, result, 'P112')
    # legal form (company/organization)
    add_attribute(attributes, result, 'P1454')
    # operator
    add_attribute(attributes, result, 'P137')
    # crew members
    add_attribute(attributes, result, 'P1029')
    # taxon
    add_attribute(attributes, result, 'P225')
    # chemical formula
    add_attribute(attributes, result, 'P274')
    # winner (sports/contests)
    add_attribute(attributes, result, 'P1346')
    # number of deaths
    add_attribute(attributes, result, 'P1120')
    # currency code
    add_attribute(attributes, result, 'P498')

    image = add_image(result)

    if len(attributes) == 0 and len(urls) == 2 and len(description) == 0:
        results.append({
            'url': urls[0]['url'],
            'title': title,
            'content': description
        })
    else:
        results.append({
            'infobox': title,
            'id': wikipedia_link,
            'content': description,
            'img_src': image,
            'attributes': attributes,
            'urls': urls
        })

    return results


# only returns first match
def add_image(result):
    # P15: route map, P242: locator map, P154: logo, P18: image, P242: map, P41: flag, P2716: collage, P2910: icon
    property_ids = ['P15', 'P242', 'P154', 'P18', 'P242', 'P41', 'P2716', 'P2910']

    for property_id in property_ids:
        image = result.xpath(property_xpath.replace('{propertyid}', property_id))
        if image:
            image_name = image[0].xpath(value_xpath)
            image_src = url_image.replace('{filename}', extract_text(image_name[0]))
            return image_src
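
# Illustrative sketch (not part of the engine): for a statement whose rendered value
# is the hypothetical file name "Example.jpg", add_image() builds a thumbnail URL via
#
#     >>> url_image.replace('{filename}', 'Example.jpg')
#     'https://commons.wikimedia.org/wiki/Special:FilePath/Example.jpg?width=500&height=400'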


# setting trim will only return preferred-rank rows OR the first row
def add_attribute(attributes, result, property_id, default_label=None, date=False, trim=False):
    attribute = result.xpath(property_xpath.replace('{propertyid}', property_id))
    if attribute:

        if default_label:
            label = default_label
        else:
            label = extract_text(attribute[0].xpath(label_xpath))
            label = label[0].upper() + label[1:]

        if date:
            trim = True
            # remove calendar name
            calendar_name = attribute[0].xpath(calendar_name_xpath)
            for calendar in calendar_name:
                calendar.getparent().remove(calendar)

        concat_values = ""
        values = []
        first_value = None
        for row in attribute[0].xpath(property_row_xpath):
            if not first_value or not trim or row.xpath(preferred_rank_xpath):
                value = row.xpath(value_xpath)
                if not value:
                    continue
                value = extract_text(value)

                # save first value in case no ranked row is found
                if trim and not first_value:
                    first_value = value
                else:
                    # to avoid duplicate values
                    if value not in values:
                        concat_values += value + ", "
                        values.append(value)

        if trim and not values:
            attributes.append({'label': label,
                               'value': first_value})
        else:
            attributes.append({'label': label,
                               'value': concat_values[:-2]})
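
# Illustrative sketch (not part of the engine): with trim=True (e.g. P1082, population),
# non-preferred rows after the first one are skipped; if no row carries the preferred
# rank, the first row found is used as a fallback. A hypothetical item with the
# statements "66991000" (first, no other preferred row) and "64933400" (normal rank)
# would therefore yield something like
#
#     {'label': 'Population', 'value': '66991000'}
#
# where the label text depends on the property label rendered on the Wikidata page.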


# requires property_id unless it's a wiki link (defined in link_type)
def add_url(urls, result, property_id=None, default_label=None, url_prefix=None, results=None, link_type=None):
    links = []

    # wiki links don't have property in wikidata page
    if link_type and 'wiki' in link_type:
        links.append(get_wikilink(result, link_type))
    else:
        dom_element = result.xpath(property_xpath.replace('{propertyid}', property_id))
        if dom_element:
            dom_element = dom_element[0]
            if not default_label:
                label = extract_text(dom_element.xpath(label_xpath))
                label = label[0].upper() + label[1:]

            if link_type == 'geo':
                links.append(get_geolink(dom_element))

            elif link_type == 'imdb':
                links.append(get_imdblink(dom_element, url_prefix))

            else:
                url_results = dom_element.xpath(url_xpath)
                for link in url_results:
                    if link is not None:
                        if url_prefix:
                            link = url_prefix + extract_text(link)
                        else:
                            link = extract_text(link)
                        links.append(link)

    # append urls
    for url in links:
        if url is not None:
            urls.append({'title': default_label or label,
                         'url': url})
            if results is not None:
                results.append({'title': default_label or label,
                                'url': url})
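
# Illustrative sketch (not part of the engine): for a Twitter handle statement (P2002)
# with the hypothetical value "searx_engine", the call
# add_url(urls, result, 'P2002', 'Twitter', 'https://twitter.com/') would append
#
#     {'title': 'Twitter', 'url': 'https://twitter.com/searx_engine'}
#
# to urls; when results is passed, the same dict is also pushed onto the result list.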


def get_imdblink(result, url_prefix):
    imdb_id = result.xpath(value_xpath)
    if imdb_id:
        imdb_id = extract_text(imdb_id)
        id_prefix = imdb_id[:2]
        if id_prefix == 'tt':
            url = url_prefix + 'title/' + imdb_id
        elif id_prefix == 'nm':
            url = url_prefix + 'name/' + imdb_id
        elif id_prefix == 'ch':
            url = url_prefix + 'character/' + imdb_id
        elif id_prefix == 'co':
            url = url_prefix + 'company/' + imdb_id
        elif id_prefix == 'ev':
            url = url_prefix + 'event/' + imdb_id
        else:
            url = None
        return url
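
# Illustrative sketch (not part of the engine): the two-letter prefix of the IMDb
# identifier selects the URL path, e.g. with url_prefix='https://www.imdb.com/':
#
#     'tt0111161' -> 'https://www.imdb.com/title/tt0111161'      (title)
#     'nm0000001' -> 'https://www.imdb.com/name/nm0000001'       (person)
#     'ch0000001' -> 'https://www.imdb.com/character/ch0000001'  (character)
#
# Unknown prefixes return None and the link is dropped by add_url().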


def get_geolink(result):
    coordinates = result.xpath(value_xpath)
    if not coordinates:
        return None
    coordinates = extract_text(coordinates[0])
    latitude, longitude = coordinates.split(',')

    # convert to decimal
    lat = int(latitude[:latitude.find(u'°')])
    if latitude.find('\'') >= 0:
        lat += int(latitude[latitude.find(u'°') + 1:latitude.find('\'')] or 0) / 60.0
    if latitude.find('"') >= 0:
        lat += float(latitude[latitude.find('\'') + 1:latitude.find('"')] or 0) / 3600.0
    if latitude.find('S') >= 0:
        lat *= -1
    lon = int(longitude[:longitude.find(u'°')])
    if longitude.find('\'') >= 0:
        lon += int(longitude[longitude.find(u'°') + 1:longitude.find('\'')] or 0) / 60.0
    if longitude.find('"') >= 0:
        lon += float(longitude[longitude.find('\'') + 1:longitude.find('"')] or 0) / 3600.0
    if longitude.find('W') >= 0:
        lon *= -1

    # TODO: get precision
    precision = 0.0002
    # there is no zoom information, deduce from precision (error prone)
    # samples :
    # 13 --> 5
    # 1 --> 6
    # 0.016666666666667 --> 9
    # 0.00027777777777778 --> 19
    # wolframalpha :
    # quadratic fit { {13, 5}, {1, 6}, {0.0166666, 9}, {0.0002777777,19}}
    # 14.1186-8.8322 x+0.625447 x^2
    if precision < 0.0003:
        zoom = 19
    else:
        zoom = int(15 - precision * 8.8322 + precision * precision * 0.625447)

    url = url_map\
        .replace('{latitude}', str(lat))\
        .replace('{longitude}', str(lon))\
        .replace('{zoom}', str(zoom))

    return url
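
# Illustrative sketch (not part of the engine): a rendered coordinate such as
# 48°51'24"N, 2°21'3"E is converted degree/minute/second-wise to decimal:
#
#     lat = 48 + 51/60.0 + 24/3600.0 ≈ 48.8567
#     lon =  2 + 21/60.0 +  3/3600.0 ≈  2.3508
#
# and, with the hard-coded precision of 0.0002, zoom becomes 19, giving roughly
#
#     'https://www.openstreetmap.org/?lat=48.8566...&lon=2.3508...&zoom=19&layers=M'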


def get_wikilink(result, wikiid):
    url = result.xpath(wikilink_xpath.replace('{wikiid}', wikiid))
    if not url:
        return None
    url = url[0]
    if url.startswith('http://'):
        url = url.replace('http://', 'https://')
    elif url.startswith('//'):
        url = 'https:' + url

    return url