[fix] pep8 : duckduckgo_definitions and wikidata engines

dalf, 10 years ago · commit ffcec383b7

2 changed files with 168 additions and 90 deletions
  1. searx/engines/duckduckgo_definitions.py  (+46, -31)
  2. searx/engines/wikidata.py  (+122, -59)
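The commit message describes these as PEP8 fixes. As a rough way to reproduce the check locally, one might run a style checker over the two touched files; this is a hypothetical tooling sketch using the pycodestyle package (historically published as pep8), not something included in the commit itself.

    # Hypothetical check, not part of this commit: run a PEP8 style checker
    # over the two touched files and print the number of violations found.
    import pycodestyle

    style = pycodestyle.StyleGuide(max_line_length=79)
    report = style.check_files(['searx/engines/duckduckgo_definitions.py',
                                'searx/engines/wikidata.py'])
    print('PEP8 violations:', report.total_errors)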

searx/engines/duckduckgo_definitions.py  (+46, -31)

@@ ... @@
 from lxml import html
 from searx.engines.xpath import extract_text
 
-url = 'https://api.duckduckgo.com/?{query}&format=json&pretty=0&no_redirect=1&d=1'
+url = 'https://api.duckduckgo.com/'\
+    + '?{query}&format=json&pretty=0&no_redirect=1&d=1'
+
 
 def result_to_text(url, text, htmlResult):
     # TODO : remove result ending with "Meaning" or "Category"
     dom = html.fromstring(htmlResult)
     a = dom.xpath('//a')
-    if len(a)>=1:
+    if len(a) >= 1:
         return extract_text(a[0])
     else:
         return text
 
+
 def html_to_text(htmlFragment):
     dom = html.fromstring(htmlFragment)
     return extract_text(dom)
 
+
 def request(query, params):
     # TODO add kl={locale}
     params['url'] = url.format(query=urlencode({'q': query}))
@@ ... @@
     # add answer if there is one
     answer = search_res.get('Answer', '')
     if answer != '':
-        results.append({ 'answer' : html_to_text(answer) })
+        results.append({'answer': html_to_text(answer)})
 
     # add infobox
     if 'Definition' in search_res:
-        content = content + search_res.get('Definition', '') 
+        content = content + search_res.get('Definition', '')
 
     if 'Abstract' in search_res:
         content = content + search_res.get('Abstract', '')
 
-
     # image
     image = search_res.get('Image', '')
     image = None if image == '' else image
@@ ... @@
     # attributes
     if 'Infobox' in search_res:
         infobox = search_res.get('Infobox', None)
-        if  'content' in infobox:
+        if 'content' in infobox:
             for info in infobox.get('content'):
-                attributes.append({'label': info.get('label'), 'value': info.get('value')})
+                attributes.append({'label': info.get('label'),
+                                  'value': info.get('value')})
 
     # urls
     for ddg_result in search_res.get('Results', []):
         if 'FirstURL' in ddg_result:
             firstURL = ddg_result.get('FirstURL', '')
             text = ddg_result.get('Text', '')
-            urls.append({'title':text, 'url':firstURL})
-            results.append({'title':heading, 'url': firstURL})
+            urls.append({'title': text, 'url': firstURL})
+            results.append({'title': heading, 'url': firstURL})
 
     # related topics
     for ddg_result in search_res.get('RelatedTopics', None):
         if 'FirstURL' in ddg_result:
-            suggestion = result_to_text(ddg_result.get('FirstURL', None), ddg_result.get('Text', None), ddg_result.get('Result', None))
+            suggestion = result_to_text(ddg_result.get('FirstURL', None),
+                                        ddg_result.get('Text', None),
+                                        ddg_result.get('Result', None))
             if suggestion != heading:
                 results.append({'suggestion': suggestion})
         elif 'Topics' in ddg_result:
             suggestions = []
-            relatedTopics.append({ 'name' : ddg_result.get('Name', ''), 'suggestions': suggestions })
+            relatedTopics.append({'name': ddg_result.get('Name', ''),
+                                 'suggestions': suggestions})
             for topic_result in ddg_result.get('Topics', []):
-                suggestion = result_to_text(topic_result.get('FirstURL', None), topic_result.get('Text', None), topic_result.get('Result', None))
+                suggestion = result_to_text(topic_result.get('FirstURL', None),
+                                            topic_result.get('Text', None),
+                                            topic_result.get('Result', None))
                 if suggestion != heading:
                     suggestions.append(suggestion)
 
@@ ... @@
     if abstractURL != '':
         # add as result ? problem always in english
         infobox_id = abstractURL
-        urls.append({'title': search_res.get('AbstractSource'), 'url': abstractURL})
+        urls.append({'title': search_res.get('AbstractSource'),
+                    'url': abstractURL})
 
     # definition
     definitionURL = search_res.get('DefinitionURL', '')
     if definitionURL != '':
         # add as result ? as answer ? problem always in english
         infobox_id = definitionURL
-        urls.append({'title': search_res.get('DefinitionSource'), 'url': definitionURL})
+        urls.append({'title': search_res.get('DefinitionSource'),
+                    'url': definitionURL})
 
     # entity
     entity = search_res.get('Entity', None)
-    # TODO continent / country / department / location / waterfall / mountain range : link to map search, get weather, near by locations
+    # TODO continent / country / department / location / waterfall /
+    #      mountain range :
+    #      link to map search, get weather, near by locations
     # TODO musician : link to music search
     # TODO concert tour : ??
-    # TODO film / actor / television  / media franchise : links to IMDB / rottentomatoes (or scrap result)
+    # TODO film / actor / television  / media franchise :
+    #      links to IMDB / rottentomatoes (or scrap result)
     # TODO music : link tu musicbrainz / last.fm
     # TODO book : ??
     # TODO artist / playwright : ??
@@ ... @@
     # TODO programming language : ??
     # TODO file format : ??
 
-    if len(heading)>0:
+    if len(heading) > 0:
         # TODO get infobox.meta.value where .label='article_title'
-        if image==None and len(attributes)==0 and len(urls)==1 and len(relatedTopics)==0 and len(content)==0:
+        if image is None and len(attributes) == 0 and len(urls) == 1 and\
+           len(relatedTopics) == 0 and len(content) == 0:
             results.append({
-                    'url': urls[0]['url'],
-                    'title': heading,
-                    'content': content
-                    })
+                           'url': urls[0]['url'],
+                           'title': heading,
+                           'content': content
+                           })
         else:
             results.append({
-                    'infobox': heading,
-                    'id': infobox_id,
-                    'entity': entity,
-                    'content': content,
-                    'img_src' : image,
-                    'attributes': attributes,
-                    'urls': urls,
-                    'relatedTopics': relatedTopics
-                    })
+                           'infobox': heading,
+                           'id': infobox_id,
+                           'entity': entity,
+                           'content': content,
+                           'img_src': image,
+                           'attributes': attributes,
+                           'urls': urls,
+                           'relatedTopics': relatedTopics
+                           })
 
     return results
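The only non-whitespace rewrite in this file is splitting the long url template into two string literals joined with +. A minimal standalone sketch (names copied from the diff, the assertion is my own) showing the wrapped form evaluates to the same string as the old single-line literal:

    # Standalone sketch: the wrapped form from the new code builds the same
    # template string as the removed single-line literal.
    old_url = 'https://api.duckduckgo.com/?{query}&format=json&pretty=0&no_redirect=1&d=1'
    new_url = 'https://api.duckduckgo.com/'\
        + '?{query}&format=json&pretty=0&no_redirect=1&d=1'
    assert new_url == old_url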

searx/engines/wikidata.py  (+122, -59)

@@ ... @@
 from requests import get
 from urllib import urlencode
 
-resultCount=1
-urlSearch = 'https://www.wikidata.org/w/api.php?action=query&list=search&format=json&srnamespace=0&srprop=sectiontitle&{query}'
-urlDetail = 'https://www.wikidata.org/w/api.php?action=wbgetentities&format=json&props=labels%7Cinfo%7Csitelinks%7Csitelinks%2Furls%7Cdescriptions%7Cclaims&{query}'
-urlMap = 'https://www.openstreetmap.org/?lat={latitude}&lon={longitude}&zoom={zoom}&layers=M'
+result_count = 1
+wikidata_host = 'https://www.wikidata.org'
+wikidata_api = wikidata_host + '/w/api.php'
+url_search = wikidata_api \
+    + '?action=query&list=search&format=json'\
+    + '&srnamespace=0&srprop=sectiontitle&{query}'
+url_detail = wikidata_api\
+    + '?action=wbgetentities&format=json'\
+    + '&props=labels%7Cinfo%7Csitelinks'\
+    + '%7Csitelinks%2Furls%7Cdescriptions%7Cclaims'\
+    + '&{query}'
+url_map = 'https://www.openstreetmap.org/'\
+    + '?lat={latitude}&lon={longitude}&zoom={zoom}&layers=M'
+
 
 def request(query, params):
-    params['url'] = urlSearch.format(query=urlencode({'srsearch': query, 'srlimit': resultCount}))
+    params['url'] = url_search.format(
+        query=urlencode({'srsearch': query,
+                        'srlimit': result_count}))
     return params
 
 
@@ ... @@
     language = resp.search_params['language'].split('_')[0]
     if language == 'all':
         language = 'en'
-    url = urlDetail.format(query=urlencode({'ids': '|'.join(wikidata_ids), 'languages': language + '|en'}))
+    url = url_detail.format(query=urlencode({'ids': '|'.join(wikidata_ids),
+                                            'languages': language + '|en'}))
 
     htmlresponse = get(url)
     jsonresponse = json.loads(htmlresponse.content)
@@ ... @@
 
     return results
 
+
 def getDetail(jsonresponse, wikidata_id, language):
     results = []
     urls = []
@@ ... @@
     result = jsonresponse.get('entities', {}).get(wikidata_id, {})
 
     title = result.get('labels', {}).get(language, {}).get('value', None)
-    if title == None:
+    if title is None:
         title = result.get('labels', {}).get('en', {}).get('value', None)
-    if title == None:
+    if title is None:
         return results
 
-    description = result.get('descriptions', {}).get(language, {}).get('value', None)
-    if description == None:
-        description = result.get('descriptions', {}).get('en', {}).get('value', '')
+    description = result\
+        .get('descriptions', {})\
+        .get(language, {})\
+        .get('value', None)
+
+    if description is None:
+        description = result\
+            .get('descriptions', {})\
+            .get('en', {})\
+            .get('value', '')
 
     claims = result.get('claims', {})
     official_website = get_string(claims, 'P856', None)
-    if official_website != None:
-        urls.append({ 'title' : 'Official site', 'url': official_website })
-        results.append({ 'title': title, 'url' : official_website })
+    if official_website is not None:
+        urls.append({'title': 'Official site', 'url': official_website})
+        results.append({'title': title, 'url': official_website})
 
     wikipedia_link_count = 0
     if language != 'en':
-        wikipedia_link_count += add_url(urls, 'Wikipedia (' + language + ')', get_wikilink(result, language + 'wiki'))
+        wikipedia_link_count += add_url(urls,
+                                        'Wikipedia (' + language + ')',
+                                        get_wikilink(result, language +
+                                                     'wiki'))
     wikipedia_en_link = get_wikilink(result, 'enwiki')
-    wikipedia_link_count += add_url(urls, 'Wikipedia (en)', wikipedia_en_link)
+    wikipedia_link_count += add_url(urls,
+                                    'Wikipedia (en)',
+                                    wikipedia_en_link)
     if wikipedia_link_count == 0:
         misc_language = get_wiki_firstlanguage(result, 'wiki')
-        if misc_language != None:
-            add_url(urls, 'Wikipedia (' + misc_language + ')', get_wikilink(result, misc_language + 'wiki'))
+        if misc_language is not None:
+            add_url(urls,
+                    'Wikipedia (' + misc_language + ')',
+                    get_wikilink(result, misc_language + 'wiki'))
 
     if language != 'en':
-        add_url(urls, 'Wiki voyage (' + language + ')', get_wikilink(result, language + 'wikivoyage'))
-    add_url(urls, 'Wiki voyage (en)', get_wikilink(result, 'enwikivoyage'))
+        add_url(urls,
+                'Wiki voyage (' + language + ')',
+                get_wikilink(result, language + 'wikivoyage'))
+
+    add_url(urls,
+            'Wiki voyage (en)',
+            get_wikilink(result, 'enwikivoyage'))
 
     if language != 'en':
-        add_url(urls, 'Wikiquote (' + language + ')', get_wikilink(result, language + 'wikiquote'))
-    add_url(urls, 'Wikiquote (en)', get_wikilink(result, 'enwikiquote'))
+        add_url(urls,
+                'Wikiquote (' + language + ')',
+                get_wikilink(result, language + 'wikiquote'))
 
-    add_url(urls, 'Commons wiki', get_wikilink(result, 'commonswiki'))
+    add_url(urls,
+            'Wikiquote (en)',
+            get_wikilink(result, 'enwikiquote'))
 
-    add_url(urls, 'Location', get_geolink(claims, 'P625', None))
+    add_url(urls,
+            'Commons wiki',
+            get_wikilink(result, 'commonswiki'))
 
-    add_url(urls, 'Wikidata', 'https://www.wikidata.org/wiki/' + wikidata_id + '?uselang='+ language)
+    add_url(urls,
+            'Location',
+            get_geolink(claims, 'P625', None))
+
+    add_url(urls,
+            'Wikidata',
+            'https://www.wikidata.org/wiki/'
+            + wikidata_id + '?uselang=' + language)
 
     musicbrainz_work_id = get_string(claims, 'P435')
-    if musicbrainz_work_id != None:
-        add_url(urls, 'MusicBrainz', 'http://musicbrainz.org/work/' + musicbrainz_work_id)
+    if musicbrainz_work_id is not None:
+        add_url(urls,
+                'MusicBrainz',
+                'http://musicbrainz.org/work/'
+                + musicbrainz_work_id)
 
     musicbrainz_artist_id = get_string(claims, 'P434')
-    if musicbrainz_artist_id != None:
-        add_url(urls, 'MusicBrainz', 'http://musicbrainz.org/artist/' + musicbrainz_artist_id)
+    if musicbrainz_artist_id is not None:
+        add_url(urls,
+                'MusicBrainz',
+                'http://musicbrainz.org/artist/'
+                + musicbrainz_artist_id)
 
     musicbrainz_release_group_id = get_string(claims, 'P436')
-    if musicbrainz_release_group_id != None:
-        add_url(urls, 'MusicBrainz', 'http://musicbrainz.org/release-group/' + musicbrainz_release_group_id)
+    if musicbrainz_release_group_id is not None:
+        add_url(urls,
+                'MusicBrainz',
+                'http://musicbrainz.org/release-group/'
+                + musicbrainz_release_group_id)
 
     musicbrainz_label_id = get_string(claims, 'P966')
-    if musicbrainz_label_id != None:
-        add_url(urls, 'MusicBrainz', 'http://musicbrainz.org/label/' + musicbrainz_label_id)
+    if musicbrainz_label_id is not None:
+        add_url(urls,
+                'MusicBrainz',
+                'http://musicbrainz.org/label/'
+                + musicbrainz_label_id)
 
     # musicbrainz_area_id = get_string(claims, 'P982')
     # P1407 MusicBrainz series ID
@@ ... @@
     # P1407 MusicBrainz series ID
 
     postal_code = get_string(claims, 'P281', None)
-    if postal_code != None:
-        attributes.append({'label' : 'Postal code(s)', 'value' : postal_code})
+    if postal_code is not None:
+        attributes.append({'label': 'Postal code(s)', 'value': postal_code})
 
     date_of_birth = get_time(claims, 'P569', None)
-    if date_of_birth != None:
-        attributes.append({'label' : 'Date of birth', 'value' : date_of_birth})
+    if date_of_birth is not None:
+        attributes.append({'label': 'Date of birth', 'value': date_of_birth})
 
     date_of_death = get_time(claims, 'P570', None)
-    if date_of_death != None:
-        attributes.append({'label' : 'Date of death', 'value' : date_of_death})
+    if date_of_death is not None:
+        attributes.append({'label': 'Date of death', 'value': date_of_death})
 
-    if len(attributes)==0 and len(urls)==2 and len(description)==0:
+    if len(attributes) == 0 and len(urls) == 2 and len(description) == 0:
         results.append({
-                'url': urls[0]['url'],
-                'title': title,
-                'content': description
-                })
+                       'url': urls[0]['url'],
+                       'title': title,
+                       'content': description
+                       })
     else:
         results.append({
-                'infobox' : title,
-                'id' : wikipedia_en_link,
-                'content' : description,
-                'attributes' : attributes,
-                'urls' : urls
-                })
+                       'infobox': title,
+                       'id': wikipedia_en_link,
+                       'content': description,
+                       'attributes': attributes,
+                       'urls': urls
+                       })
 
     return results
 
 
 def add_url(urls, title, url):
-    if url != None:
-        urls.append({'title' : title, 'url' : url})
+    if url is not None:
+        urls.append({'title': title, 'url': url})
         return 1
     else:
         return 0
 
+
 def get_mainsnak(claims, propertyName):
     propValue = claims.get(propertyName, {})
     if len(propValue) == 0:
@@ ... @@
         mainsnak = e.get('mainsnak', {})
 
         datavalue = mainsnak.get('datavalue', {})
-        if datavalue != None:
+        if datavalue is not None:
             result.append(datavalue.get('value', ''))
 
     if len(result) == 0:
@@ ... @@
         mainsnak = e.get('mainsnak', {})
 
         datavalue = mainsnak.get('datavalue', {})
-        if datavalue != None:
+        if datavalue is not None:
             value = datavalue.get('value', '')
             result.append(value.get('time', ''))
 
@@ ... @@
 def get_geolink(claims, propertyName, defaultValue=''):
     mainsnak = get_mainsnak(claims, propertyName)
 
-    if mainsnak == None:
+    if mainsnak is None:
         return defaultValue
 
     datatype = mainsnak.get('datatype', '')
@@ ... @@
     # 1 --> 6
     # 0.016666666666667 --> 9
     # 0.00027777777777778 --> 19
-    # wolframalpha : quadratic fit { {13, 5}, {1, 6}, {0.0166666, 9}, {0.0002777777,19}}
+    # wolframalpha :
+    # quadratic fit { {13, 5}, {1, 6}, {0.0166666, 9}, {0.0002777777,19}}
     # 14.1186-8.8322 x+0.625447 x^2
     if precision < 0.0003:
         zoom = 19
     else:
         zoom = int(15 - precision*8.8322 + precision*precision*0.625447)
 
-    url = urlMap.replace('{latitude}', str(value.get('latitude',0))).replace('{longitude}', str(value.get('longitude',0))).replace('{zoom}', str(zoom))
+    url = url_map\
+        .replace('{latitude}', str(value.get('latitude', 0)))\
+        .replace('{longitude}', str(value.get('longitude', 0)))\
+        .replace('{zoom}', str(zoom))
 
     return url
 
 
 def get_wikilink(result, wikiid):
     url = result.get('sitelinks', {}).get(wikiid, {}).get('url', None)
-    if url == None:
+    if url is None:
         return url
     elif url.startswith('http://'):
         url = url.replace('http://', 'https://')
@@ ... @@
         url = 'https:' + url
     return url
 
+
 def get_wiki_firstlanguage(result, wikipatternid):
     for k in result.get('sitelinks', {}).keys():
-        if k.endswith(wikipatternid) and len(k)==(2+len(wikipatternid)):
+        if k.endswith(wikipatternid) and len(k) == (2+len(wikipatternid)):
             return k[0:2]
     return None
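Most of the remaining edits in wikidata.py swap == None / != None for is None / is not None (PEP8 rule E711). A small illustration of why the identity check is preferred; the class below is made up for the example and is not part of the engine:

    # Illustration only (hypothetical class, not from searx): equality with
    # None can be redefined by __eq__, while identity cannot, which is why
    # PEP8 (E711) asks for 'is None' / 'is not None'.
    class AlwaysEqual(object):
        def __eq__(self, other):
            return True

    obj = AlwaysEqual()
    print(obj == None)   # True  -- __eq__ makes the comparison misleading
    print(obj is None)   # False -- identity check is unambiguous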