소스 검색

[mod] len() removed from conditions

asciimoo 11년 전
부모
커밋
c1d7d30b8e
8개의 변경된 파일에 19줄의 추가와 17줄의 삭제
  1. 1
    1
      searx/engines/__init__.py
  2. 1
    1
      searx/engines/json_engine.py
  3. 1
    1
      searx/engines/startpage.py
  4. 1
    1
      searx/engines/xpath.py
  5. 3
    3
      searx/engines/yacy.py
  6. 6
    4
      searx/engines/youtube.py
  7. 3
    3
      searx/search.py
  8. 3
    3
      searx/webapp.py

+ 1
- 1
searx/engines/__init__.py 파일 보기

163
                 duplicated = new_res
163
                 duplicated = new_res
164
                 break
164
                 break
165
         if duplicated:
165
         if duplicated:
166
-            if len(res.get('content', '')) > len(duplicated.get('content', '')):  # noqa
166
+            if res.get('content') > duplicated.get('content'):
167
                 duplicated['content'] = res['content']
167
                 duplicated['content'] = res['content']
168
             duplicated['score'] += score
168
             duplicated['score'] += score
169
             duplicated['engines'].append(res['engine'])
169
             duplicated['engines'].append(res['engine'])

+ 1
- 1
searx/engines/json_engine.py 파일 보기

39
 
39
 
40
 def do_query(data, q):
40
 def do_query(data, q):
41
     ret = []
41
     ret = []
42
-    if not len(q):
42
+    if not q:
43
         return ret
43
         return ret
44
 
44
 
45
     qkey = q[0]
45
     qkey = q[0]

+ 1
- 1
searx/engines/startpage.py 파일 보기

35
         title = link.text_content()
35
         title = link.text_content()
36
 
36
 
37
         content = ''
37
         content = ''
38
-        if len(result.xpath('./p[@class="desc"]')):
38
+        if result.xpath('./p[@class="desc"]'):
39
             content = result.xpath('./p[@class="desc"]')[0].text_content()
39
             content = result.xpath('./p[@class="desc"]')[0].text_content()
40
 
40
 
41
         results.append({'url': url, 'title': title, 'content': content})
41
         results.append({'url': url, 'title': title, 'content': content})

+ 1
- 1
searx/engines/xpath.py 파일 보기

23
 def extract_text(xpath_results):
23
 def extract_text(xpath_results):
24
     if type(xpath_results) == list:
24
     if type(xpath_results) == list:
25
         # it's list of result : concat everything using recursive call
25
         # it's list of result : concat everything using recursive call
26
-        if not len(xpath_results):
26
+        if not xpath_results:
27
             raise Exception('Empty url resultset')
27
             raise Exception('Empty url resultset')
28
         result = ''
28
         result = ''
29
         for e in xpath_results:
29
         for e in xpath_results:

+ 3
- 3
searx/engines/yacy.py 파일 보기

13
 def response(resp):
13
 def response(resp):
14
     raw_search_results = loads(resp.text)
14
     raw_search_results = loads(resp.text)
15
 
15
 
16
-    if not len(raw_search_results):
16
+    if not raw_search_results:
17
         return []
17
         return []
18
 
18
 
19
     search_results = raw_search_results.get('channels', {})[0].get('items', [])
19
     search_results = raw_search_results.get('channels', {})[0].get('items', [])
26
         tmp_result['url'] = result['link']
26
         tmp_result['url'] = result['link']
27
         tmp_result['content'] = ''
27
         tmp_result['content'] = ''
28
 
28
 
29
-        if len(result['description']):
29
+        if result['description']:
30
             tmp_result['content'] += result['description'] + "<br/>"
30
             tmp_result['content'] += result['description'] + "<br/>"
31
 
31
 
32
-        if len(result['pubDate']):
32
+        if result['pubDate']:
33
             tmp_result['content'] += result['pubDate'] + "<br/>"
33
             tmp_result['content'] += result['pubDate'] + "<br/>"
34
 
34
 
35
         if result['size'] != '-1':
35
         if result['size'] != '-1':

+ 6
- 4
searx/engines/youtube.py 파일 보기

22
     if not 'feed' in search_results:
22
     if not 'feed' in search_results:
23
         return results
23
         return results
24
     feed = search_results['feed']
24
     feed = search_results['feed']
25
+
25
     for result in feed['entry']:
26
     for result in feed['entry']:
26
         url = [x['href'] for x in result['link'] if x['type'] == 'text/html']
27
         url = [x['href'] for x in result['link'] if x['type'] == 'text/html']
27
-        if not len(url):
28
+        if not url:
28
             return
29
             return
29
         # remove tracking
30
         # remove tracking
30
         url = url[0].replace('feature=youtube_gdata', '')
31
         url = url[0].replace('feature=youtube_gdata', '')
32
             url = url[:-1]
33
             url = url[:-1]
33
         title = result['title']['$t']
34
         title = result['title']['$t']
34
         content = ''
35
         content = ''
35
-
36
         thumbnail = ''
36
         thumbnail = ''
37
-        if len(result['media$group']['media$thumbnail']):
37
+
38
+        if result['media$group']['media$thumbnail']:
38
             thumbnail = result['media$group']['media$thumbnail'][0]['url']
39
             thumbnail = result['media$group']['media$thumbnail'][0]['url']
39
             content += '<a href="{0}" title="{0}" ><img src="{1}" /></a>'.format(url, thumbnail)  # noqa
40
             content += '<a href="{0}" title="{0}" ><img src="{1}" /></a>'.format(url, thumbnail)  # noqa
40
-        if len(content):
41
+
42
+        if content:
41
             content += '<br />' + result['content']['$t']
43
             content += '<br />' + result['content']['$t']
42
         else:
44
         else:
43
             content = result['content']['$t']
45
             content = result['content']['$t']

+ 3
- 3
searx/search.py 파일 보기

49
 
49
 
50
         self.categories = []
50
         self.categories = []
51
 
51
 
52
-        if len(self.engines):
52
+        if self.engines:
53
             self.categories = list(set(engine['category']
53
             self.categories = list(set(engine['category']
54
                                        for engine in self.engines))
54
                                        for engine in self.engines))
55
         else:
55
         else:
59
                     if not category in categories:
59
                     if not category in categories:
60
                         continue
60
                         continue
61
                     self.categories.append(category)
61
                     self.categories.append(category)
62
-            if not len(self.categories):
62
+            if not self.categories:
63
                 cookie_categories = request.cookies.get('categories', '')
63
                 cookie_categories = request.cookies.get('categories', '')
64
                 cookie_categories = cookie_categories.split(',')
64
                 cookie_categories = cookie_categories.split(',')
65
                 for ccateg in cookie_categories:
65
                 for ccateg in cookie_categories:
66
                     if ccateg in categories:
66
                     if ccateg in categories:
67
                         self.categories.append(ccateg)
67
                         self.categories.append(ccateg)
68
-            if not len(self.categories):
68
+            if not self.categories:
69
                 self.categories = ['general']
69
                 self.categories = ['general']
70
 
70
 
71
             for categ in self.categories:
71
             for categ in self.categories:

+ 3
- 3
searx/webapp.py 파일 보기

91
         for ccateg in cookie_categories:
91
         for ccateg in cookie_categories:
92
             if ccateg in categories:
92
             if ccateg in categories:
93
                 kwargs['selected_categories'].append(ccateg)
93
                 kwargs['selected_categories'].append(ccateg)
94
-        if not len(kwargs['selected_categories']):
94
+        if not kwargs['selected_categories']:
95
             kwargs['selected_categories'] = ['general']
95
             kwargs['selected_categories'] = ['general']
96
     return render_template(template_name, **kwargs)
96
     return render_template(template_name, **kwargs)
97
 
97
 
150
     elif search.request_data.get('format') == 'csv':
150
     elif search.request_data.get('format') == 'csv':
151
         csv = UnicodeWriter(cStringIO.StringIO())
151
         csv = UnicodeWriter(cStringIO.StringIO())
152
         keys = ('title', 'url', 'content', 'host', 'engine', 'score')
152
         keys = ('title', 'url', 'content', 'host', 'engine', 'score')
153
-        if len(search.results):
153
+        if search.results:
154
             csv.writerow(keys)
154
             csv.writerow(keys)
155
             for row in search.results:
155
             for row in search.results:
156
                 row['host'] = row['parsed_url'].netloc
156
                 row['host'] = row['parsed_url'].netloc
157
                 csv.writerow([row.get(key, '') for key in keys])
157
                 csv.writerow([row.get(key, '') for key in keys])
158
-        csv.stream.seek(0)
158
+            csv.stream.seek(0)
159
         response = Response(csv.stream.read(), mimetype='application/csv')
159
         response = Response(csv.stream.read(), mimetype='application/csv')
160
         cont_disp = 'attachment;Filename=searx_-_{0}.csv'.format(search.query)
160
         cont_disp = 'attachment;Filename=searx_-_{0}.csv'.format(search.query)
161
         response.headers.add('Content-Disposition', cont_disp)
161
         response.headers.add('Content-Disposition', cont_disp)