
[mod] unused imports and whitespaces purged

asciimoo, 11 years ago
commit d13c5aa37b
2 changed files with 9 additions and 11 deletions
  1. searx/engines/filecrop.py (+4, -6)
  2. searx/engines/yacy.py (+5, -5)
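
Unused imports like the ones removed in this commit can be found mechanically. The commit does not say whether a tool was used; as an illustration, here is a simplified stdlib-only check that flags imported names never read elsewhere in a module (it ignores re-exports and __all__, so treat it as a sketch rather than a real linter):

    import ast
    import sys

    def unused_imports(source):
        # Collect every imported binding and every name that is read.
        tree = ast.parse(source)
        imported, used = {}, set()
        for node in ast.walk(tree):
            if isinstance(node, ast.Import):
                for alias in node.names:
                    # 'import a.b' binds the top-level name 'a'
                    imported[alias.asname or alias.name.split('.')[0]] = node.lineno
            elif isinstance(node, ast.ImportFrom):
                for alias in node.names:
                    imported[alias.asname or alias.name] = node.lineno
            elif isinstance(node, ast.Name) and isinstance(node.ctx, ast.Load):
                used.add(node.id)
        return sorted((line, name) for name, line in imported.items()
                      if name not in used)

    if __name__ == '__main__':
        with open(sys.argv[1]) as f:
            for line, name in unused_imports(f.read()):
                print('%s:%d: %s imported but unused' % (sys.argv[1], line, name))

Run against the pre-commit filecrop.py, a check like this reports loads and html_to_text, the two imports deleted in the first hunk below.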

searx/engines/filecrop.py (+4, -6)

@@ -1,6 +1,4 @@
-from json import loads
 from urllib import urlencode
-from searx.utils import html_to_text
 from HTMLParser import HTMLParser
 
 url = 'http://www.filecrop.com/'
@@ -10,7 +8,7 @@ class FilecropResultParser(HTMLParser):
     def __init__(self):
         HTMLParser.__init__(self)
         self.__start_processing = False
-        
+
         self.results = []
         self.result = {}
 
@@ -22,7 +20,7 @@ class FilecropResultParser(HTMLParser):
         if tag == 'tr':
             if ('bgcolor', '#edeff5') in attrs or ('bgcolor', '#ffffff') in attrs:
                 self.__start_processing = True
-                
+
         if not self.__start_processing:
             return
 
@@ -50,7 +48,7 @@ class FilecropResultParser(HTMLParser):
                 self.data_counter = 0
                 self.results.append(self.result)
                 self.result = {}
-                                
+
     def handle_data(self, data):
         if not self.__start_processing:
             return
@@ -59,7 +57,7 @@ class FilecropResultParser(HTMLParser):
             self.result['content'] += data + ' '
         else:
             self.result['content'] = data + ' '
-        
+
         self.data_counter += 1
 
 def request(query, params):
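
The filecrop.py hunks only strip trailing whitespace inside FilecropResultParser, but they show the parser's shape: handle_starttag arms __start_processing on matching <tr> rows and handle_data accumulates each result's content. A minimal, hypothetical driver follows; the sample HTML is fabricated, only the class name and its results attribute come from the diff, and since not all of the parser's logic appears in the hunks, this row may or may not yield a result. The point is the feed/results flow of an HTMLParser subclass:

    from searx.engines.filecrop import FilecropResultParser

    # Fabricated markup shaped like the rows the parser matches on
    # (('bgcolor', '#edeff5') or ('bgcolor', '#ffffff') attributes).
    sample_html = '<table><tr bgcolor="#edeff5"><td>example data</td></tr></table>'

    parser = FilecropResultParser()
    parser.feed(sample_html)   # fires handle_starttag / handle_data callbacks
    parser.close()

    for result in parser.results:  # dicts accumulated in handle_data
        print(result)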

searx/engines/yacy.py (+5, -5)

@@ -1,5 +1,5 @@
 from json import loads
-from urllib import urlencode, quote
+from urllib import urlencode
 
 url = 'http://localhost:8090'
 search_url = '/yacysearch.json?{query}&maximumRecords=10'
@@ -10,7 +10,7 @@ def request(query, params):
 
 def response(resp):
     raw_search_results = loads(resp.text)
-    
+
     if not len(raw_search_results):
         return []
 
@@ -22,10 +22,10 @@ def response(resp):
         tmp_result = {}
         tmp_result['title'] = result['title']
         tmp_result['url'] = result['link']
-        tmp_result['content'] = '' 
-        
+        tmp_result['content'] = ''
+
         if len(result['description']):
-            tmp_result['content'] += result['description'] +"<br/>" 
+            tmp_result['content'] += result['description'] +"<br/>"
 
         if len(result['pubDate']):
             tmp_result['content'] += result['pubDate'] + "<br/>"
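
The yacy.py change can drop quote because urlencode already percent-escapes the query when filling the search_url template. A sketch of how request plausibly builds the URL; the 'query' key and the params['url'] convention are assumptions, since the body of request is not part of this diff:

    from urllib import urlencode  # Python 2, matching the engine's imports

    url = 'http://localhost:8090'
    search_url = '/yacysearch.json?{query}&maximumRecords=10'

    def request(query, params):
        # urlencode({'query': query}) yields e.g. 'query=foo+bar', already
        # escaped, so the separate quote import was dead code.
        params['url'] = url + search_url.format(query=urlencode({'query': query}))
        return params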