@@ -25,7 +25,6 @@ from urlparse import urlparse
 from searx import settings
 import ConfigParser
 import sys
-import re
 from datetime import datetime
 
 engine_dir = dirname(realpath(__file__))
@@ -106,31 +105,6 @@ def make_callback(engine_name, results, suggestions, callback, params):
         results[engine_name] = cb_res
     return process_callback
 
-def highlight_content(content, query):
-
-    if not content:
-        return None
-    # ignoring html contents
-    # TODO better html content detection
-    if content.find('<') != -1:
-        return content
-
-    query = query.decode('utf-8')
-    if content.lower().find(query.lower()) > -1:
-        query_regex = u'({0})'.format(re.escape(query))
-        content = re.sub(query_regex, '<b>\\1</b>', content, flags=re.I | re.U)
-    else:
-        regex_parts = []
-        for chunk in query.split():
-            if len(chunk) == 1:
-                regex_parts.append(u'\W+{0}\W+'.format(re.escape(chunk)))
-            else:
-                regex_parts.append(u'{0}'.format(re.escape(chunk)))
-        query_regex = u'({0})'.format('|'.join(regex_parts))
-        content = re.sub(query_regex, '<b>\\1</b>', content, flags=re.I | re.U)
-
-    return content
-
 def score_results(results):
     flat_res = filter(None, chain.from_iterable(izip_longest(*results.values())))
     flat_len = len(flat_res)
@@ -218,8 +192,6 @@ def search(query, request, selected_engines):
     results = score_results(results)
 
     for result in results:
-        if 'content' in result:
-            result['content'] = highlight_content(result['content'], query)
        for res_engine in result['engines']:
             engines[result['engine']].stats['score_count'] += result['score']
 
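For context, the deleted helper wrapped query terms in <b> tags before results were rendered; this change drops that highlighting step from the search pipeline. Below is a standalone sketch of the removed behaviour, lifted from the lines deleted above and runnable on its own under Python 2 (the module uses ConfigParser and str.decode('utf-8')); the sample strings are made up for illustration.

import re

def highlight_content(content, query):
    if not content:
        return None
    # HTML snippets were returned untouched to avoid breaking markup
    if content.find('<') != -1:
        return content

    query = query.decode('utf-8')
    if content.lower().find(query.lower()) > -1:
        # the whole query occurs verbatim: bold every occurrence
        query_regex = u'({0})'.format(re.escape(query))
        content = re.sub(query_regex, '<b>\\1</b>', content, flags=re.I | re.U)
    else:
        # otherwise bold the individual query terms
        regex_parts = []
        for chunk in query.split():
            if len(chunk) == 1:
                regex_parts.append(u'\W+{0}\W+'.format(re.escape(chunk)))
            else:
                regex_parts.append(u'{0}'.format(re.escape(chunk)))
        query_regex = u'({0})'.format('|'.join(regex_parts))
        content = re.sub(query_regex, '<b>\\1</b>', content, flags=re.I | re.U)

    return content

print highlight_content('a free and open internet', 'open internet')
# -> a free and <b>open internet</b>           (whole query matched)
print highlight_content('an open and free internet', 'open internet')
# -> an <b>open</b> and free <b>internet</b>   (per-term fallback)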