[mod] move extract_text, extract_url to searx.utils

parent ecb9f28869
commit 2006eb4680

39 changed files with 156 additions and 137 deletions
searx/engines/xpath.py

@@ -1,7 +1,6 @@
-from urllib.parse import unquote, urlencode, urljoin, urlparse
+from urllib.parse import urlencode
 from lxml import html
-from lxml.etree import _ElementStringResult, _ElementUnicodeResult
-from searx.utils import html_to_text, eval_xpath
+from searx.utils import extract_text, extract_url, eval_xpath
 
 search_url = None
 url_xpath = None
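
With the helpers now exported from searx.utils, an engine's call site reduces to the imports above. A minimal sketch of how the moved functions are used together (the HTML snippet and the search URL are made up for illustration, not part of this commit):

    from lxml import html
    from searx.utils import extract_text, extract_url, eval_xpath

    # hypothetical response body from an engine
    dom = html.fromstring('<div><a href="/page?q=x">A result</a></div>')

    results = eval_xpath(dom, '//a')
    title = extract_text(results)  # -> 'A result'

    # the second argument is the engine's search URL; site-relative
    # hrefs are joined against it
    url = extract_url(eval_xpath(dom, '//a/@href'),
                      'https://searx.example/search')
    # -> 'https://searx.example/page?q=x'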
@@ -21,76 +20,6 @@ page_size = 1
 first_page_num = 1
 
 
-'''
-if xpath_results is a list, extract the text from each result and concat the list
-if xpath_results is an xml element, extract all the text nodes from it
-( text_content() method from lxml )
-if xpath_results is a string element, then it's already done
-'''
-
-
-def extract_text(xpath_results):
-    if type(xpath_results) == list:
-        # it's a list of results: concat everything using a recursive call
-        result = ''
-        for e in xpath_results:
-            result = result + extract_text(e)
-        return result.strip()
-    elif type(xpath_results) in [_ElementStringResult, _ElementUnicodeResult]:
-        # it's a string
-        return ''.join(xpath_results)
-    else:
-        # it's an element
-        text = html.tostring(
-            xpath_results, encoding='unicode', method='text', with_tail=False
-        )
-        text = text.strip().replace('\n', ' ')
-        return ' '.join(text.split())
-
-
-def extract_url(xpath_results, search_url):
-    if xpath_results == []:
-        raise Exception('Empty url resultset')
-    url = extract_text(xpath_results)
-
-    if url.startswith('//'):
-        # add http or https to this kind of url: //example.com/
-        parsed_search_url = urlparse(search_url)
-        url = '{0}:{1}'.format(parsed_search_url.scheme or 'http', url)
-    elif url.startswith('/'):
-        # fix relative url to the search engine
-        url = urljoin(search_url, url)
-
-    # fix relative urls that fall through the cracks
-    if '://' not in url:
-        url = urljoin(search_url, url)
-
-    # normalize url
-    url = normalize_url(url)
-
-    return url
-
-
-def normalize_url(url):
-    parsed_url = urlparse(url)
-
-    # add a / at the end of the url if there is no path
-    if not parsed_url.netloc:
-        raise Exception('Cannot parse url')
-    if not parsed_url.path:
-        url += '/'
-
-    # FIXME : hack for yahoo
-    if parsed_url.hostname == 'search.yahoo.com'\
-            and parsed_url.path.startswith('/r'):
-        p = parsed_url.path
-        mark = p.find('/**')
-        if mark != -1:
-            return unquote(p[mark + 3:])
-
-    return url
-
-
 def request(query, params):
     query = urlencode({'q': query})[2:]
 
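
The helpers removed here move to searx.utils as-is. For reference, a sketch of what extract_url does with the three URL shapes it handles, assuming the moved function keeps the signature shown above; the href_of helper, the snippets, and the base search URL exist only for this illustration:

    from lxml import html
    from searx.utils import extract_url, eval_xpath

    def href_of(snippet):
        # sketch-only helper: href attributes of a parsed snippet
        return eval_xpath(html.fromstring(snippet), '//a/@href')

    base = 'https://www.example.com/search'

    # protocol-relative: the scheme is copied from the search URL
    extract_url(href_of('<a href="//cdn.example.org/i.png">x</a>'), base)
    # -> 'https://cdn.example.org/i.png'

    # site-relative: joined against the search URL
    extract_url(href_of('<a href="/about">x</a>'), base)
    # -> 'https://www.example.com/about'

    # absolute but with an empty path: normalize_url appends a trailing '/'
    extract_url(href_of('<a href="https://other.example">x</a>'), base)
    # -> 'https://other.example/'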