forked from Icycoide/searxng
[mod] various engines: use eval_xpath* functions and searx.exceptions.*
Engine list: ahmia, duckduckgo_images, elasticsearch, google, google_images, google_videos, youtube_api
This commit is contained in:
parent
ad72803ed9
commit
64cccae99e
7 changed files with 44 additions and 52 deletions
|
@@ -15,6 +15,7 @@
 from json import loads
 from urllib.parse import urlencode
+from searx.exceptions import SearxEngineAPIException
 from searx.engines.duckduckgo import get_region_code
 from searx.engines.duckduckgo import _fetch_supported_languages, supported_languages_url  # NOQA # pylint: disable=unused-import
 from searx.poolrequests import get
@@ -37,7 +38,7 @@ def get_vqd(query, headers):
     res = get(query_url, headers=headers)
     content = res.text
     if content.find('vqd=\'') == -1:
-        raise Exception('Request failed')
+        raise SearxEngineAPIException('Request failed')
     vqd = content[content.find('vqd=\'') + 5:]
     vqd = vqd[:vqd.find('\'')]
     return vqd
@@ -71,10 +72,7 @@ def response(resp):
     results = []

     content = resp.text
-    try:
-        res_json = loads(content)
-    except:
-        raise Exception('Cannot parse results')
+    res_json = loads(content)

     # parse results
     for result in res_json['results']:
Loading…
Add table
Add a link
Reference in a new issue