[mod] pylint: numerous minor code fixes

Alexandre Flament 2020-11-16 09:43:23 +01:00
parent 9ed3ee2beb
commit b00d108673
24 changed files with 48 additions and 64 deletions

searx/engines/acgsou.py

@@ -41,7 +41,6 @@ def response(resp):
         # defaults
         filesize = 0
         magnet_link = "magnet:?xt=urn:btih:{}&tr=http://tracker.acgsou.com:2710/announce"
-        torrent_link = ""
 
         try:
             category = extract_text(result.xpath(xpath_category)[0])
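
Note: the deleted `torrent_link = ""` is the pattern pylint reports as unused-variable (W0612), a name that is bound but never read. A minimal standalone sketch of the warning (names illustrative, not searx code):

def build_result(filesize):
    torrent_link = ""  # W0612: assigned but never used; pylint asks for its removal
    magnet = "magnet:?xt=urn:btih:{}".format("0" * 40)
    return {'filesize': filesize, 'magnet': magnet}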

searx/engines/arxiv.py

@@ -61,7 +61,7 @@ def response(resp):
         content = content_string.format(doi_content="", abstract_content=abstract)
 
         if len(content) > 300:
-                content = content[0:300] + "..."
+            content = content[0:300] + "..."
         # TODO: center snippet on query term
 
         publishedDate = datetime.strptime(entry.xpath('.//published')[0].text, '%Y-%m-%dT%H:%M:%SZ')
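
Note: the two identical-looking lines differ only in leading whitespace; this is an indentation fix, which pylint reports as bad-indentation (W0311). The corrected shape, as a standalone sketch:

def truncate(content):
    # exactly one 4-space level below the `if`; the pre-fix line carried extra
    # indentation, which is legal Python but flagged by pylint as W0311
    if len(content) > 300:
        content = content[0:300] + "..."
    return content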

searx/engines/base.py

@@ -80,10 +80,7 @@ def response(resp):
         date = datetime.now()  # needed in case no dcdate is available for an item
 
         for item in entry:
-            if item.attrib["name"] == "dchdate":
-                harvestDate = item.text
-
-            elif item.attrib["name"] == "dcdate":
+            if item.attrib["name"] == "dcdate":
                 date = item.text
 
             elif item.attrib["name"] == "dctitle":

searx/engines/bing_images.py

@@ -18,7 +18,7 @@ from json import loads
 from searx.utils import match_language
 
 from searx.engines.bing import language_aliases
-from searx.engines.bing import _fetch_supported_languages, supported_languages_url  # NOQA
+from searx.engines.bing import _fetch_supported_languages, supported_languages_url  # NOQA # pylint: disable=unused-import
 
 # engine dependent config
 categories = ['images']
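
Note: these imports are deliberate re-exports; searx resolves `_fetch_supported_languages` and `supported_languages_url` as attributes of each engine module, so the names must exist here even though this file never calls them. `# NOQA` only silences flake8 (F401, imported but unused); pylint does not honor NOQA markers, hence the added trailing directive. The combined pattern, annotated:

# kept for re-export: the engine loader looks these names up on this module
# NOQA silences flake8's F401; pylint needs its own inline disable on the same line
from searx.engines.bing import _fetch_supported_languages, supported_languages_url  # NOQA # pylint: disable=unused-import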

searx/engines/bing_news.py

@@ -17,7 +17,7 @@ from urllib.parse import urlencode, urlparse, parse_qsl
 from lxml import etree
 from searx.utils import list_get, match_language
 from searx.engines.bing import language_aliases
-from searx.engines.bing import _fetch_supported_languages, supported_languages_url  # NOQA
+from searx.engines.bing import _fetch_supported_languages, supported_languages_url  # NOQA # pylint: disable=unused-import
 
 # engine dependent config
 categories = ['news']

searx/engines/bing_videos.py

@@ -16,7 +16,7 @@ from urllib.parse import urlencode
 
 from searx.utils import match_language
 from searx.engines.bing import language_aliases
-from searx.engines.bing import _fetch_supported_languages, supported_languages_url  # NOQA
+from searx.engines.bing import _fetch_supported_languages, supported_languages_url  # NOQA # pylint: disable=unused-import
 
 categories = ['videos']
 paging = True

searx/engines/currency_convert.py

@@ -41,8 +41,6 @@ def request(query, params):
     from_currency = name_to_iso4217(from_currency.strip())
     to_currency = name_to_iso4217(to_currency.strip())
 
-    q = (from_currency + to_currency).upper()
-
     params['url'] = url.format(from_currency, to_currency)
     params['amount'] = amount
     params['from'] = from_currency

searx/engines/duckduckgo.py

@@ -49,11 +49,11 @@ correction_xpath = '//div[@id="did_you_mean"]//a'
 
 
 # match query's language to a region code that duckduckgo will accept
-def get_region_code(lang, lang_list=[]):
+def get_region_code(lang, lang_list=None):
     if lang == 'all':
         return None
 
-    lang_code = match_language(lang, lang_list, language_aliases, 'wt-WT')
+    lang_code = match_language(lang, lang_list or [], language_aliases, 'wt-WT')
     lang_parts = lang_code.split('-')
 
     # country code goes first
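
Note: changing the default from `[]` to `None` fixes pylint's dangerous-default-value (W0102): a mutable default is evaluated once, at function definition time, and is then shared by every subsequent call. `get_region_code` only reads the list, so here the change is preventive, but the failure mode is easy to demonstrate (standalone sketch, not searx code):

def remember_bad(item, seen=[]):  # W0102: one shared list serves every call
    seen.append(item)
    return seen

def remember_good(item, seen=None):  # the fix used above: a fresh list per call
    seen = seen or []
    seen.append(item)
    return seen

print(remember_bad('a'))   # ['a']
print(remember_bad('b'))   # ['a', 'b']  <- state leaked from the first call
print(remember_good('a'))  # ['a']
print(remember_good('b'))  # ['b']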

searx/engines/duckduckgo_definitions.py

@@ -16,7 +16,7 @@ from lxml import html
 
 from searx import logger
 from searx.data import WIKIDATA_UNITS
 from searx.engines.duckduckgo import language_aliases
-from searx.engines.duckduckgo import _fetch_supported_languages, supported_languages_url  # NOQA
+from searx.engines.duckduckgo import _fetch_supported_languages, supported_languages_url  # NOQA # pylint: disable=unused-import
 from searx.utils import extract_text, html_to_text, match_language, get_string_replaces_function
 from searx.external_urls import get_external_url, get_earth_coordinates_url, area_to_osm_zoom

searx/engines/duckduckgo_images.py

@@ -16,7 +16,7 @@
 from json import loads
 from urllib.parse import urlencode
 
 from searx.engines.duckduckgo import get_region_code
-from searx.engines.duckduckgo import _fetch_supported_languages, supported_languages_url  # NOQA
+from searx.engines.duckduckgo import _fetch_supported_languages, supported_languages_url  # NOQA # pylint: disable=unused-import
 from searx.poolrequests import get
 
 # engine dependent config

searx/engines/filecrop.py

@@ -8,7 +8,7 @@ search_url = url + '/search.php?{query}&size_i=0&size_f=100000000&engine_r=1&eng
 paging = True
 
 
-class FilecropResultParser(HTMLParser):
+class FilecropResultParser(HTMLParser):  # pylint: disable=W0223 # (see https://bugs.python.org/issue31844)
 
     def __init__(self):
         HTMLParser.__init__(self)
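
Note: W0223 is abstract-method. pylint considers `ParserBase.error()` abstract (the referenced bug, bpo-31844), so any `HTMLParser` subclass that does not override it gets flagged even though the parser works; the disable records that this is a stdlib quirk, not a real defect. A minimal subclass using the same suppression (illustrative, not the filecrop parser):

from html.parser import HTMLParser

class LinkCollector(HTMLParser):  # pylint: disable=W0223 # (see https://bugs.python.org/issue31844)
    def __init__(self):
        HTMLParser.__init__(self)
        self.links = []

    def handle_starttag(self, tag, attrs):
        # collect href attributes from anchor tags
        if tag == 'a':
            self.links.extend(value for name, value in attrs if name == 'href')

collector = LinkCollector()
collector.feed('<a href="https://example.org/">example</a>')
print(collector.links)  # ['https://example.org/']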

searx/engines/google_images.py

@@ -29,10 +29,7 @@ from lxml import html
 from flask_babel import gettext
 from searx import logger
 from searx.utils import extract_text, eval_xpath
 
-# pylint: disable=unused-import
-from searx.engines.google import _fetch_supported_languages, supported_languages_url  # NOQA
-# pylint: enable=unused-import
+from searx.engines.google import _fetch_supported_languages, supported_languages_url  # NOQA # pylint: disable=unused-import
 
 from searx.engines.google import (
     get_lang_country,
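
Note: collapsing the disable/enable pair into a trailing comment narrows the suppression; a standalone `# pylint: disable=...` line applies from that point until the matching enable (or the end of the scope), while a trailing directive covers only its own line. Both forms side by side (illustrative imports):

# block form: unused-import is suppressed for everything between the markers
# pylint: disable=unused-import
import json
# pylint: enable=unused-import

# trailing form: the suppression is scoped to exactly this line
import csv  # pylint: disable=unused-import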

searx/engines/google_news.py

@@ -13,7 +13,7 @@
 from urllib.parse import urlencode
 from lxml import html
 from searx.utils import match_language
-from searx.engines.google import _fetch_supported_languages, supported_languages_url  # NOQA
+from searx.engines.google import _fetch_supported_languages, supported_languages_url  # NOQA # pylint: disable=unused-import
 
 # search-url
 categories = ['news']

searx/engines/peertube.py

@@ -90,6 +90,5 @@ def response(resp):
 
 
 def _fetch_supported_languages(resp):
-    ret_val = {}
     peertube_languages = list(loads(resp.text).keys())
     return peertube_languages

searx/engines/pubmed.py

@@ -81,7 +81,7 @@ def response(resp):
             pass
 
         if len(content) > 300:
-                content = content[0:300] + "..."
+            content = content[0:300] + "..."
         # TODO: center snippet on query term
 
         res_dict = {'url': url,

searx/engines/yahoo_news.py

@@ -14,7 +14,7 @@ from datetime import datetime, timedelta
 from urllib.parse import urlencode
 from lxml import html
 from searx.engines.yahoo import parse_url, language_aliases
-from searx.engines.yahoo import _fetch_supported_languages, supported_languages_url  # NOQA
+from searx.engines.yahoo import _fetch_supported_languages, supported_languages_url  # NOQA # pylint: disable=unused-import
 from dateutil import parser
 from searx.utils import extract_text, extract_url, match_language
 