[mod] remove unused import

use
from searx.engines.duckduckgo import _fetch_supported_languages, supported_languages_url  # NOQA
so it is possible to easily remove all unused imports using autoflake:
autoflake --in-place --recursive --remove-all-unused-imports searx tests
This commit is contained in:
Alexandre Flament 2020-11-02 11:19:53 +01:00
parent 6489a560ea
commit 3038052c79
31 changed files with 24 additions and 52 deletions

View file

@ -11,7 +11,7 @@
from urllib.parse import urlencode
from lxml import html
from searx.utils import extract_text, get_torrent_size, int_or_zero
from searx.utils import extract_text, get_torrent_size
# engine dependent config
categories = ['files', 'images', 'videos', 'music']

View file

@ -11,7 +11,6 @@
More info on api: https://arxiv.org/help/api/user-manual
"""
from urllib.parse import urlencode
from lxml import html
from datetime import datetime

View file

@ -16,8 +16,8 @@
import re
from urllib.parse import urlencode
from lxml import html
from searx import logger, utils
from searx.utils import extract_text, match_language, gen_useragent, eval_xpath
from searx import logger
from searx.utils import eval_xpath, extract_text, match_language
logger = logger.getChild('bing engine')
@@ -98,7 +98,6 @@ def response(resp):
result_len = int(result_len_container)
except Exception as e:
logger.debug('result error :\n%s', e)
pass
if result_len and _get_offset_from_pageno(resp.search_params.get("pageno", 0)) > result_len:
return []

View file

@@ -15,10 +15,10 @@
from urllib.parse import urlencode
from lxml import html
from json import loads
import re
from searx.utils import match_language
from searx.engines.bing import _fetch_supported_languages, supported_languages_url, language_aliases
from searx.engines.bing import language_aliases
from searx.engines.bing import _fetch_supported_languages, supported_languages_url # NOQA
# engine dependent config
categories = ['images']

View file

@@ -16,7 +16,8 @@ from dateutil import parser
from urllib.parse import urlencode, urlparse, parse_qsl
from lxml import etree
from searx.utils import list_get, match_language
from searx.engines.bing import _fetch_supported_languages, supported_languages_url, language_aliases
from searx.engines.bing import language_aliases
from searx.engines.bing import _fetch_supported_languages, supported_languages_url # NOQA
# engine dependent config
categories = ['news']

View file

@@ -15,7 +15,8 @@ from lxml import html
from urllib.parse import urlencode
from searx.utils import match_language
from searx.engines.bing import _fetch_supported_languages, supported_languages_url, language_aliases
from searx.engines.bing import language_aliases
from searx.engines.bing import _fetch_supported_languages, supported_languages_url # NOQA
categories = ['videos']
paging = True

View file

@@ -11,7 +11,6 @@
"""
from lxml import html
from operator import itemgetter
from urllib.parse import quote, urljoin
from searx.utils import extract_text, get_torrent_size

View file

@@ -18,7 +18,6 @@ import re
from os.path import expanduser, isabs, realpath, commonprefix
from shlex import split as shlex_split
from subprocess import Popen, PIPE
from time import time
from threading import Thread
from searx import logger

View file

@@ -1,10 +1,7 @@
import json
import re
import unicodedata
from datetime import datetime
from searx.data import CURRENCIES
from searx.data import CURRENCIES # NOQA
categories = []

View file

@@ -15,7 +15,6 @@
from lxml import html
import re
from urllib.parse import urlencode
from searx.utils import extract_text
# engine dependent config

View file

@@ -12,10 +12,8 @@
import random
import string
from dateutil import parser
from json import loads
from urllib.parse import urlencode
from lxml import html
from datetime import datetime
# engine dependent config

View file

@@ -15,8 +15,6 @@
from lxml.html import fromstring
from json import loads
from urllib.parse import urlencode
from searx.poolrequests import get
from searx.utils import extract_text, match_language, eval_xpath
# engine dependent config

View file

@@ -15,11 +15,11 @@ from lxml import html
from searx import logger
from searx.data import WIKIDATA_UNITS
from searx.engines.duckduckgo import _fetch_supported_languages, supported_languages_url, language_aliases
from searx.engines.duckduckgo import language_aliases
from searx.engines.duckduckgo import _fetch_supported_languages, supported_languages_url # NOQA
from searx.utils import extract_text, html_to_text, match_language, get_string_replaces_function
from searx.external_urls import get_external_url, get_earth_coordinates_url, area_to_osm_zoom
logger = logger.getChild('duckduckgo_definitions')
URL = 'https://api.duckduckgo.com/'\

View file

@@ -15,12 +15,9 @@
from json import loads
from urllib.parse import urlencode
from searx.engines.duckduckgo import (
_fetch_supported_languages, supported_languages_url,
get_region_code, language_aliases
)
from searx.engines.duckduckgo import get_region_code
from searx.engines.duckduckgo import _fetch_supported_languages, supported_languages_url # NOQA
from searx.poolrequests import get
from searx.utils import extract_text
# engine dependent config
categories = ['images']

View file

@@ -60,7 +60,6 @@ def response(resp):
except:
logger.debug("Couldn't read number of results.")
pass
for result in eval_xpath(dom, '//section[not(contains(@class, "essay"))]'):
try:

View file

@@ -1,8 +1,5 @@
from json import loads, dumps
from lxml import html
from urllib.parse import quote, urljoin
from requests.auth import HTTPBasicAuth
from searx.utils import extract_text, get_torrent_size
base_url = 'http://localhost:9200'

View file

@@ -29,12 +29,9 @@ from lxml import html
from flask_babel import gettext
from searx import logger
from searx.utils import extract_text, eval_xpath
from searx.engines.google import _fetch_supported_languages, supported_languages_url # NOQA
# pylint: disable=unused-import
from searx.engines.google import (
supported_languages_url,
_fetch_supported_languages,
)
# pylint: enable=unused-import
from searx.engines.google import (

View file

@@ -12,8 +12,8 @@
from urllib.parse import urlencode
from lxml import html
from searx.engines.google import _fetch_supported_languages, supported_languages_url
from searx.utils import match_language
from searx.engines.google import _fetch_supported_languages, supported_languages_url # NOQA
# search-url
categories = ['news']

View file

@@ -11,7 +11,6 @@
"""
from datetime import date, timedelta
from json import loads
from urllib.parse import urlencode
from lxml import html
from searx.utils import extract_text

View file

@@ -12,8 +12,8 @@ from json import loads
from datetime import datetime
from operator import itemgetter
from urllib.parse import quote, urljoin
from searx.utils import extract_text, get_torrent_size
from urllib.parse import quote
from searx.utils import get_torrent_size
# engine dependent config
categories = ["videos", "music", "files"]

View file

@@ -14,7 +14,6 @@ import re
from json import loads
from lxml import html
from dateutil import parser
from io import StringIO
from urllib.parse import quote_plus, urlencode
from searx import logger
from searx.poolrequests import get as http_get

View file

@@ -17,7 +17,6 @@ import re
from unicodedata import normalize, combining
from babel import Locale
from babel.localedata import locale_identifiers
from searx.languages import language_codes
from searx.utils import extract_text, eval_xpath, match_language
# engine dependent config

View file

@@ -21,9 +21,9 @@ from babel.dates import format_datetime, format_date, format_time, get_datetime_
from searx import logger
from searx.data import WIKIDATA_UNITS
from searx.poolrequests import post, get
from searx.engines.wikipedia import _fetch_supported_languages, supported_languages_url
from searx.utils import match_language, searx_useragent, get_string_replaces_function
from searx.external_urls import get_external_url, get_earth_coordinates_url, area_to_osm_zoom
from searx.engines.wikipedia import _fetch_supported_languages, supported_languages_url # NOQA
logger = logger.getChild('wikidata')

View file

@@ -13,9 +13,8 @@ import re
from datetime import datetime, timedelta
from urllib.parse import urlencode
from lxml import html
from searx.engines.yahoo import (
parse_url, _fetch_supported_languages, supported_languages_url, language_aliases
)
from searx.engines.yahoo import parse_url, language_aliases
from searx.engines.yahoo import _fetch_supported_languages, supported_languages_url # NOQA
from dateutil import parser
from searx.utils import extract_text, extract_url, match_language

View file

@@ -11,7 +11,6 @@
from functools import reduce
from json import loads
from urllib.parse import quote_plus
from searx.utils import extract_text, list_get
# engine dependent config
categories = ['videos', 'music']