[format.python] initial formatting of the python code

This patch was generated by black [1]::

    make format.python

[1] https://github.com/psf/black

Signed-off-by: Markus Heiser <markus.heiser@darmarit.de>
commit 3d96a9839a (parent fcdc2c2cd2)
Author: Markus Heiser
Date:   2021-12-27 09:26:22 +01:00
184 changed files with 2800 additions and 2836 deletions
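A rough sketch (not part of the commit) of how the style seen in the diff below can be reproduced with black's Python API. The line length of 120 and the disabled string normalization are assumptions inferred from the reformatted output (long joined calls, single quotes kept), not read from the repository's actual configuration; `make format.python` presumably just wraps a plain black invocation over the source tree::

    import black

    # two-line call from the old code that black joins at the assumed line length of 120
    SRC = (
        "preferences = searx.preferences.Preferences(\n"
        "    ['oscar'], engine_categories, searx.engines.engines, [])\n"
    )

    # string_normalization=False keeps the single quotes seen throughout this diff
    mode = black.FileMode(line_length=120, string_normalization=False)
    print(black.format_str(SRC, mode=mode), end="")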


@@ -62,7 +62,7 @@ Example to run it from python:
},
"suggestions": [...]
}
""" # pylint: disable=line-too-long
""" # pylint: disable=line-too-long
import argparse
import sys
@@ -80,7 +80,7 @@ EngineCategoriesVar = Optional[List[str]]
def get_search_query(
args: argparse.Namespace, engine_categories: EngineCategoriesVar = None
args: argparse.Namespace, engine_categories: EngineCategoriesVar = None
) -> searx.search.SearchQuery:
"""Get search results for the query"""
if engine_categories is None:
@@ -94,14 +94,12 @@ def get_search_query(
"categories": category,
"pageno": str(args.pageno),
"language": args.lang,
"time_range": args.timerange
"time_range": args.timerange,
}
preferences = searx.preferences.Preferences(
['oscar'], engine_categories, searx.engines.engines, [])
preferences = searx.preferences.Preferences(['oscar'], engine_categories, searx.engines.engines, [])
preferences.key_value_settings['safesearch'].parse(args.safesearch)
search_query = searx.webadapter.get_search_query_from_webapp(
preferences, form)[0]
search_query = searx.webadapter.get_search_query_from_webapp(preferences, form)[0]
return search_query
@@ -143,14 +141,13 @@ def to_dict(search_query: searx.search.SearchQuery) -> Dict[str, Any]:
"suggestions": list(result_container.suggestions),
"answers": list(result_container.answers),
"paging": result_container.paging,
"results_number": result_container.results_number()
"results_number": result_container.results_number(),
}
return result_container_json
def parse_argument(
args: Optional[List[str]]=None,
category_choices: EngineCategoriesVar=None
args: Optional[List[str]] = None, category_choices: EngineCategoriesVar = None
) -> argparse.Namespace:
"""Parse command line.
@@ -174,24 +171,23 @@ def parse_argument(
if not category_choices:
category_choices = list(searx.engines.categories.keys())
parser = argparse.ArgumentParser(description='Standalone searx.')
parser.add_argument('query', type=str,
help='Text query')
parser.add_argument('--category', type=str, nargs='?',
choices=category_choices,
default='general',
help='Search category')
parser.add_argument('--lang', type=str, nargs='?', default='all',
help='Search language')
parser.add_argument('--pageno', type=int, nargs='?', default=1,
help='Page number starting from 1')
parser.add_argument('query', type=str, help='Text query')
parser.add_argument(
'--safesearch', type=str, nargs='?',
choices=['0', '1', '2'], default='0',
help='Safe content filter from none to strict')
'--category', type=str, nargs='?', choices=category_choices, default='general', help='Search category'
)
parser.add_argument('--lang', type=str, nargs='?', default='all', help='Search language')
parser.add_argument('--pageno', type=int, nargs='?', default=1, help='Page number starting from 1')
parser.add_argument(
'--timerange', type=str,
nargs='?', choices=['day', 'week', 'month', 'year'],
help='Filter by time range')
'--safesearch',
type=str,
nargs='?',
choices=['0', '1', '2'],
default='0',
help='Safe content filter from none to strict',
)
parser.add_argument(
'--timerange', type=str, nargs='?', choices=['day', 'week', 'month', 'year'], help='Filter by time range'
)
return parser.parse_args(args)
@@ -206,6 +202,4 @@ if __name__ == '__main__':
searx.search.initialize_processors(settings_engines)
search_q = get_search_query(prog_args, engine_categories=engine_cs)
res_dict = to_dict(search_q)
sys.stdout.write(dumps(
res_dict, sort_keys=True, indent=4, ensure_ascii=False,
default=json_serial))
sys.stdout.write(dumps(res_dict, sort_keys=True, indent=4, ensure_ascii=False, default=json_serial))


@@ -85,9 +85,7 @@ def add_currency_label(db, label, iso4217, language):
def wikidata_request_result_iterator(request):
result = wikidata.send_wikidata_query(
request.replace('%LANGUAGES_SPARQL%', LANGUAGES_SPARQL)
)
result = wikidata.send_wikidata_query(request.replace('%LANGUAGES_SPARQL%', LANGUAGES_SPARQL))
if result is not None:
for r in result['results']['bindings']:
yield r
@@ -151,5 +149,6 @@ def main():
with open(get_filename(), 'w', encoding='utf8') as f:
json.dump(db, f, ensure_ascii=False, indent=4)
if __name__ == '__main__':
main()


@@ -56,7 +56,8 @@ NOT_A_DESCRIPTION = [
SKIP_ENGINE_SOURCE = [
# fmt: off
('gitlab', 'wikidata') # descriptions are about wikipedia disambiguation pages
('gitlab', 'wikidata')
# descriptions are about wikipedia disambiguation pages
# fmt: on
]
@@ -94,10 +95,7 @@ def update_description(engine_name, lang, description, source, replace=True):
def get_wikipedia_summary(lang, pageid):
params = {
'language': lang.replace('_','-'),
'headers': {}
}
params = {'language': lang.replace('_', '-'), 'headers': {}}
searx.engines.engines['wikipedia'].request(pageid, params)
try:
response = searx.network.get(params['url'], headers=params['headers'], timeout=10)
@@ -162,10 +160,7 @@ def initialize():
global IDS, WIKIPEDIA_LANGUAGES, LANGUAGES_SPARQL
searx.search.initialize()
wikipedia_engine = searx.engines.engines['wikipedia']
WIKIPEDIA_LANGUAGES = {
language: wikipedia_engine.url_lang(language.replace('_', '-'))
for language in LANGUAGES
}
WIKIPEDIA_LANGUAGES = {language: wikipedia_engine.url_lang(language.replace('_', '-')) for language in LANGUAGES}
WIKIPEDIA_LANGUAGES['nb_NO'] = 'no'
LANGUAGES_SPARQL = ', '.join(f"'{l}'" for l in set(WIKIPEDIA_LANGUAGES.values()))
for engine_name, engine in searx.engines.engines.items():
@@ -180,9 +175,7 @@ def initialize():
def fetch_wikidata_descriptions():
searx.network.set_timeout_for_thread(60)
result = wikidata.send_wikidata_query(
SPARQL_DESCRIPTION
.replace('%IDS%', IDS)
.replace('%LANGUAGES_SPARQL%', LANGUAGES_SPARQL)
SPARQL_DESCRIPTION.replace('%IDS%', IDS).replace('%LANGUAGES_SPARQL%', LANGUAGES_SPARQL)
)
if result is not None:
for binding in result['results']['bindings']:
@@ -197,9 +190,7 @@ def fetch_wikidata_descriptions():
def fetch_wikipedia_descriptions():
result = wikidata.send_wikidata_query(
SPARQL_WIKIPEDIA_ARTICLE
.replace('%IDS%', IDS)
.replace('%LANGUAGES_SPARQL%', LANGUAGES_SPARQL)
SPARQL_WIKIPEDIA_ARTICLE.replace('%IDS%', IDS).replace('%LANGUAGES_SPARQL%', LANGUAGES_SPARQL)
)
if result is not None:
for binding in result['results']['bindings']:
@@ -226,9 +217,9 @@ def fetch_website_description(engine_name, website):
# the front page can't be fetched: skip this engine
return
wikipedia_languages_r = { V: K for K, V in WIKIPEDIA_LANGUAGES.items() }
wikipedia_languages_r = {V: K for K, V in WIKIPEDIA_LANGUAGES.items()}
languages = ['en', 'es', 'pt', 'ru', 'tr', 'fr']
languages = languages + [ l for l in LANGUAGES if l not in languages]
languages = languages + [l for l in LANGUAGES if l not in languages]
previous_matched_lang = None
previous_count = 0
@@ -281,9 +272,7 @@ def get_output():
* description (if source = "wikipedia")
* [f"engine:lang", "ref"] (reference to another existing description)
"""
output = {
locale: {} for locale in LOCALE_NAMES
}
output = {locale: {} for locale in LOCALE_NAMES}
seen_descriptions = {}


@@ -113,13 +113,13 @@ def parse_ddg_bangs(ddg_bangs):
# only for the https protocol: "https://example.com" becomes "//example.com"
if bang_url.startswith(HTTPS_COLON + '//'):
bang_url = bang_url[len(HTTPS_COLON):]
bang_url = bang_url[len(HTTPS_COLON) :]
#
if bang_url.startswith(HTTP_COLON + '//') and bang_url[len(HTTP_COLON):] in bang_urls:
if bang_url.startswith(HTTP_COLON + '//') and bang_url[len(HTTP_COLON) :] in bang_urls:
# if the bang_url uses the http:// protocol, and the same URL exists in https://
# then reuse the https:// bang definition. (written //example.com)
bang_def_output = bang_urls[bang_url[len(HTTP_COLON):]]
bang_def_output = bang_urls[bang_url[len(HTTP_COLON) :]]
else:
# normal use case : new http:// URL or https:// URL (without "https:", see above)
bang_rank = str(bang_definition['r'])
@@ -151,9 +151,6 @@ def get_bangs_filename():
if __name__ == '__main__':
bangs_url, bangs_version = get_bang_url()
print(f'fetch bangs from {bangs_url}')
output = {
'version': bangs_version,
'trie': parse_ddg_bangs(fetch_ddg_bangs(bangs_url))
}
output = {'version': bangs_version, 'trie': parse_ddg_bangs(fetch_ddg_bangs(bangs_url))}
with open(get_bangs_filename(), 'w', encoding="utf8") as fp:
json.dump(output, fp, ensure_ascii=False, indent=4)


@@ -40,7 +40,7 @@ def fetch_firefox_versions():
url = urlparse(urljoin(URL, link))
path = url.path
if path.startswith(RELEASE_PATH):
version = path[len(RELEASE_PATH):-1]
version = path[len(RELEASE_PATH) : -1]
if NORMAL_REGEX.match(version):
versions.append(LooseVersion(version))


@@ -31,8 +31,7 @@ def fetch_supported_languages():
for engine_name in names:
if hasattr(engines[engine_name], 'fetch_supported_languages'):
engines_languages[engine_name] = engines[engine_name].fetch_supported_languages()
print("fetched %s languages from engine %s" % (
len(engines_languages[engine_name]), engine_name))
print("fetched %s languages from engine %s" % (len(engines_languages[engine_name]), engine_name))
if type(engines_languages[engine_name]) == list:
engines_languages[engine_name] = sorted(engines_languages[engine_name])
@@ -60,8 +59,9 @@ def join_language_lists(engines_languages):
# apply custom fixes if necessary
if lang_code in getattr(engines[engine_name], 'language_aliases', {}).values():
lang_code = next(lc for lc, alias in engines[engine_name].language_aliases.items()
if lang_code == alias)
lang_code = next(
lc for lc, alias in engines[engine_name].language_aliases.items() if lang_code == alias
)
locale = get_locale(lang_code)
@@ -85,10 +85,12 @@ def join_language_lists(engines_languages):
english_name = None
# add language to list
language_list[short_code] = {'name': language_name,
'english_name': english_name,
'counter': set(),
'countries': dict()}
language_list[short_code] = {
'name': language_name,
'english_name': english_name,
'counter': set(),
'countries': dict(),
}
# add language with country if not in list
if lang_code != short_code and lang_code not in language_list[short_code]['countries']:
@@ -97,8 +99,7 @@ def join_language_lists(engines_languages):
# get country name from babel's Locale object
country_name = locale.get_territory_name()
language_list[short_code]['countries'][lang_code] = {'country_name': country_name,
'counter': set()}
language_list[short_code]['countries'][lang_code] = {'country_name': country_name, 'counter': set()}
# count engine for both language_country combination and language alone
language_list[short_code]['counter'].add(engine_name)
@@ -112,17 +113,23 @@ def join_language_lists(engines_languages):
def filter_language_list(all_languages):
min_engines_per_lang = 13
min_engines_per_country = 7
main_engines = [engine_name for engine_name in engines.keys()
if 'general' in engines[engine_name].categories and
engines[engine_name].supported_languages and
not engines[engine_name].disabled]
main_engines = [
engine_name
for engine_name in engines.keys()
if 'general' in engines[engine_name].categories
and engines[engine_name].supported_languages
and not engines[engine_name].disabled
]
# filter list to include only languages supported by most engines or all default general engines
filtered_languages = {code: lang for code, lang
in all_languages.items()
if (len(lang['counter']) >= min_engines_per_lang or
all(main_engine in lang['counter']
for main_engine in main_engines))}
filtered_languages = {
code: lang
for code, lang in all_languages.items()
if (
len(lang['counter']) >= min_engines_per_lang
or all(main_engine in lang['counter'] for main_engine in main_engines)
)
}
def _copy_lang_data(lang, country_name=None):
new_dict = dict()
@@ -176,22 +183,24 @@ def write_languages_file(languages):
"# -*- coding: utf-8 -*-",
"# list of language codes",
"# this file is generated automatically by utils/fetch_languages.py",
"language_codes ="
"language_codes =",
)
language_codes = tuple([
(
code,
languages[code]['name'].split(' (')[0],
languages[code].get('country_name') or '',
languages[code].get('english_name') or ''
) for code in sorted(languages)
])
language_codes = tuple(
[
(
code,
languages[code]['name'].split(' (')[0],
languages[code].get('country_name') or '',
languages[code].get('english_name') or '',
)
for code in sorted(languages)
]
)
with open(languages_file, 'w') as new_file:
file_content = "{file_headers} \\\n{language_codes}".format(
file_headers='\n'.join(file_headers),
language_codes=pformat(language_codes, indent=4)
file_headers='\n'.join(file_headers), language_codes=pformat(language_codes, indent=4)
)
new_file.write(file_content)
new_file.close()


@@ -84,9 +84,8 @@ PRESET_KEYS = {
('internet_access', 'ssid'): {'en': 'Wi-Fi'},
}
INCLUDED_KEYS = {
('addr', )
}
INCLUDED_KEYS = {('addr',)}
def get_preset_keys():
results = collections.OrderedDict()
@@ -97,6 +96,7 @@ def get_preset_keys():
r.setdefault('*', value)
return results
def get_keys():
results = get_preset_keys()
response = wikidata.send_wikidata_query(SPARQL_KEYS_REQUEST)
@@ -110,18 +110,16 @@ def get_keys():
# label for the key "contact.email" is "Email"
# whatever the language
r = results.setdefault('contact', {})
r[keys[1]] = {
'*': {
'en': keys[1]
}
}
r[keys[1]] = {'*': {'en': keys[1]}}
continue
if tuple(keys) in PRESET_KEYS:
# skip presets (already set above)
continue
if get_key_rank(':'.join(keys)) is None\
and ':'.join(keys) not in VALUE_TO_LINK\
and tuple(keys) not in INCLUDED_KEYS:
if (
get_key_rank(':'.join(keys)) is None
and ':'.join(keys) not in VALUE_TO_LINK
and tuple(keys) not in INCLUDED_KEYS
):
# keep only keys that will be displayed by openstreetmap.py
continue
label = key['itemLabel']['value'].lower()
@@ -160,6 +158,7 @@ def get_tags():
results.setdefault(tag_category, {}).setdefault(tag_type, {}).setdefault(lang, label)
return results
def optimize_data_lang(translations):
language_to_delete = []
# remove "zh-hk" entry if the value is the same as "zh"
@@ -184,12 +183,14 @@ def optimize_data_lang(translations):
for language in language_to_delete:
del translations[language]
def optimize_tags(data):
for v in data.values():
for translations in v.values():
optimize_data_lang(translations)
return data
def optimize_keys(data):
for k, v in data.items():
if k == '*':
@@ -198,9 +199,11 @@ def optimize_keys(data):
optimize_keys(v)
return data
def get_osm_tags_filename():
return Path(searx_dir) / "data" / "osm_keys_tags.json"
if __name__ == '__main__':
set_timeout_for_thread(60)


@@ -120,7 +120,7 @@ def get_css(cssclass, style):
css_text = HtmlFormatter(style=style).get_style_defs(cssclass)
result += cssclass + RULE_CODE_LINENOS + '\n\n'
for line in css_text.splitlines():
if ' ' in line and not line.startswith(cssclass):
if ' ' in line and not line.startswith(cssclass):
line = cssclass + ' ' + line
result += line + '\n'
return result