[mod] replace searx.languages with searx.sxng_locales

With the language and region tags from the EngineTraitsMap, the handling of
SearXNG's language and region tags has been normalized and is no longer a
*mystery*.  The "languages" have become "locales" that are supported by babel,
which allows update_engine_traits.py to be simplified considerably.
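
For example (a small illustration, not part of the diff below), SearXNG's tags
can now be handed to babel directly::

    import babel

    # SearXNG region tags use '-' as separator (e.g. 'fr-BE'); babel parses
    # them when given the separator explicitly.
    locale = babel.Locale.parse('fr-BE', sep='-')
    print(locale.get_language_name())   # e.g. 'français'
    print(locale.get_territory_name())  # e.g. 'Belgique'
    print(locale.english_name)          # e.g. 'French (Belgium)'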

Other places in the code can be simplified as well, but those simplifications
should (and can) only be done once none of the engines use the deprecated
EngineTraits.supported_languages interface anymore.

This commit replaces searx.languages with searx.sxng_locales and renames some
identifiers from "language" to "locale" (e.g. language_codes --> sxng_locales).
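
For consumers of the generated list, the change boils down to an import and a
name.  An illustrative sketch (not part of this commit) of a typical caller::

    # before: searx/languages.py exposed a tuple named language_codes
    # from searx.languages import language_codes

    # after: searx/sxng_locales.py exposes a tuple named sxng_locales
    from searx.sxng_locales import sxng_locales

    # each entry is a five-field tuple, as described by the docstring that
    # update_engine_traits.py writes into sxng_locales.py
    for sxng_tag, lang_name, region_name, english_name, flag in sxng_locales:
        print(sxng_tag, english_name, flag)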

Signed-off-by: Markus Heiser <markus.heiser@darmarit.de>
Markus Heiser 2022-10-10 19:31:22 +02:00
parent 7daf4f95ef
commit c9cd376186
10 changed files with 233 additions and 304 deletions


@@ -18,17 +18,69 @@ the :origin:`CI Update data ... <.github/workflows/data-update.yml>`
from unicodedata import lookup
from pathlib import Path
from pprint import pformat
from babel import Locale, UnknownLocaleError
from babel.languages import get_global
from babel.core import parse_locale
import babel
from searx import settings, searx_dir
from searx import network
from searx.engines import load_engines, engines
from searx.engines import load_engines
from searx.enginelib.traits import EngineTraitsMap
# Output files.
languages_file = Path(searx_dir) / 'languages.py'
languages_file = Path(searx_dir) / 'sxng_locales.py'
languages_file_header = """\
# -*- coding: utf-8 -*-
'''List of SearXNG's locale codes.
This file is generated automatically by::
./manage pyenv.cmd searxng_extra/update/update_engine_traits.py
'''
sxng_locales = (
"""
languages_file_footer = """,
)
'''
A list of five-digit tuples:
0. SearXNG's internal locale tag (a language or region tag)
1. Name of the language (:py:obj:`babel.core.Locale.get_language_name`)
2. For region tags the name of the region (:py:obj:`babel.core.Locale.get_territory_name`).
Empty string for language tags.
3. English language name (from :py:obj:`babel.core.Locale.english_name`)
4. Unicode flag (emoji) that fits to SearXNG's internal region tag. Languages
are represented by a globe (\U0001F310)
.. code:: python
('en', 'English', '', 'English', '\U0001f310'),
('en-CA', 'English', 'Canada', 'English', '\U0001f1e8\U0001f1e6'),
('en-US', 'English', 'United States', 'English', '\U0001f1fa\U0001f1f8'),
..
('fr', 'Français', '', 'French', '\U0001f310'),
('fr-BE', 'Français', 'Belgique', 'French', '\U0001f1e7\U0001f1ea'),
('fr-CA', 'Français', 'Canada', 'French', '\U0001f1e8\U0001f1e6'),
:meta hide-value:
'''
"""
lang2emoji = {
'ha': '\U0001F1F3\U0001F1EA', # Hausa / Niger
'bs': '\U0001F1E7\U0001F1E6', # Bosnian / Bosnia & Herzegovina
'jp': '\U0001F1EF\U0001F1F5', # Japanese
'ua': '\U0001F1FA\U0001F1E6', # Ukrainian
'he': '\U0001F1EE\U0001F1F7', # Hebrew
}
def main():
load_engines(settings['engines'])
# traits_map = EngineTraitsMap.from_data()
traits_map = fetch_traits_map()
sxng_tag_list = filter_locales(traits_map)
write_languages_file(sxng_tag_list)
def fetch_traits_map():
@@ -45,234 +97,69 @@ def fetch_traits_map():
return traits_map
# Get babel Locale object from lang_code if possible.
def get_locale(lang_code):
try:
locale = Locale.parse(lang_code, sep='-')
return locale
except (UnknownLocaleError, ValueError):
return None
def filter_locales(traits_map: EngineTraitsMap):
"""Filter language & region tags by a threshold."""
min_eng_per_region = 11
min_eng_per_lang = 13
_ = {}
for eng in traits_map.values():
for reg in eng.regions.keys():
_[reg] = _.get(reg, 0) + 1
regions = set(k for k, v in _.items() if v >= min_eng_per_region)
lang_from_region = set(k.split('-')[0] for k in regions)
_ = {}
for eng in traits_map.values():
for lang in eng.languages.keys():
# ignore script types like zh_Hant, zh_Hans or sr_Latin, pa_Arab (they are
# already counted by the existence of 'zh', 'sr' or 'pa')
if '_' in lang:
# print("ignore %s" % lang)
continue
_[lang] = _.get(lang, 0) + 1
languages = set(k for k, v in _.items() if v >= min_eng_per_lang)
sxng_tag_list = set()
sxng_tag_list.update(regions)
sxng_tag_list.update(lang_from_region)
sxng_tag_list.update(languages)
return sxng_tag_list
lang2emoji = {
'ha': '\U0001F1F3\U0001F1EA', # Hausa / Niger
'bs': '\U0001F1E7\U0001F1E6', # Bosnian / Bosnia & Herzegovina
'jp': '\U0001F1EF\U0001F1F5', # Japanese
'ua': '\U0001F1FA\U0001F1E6', # Ukrainian
'he': '\U0001F1EE\U0001F1F7', # Hebrew
}
def write_languages_file(sxng_tag_list):
language_codes = []
def get_unicode_flag(lang_code):
"""Determine a unicode flag (emoji) that fits to the ``lang_code``"""
for sxng_tag in sorted(sxng_tag_list):
sxng_locale: babel.Locale = babel.Locale.parse(sxng_tag, sep='-')
emoji = lang2emoji.get(lang_code.lower())
if emoji:
return emoji
flag = get_unicode_flag(sxng_locale) or ''
if len(lang_code) == 2:
return '\U0001F310'
language = territory = script = variant = ''
try:
language, territory, script, variant = parse_locale(lang_code, '-')
except ValueError as exc:
print(exc)
# https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2
if not territory:
# https://www.unicode.org/emoji/charts/emoji-list.html#country-flag
emoji = lang2emoji.get(language)
if not emoji:
print(
"%s --> language: %s / territory: %s / script: %s / variant: %s"
% (lang_code, language, territory, script, variant)
)
return emoji
emoji = lang2emoji.get(territory.lower())
if emoji:
return emoji
try:
c1 = lookup('REGIONAL INDICATOR SYMBOL LETTER ' + territory[0])
c2 = lookup('REGIONAL INDICATOR SYMBOL LETTER ' + territory[1])
# print("%s --> territory: %s --> %s%s" %(lang_code, territory, c1, c2 ))
except KeyError as exc:
print("%s --> territory: %s --> %s" % (lang_code, territory, exc))
return None
return c1 + c2
def get_territory_name(lang_code):
country_name = None
locale = get_locale(lang_code)
try:
if locale is not None:
country_name = locale.get_territory_name()
except FileNotFoundError as exc:
print("ERROR: %s --> %s" % (locale, exc))
return country_name
def join_language_lists(traits_map: EngineTraitsMap):
"""Join all languages of the engines into one list. The returned language list
contains language codes (``zh``) and region codes (``zh-TW``). The codes can
be parsed by babel::
babel.Locale.parse(language_list[n])
"""
# pylint: disable=too-many-branches
language_list = {}
for eng_name, eng_traits in traits_map.items():
eng = engines[eng_name]
eng_codes = set()
if eng_traits.data_type == 'traits_v1':
# items of type 'engine_traits' do have regions & languages, the
# list of eng_codes should contain both.
eng_codes.update(eng_traits.regions.keys())
eng_codes.update(eng_traits.languages.keys())
elif eng_traits.data_type == 'supported_languages':
# vintage / deprecated
_codes = set()
if isinstance(eng_traits.supported_languages, dict):
_codes.update(eng_traits.supported_languages.keys())
elif isinstance(eng_traits.supported_languages, list):
_codes.update(eng_traits.supported_languages)
else:
raise TypeError('engine.supported_languages type %s is unknown' % type(eng_traits.supported_languages))
for lang_code in _codes:
# apply custom fixes if necessary
if lang_code in getattr(eng, 'language_aliases', {}).values():
lang_code = next(lc for lc, alias in eng.language_aliases.items() if lang_code == alias)
eng_codes.add(lang_code)
for lang_code in eng_codes:
locale = get_locale(lang_code)
# ensure that lang_code uses standard language and country codes
if locale and locale.territory:
lang_code = "{lang}-{country}".format(lang=locale.language, country=locale.territory)
short_code = lang_code.split('-')[0]
# add language without country if not in list
if short_code not in language_list:
if locale:
# get language's data from babel's Locale object
language_name = locale.get_language_name().title()
english_name = locale.english_name.split(' (')[0]
elif short_code in traits_map['wikipedia'].supported_languages:
# get language's data from wikipedia if not known by babel
language_name = traits_map['wikipedia'].supported_languages[short_code]['name']
english_name = traits_map['wikipedia'].supported_languages[short_code]['english_name']
else:
language_name = None
english_name = None
# add language to list
language_list[short_code] = {
'name': language_name,
'english_name': english_name,
'counter': set(),
'countries': {},
}
# add language with country if not in list
if lang_code != short_code and lang_code not in language_list[short_code]['countries']:
country_name = ''
if locale:
# get country name from babel's Locale object
try:
country_name = locale.get_territory_name()
except FileNotFoundError as exc:
print("ERROR: %s --> %s" % (locale, exc))
locale = None
language_list[short_code]['countries'][lang_code] = {
'country_name': country_name,
'counter': set(),
}
# count engine for both language_country combination and language alone
language_list[short_code]['counter'].add(eng_name)
if lang_code != short_code:
language_list[short_code]['countries'][lang_code]['counter'].add(eng_name)
return language_list
# Filter language list so it only includes the most supported languages and countries
def filter_language_list(joined_languages_map):
min_engines_per_lang = 12
min_engines_per_country = 7
# pylint: disable=consider-using-dict-items, consider-iterating-dictionary
main_engines = [
engine_name
for engine_name in engines.keys()
if 'general' in engines[engine_name].categories
and hasattr(engines[engine_name], 'supported_languages')
and engines[engine_name].supported_languages
and not engines[engine_name].disabled
]
# filter list to include only languages supported by most engines or all default general engines
filtered_languages = {
code: lang
for code, lang in joined_languages_map.items()
if (
len(lang['counter']) >= min_engines_per_lang
or all(main_engine in lang['counter'] for main_engine in main_engines)
item = (
sxng_tag,
sxng_locale.get_language_name().title(),
sxng_locale.get_territory_name() or '',
sxng_locale.english_name.split(' (')[0],
UnicodeEscape(flag),
)
}
def _copy_lang_data(lang, country_name=None):
new_dict = {}
new_dict['name'] = joined_languages_map[lang]['name']
new_dict['english_name'] = joined_languages_map[lang]['english_name']
if country_name:
new_dict['country_name'] = country_name
return new_dict
language_codes.append(item)
# for each language get country codes supported by most engines or at least one country code
filtered_languages_with_countries = {}
for lang, lang_data in filtered_languages.items():
countries = lang_data['countries']
filtered_countries = {}
language_codes = tuple(language_codes)
# get language's country codes with enough supported engines
for lang_country, country_data in countries.items():
if len(country_data['counter']) >= min_engines_per_country:
filtered_countries[lang_country] = _copy_lang_data(lang, country_data['country_name'])
# add language without countries too if there's more than one country to choose from
if len(filtered_countries) > 1:
filtered_countries[lang] = _copy_lang_data(lang, None)
elif len(filtered_countries) == 1:
lang_country = next(iter(filtered_countries))
# if no country has enough engines try to get most likely country code from babel
if not filtered_countries:
lang_country = None
subtags = get_global('likely_subtags').get(lang)
if subtags:
country_code = subtags.split('_')[-1]
if len(country_code) == 2:
lang_country = "{lang}-{country}".format(lang=lang, country=country_code)
if lang_country:
filtered_countries[lang_country] = _copy_lang_data(lang, None)
else:
filtered_countries[lang] = _copy_lang_data(lang, None)
filtered_languages_with_countries.update(filtered_countries)
return filtered_languages_with_countries
with open(languages_file, 'w', encoding='utf-8') as new_file:
file_content = "{header} {language_codes}{footer}".format(
header=languages_file_header,
language_codes=pformat(language_codes, width=120, indent=4)[1:-1],
footer=languages_file_footer,
)
new_file.write(file_content)
new_file.close()
class UnicodeEscape(str):
@@ -282,54 +169,29 @@ class UnicodeEscape(str):
return "'" + "".join([chr(c) for c in self.encode('unicode-escape')]) + "'"
# Write languages.py.
def write_languages_file(languages):
file_headers = (
"# -*- coding: utf-8 -*-",
"# list of language codes",
"# this file is generated automatically by utils/fetch_languages.py",
"language_codes = (\n",
)
def get_unicode_flag(locale: babel.Locale):
"""Determine a unicode flag (emoji) that fits to the ``locale``"""
language_codes = []
emoji = lang2emoji.get(locale.language)
if emoji:
return emoji
for code in sorted(languages):
if not locale.territory:
return '\U0001F310'
name = languages[code]['name']
if name is None:
print("ERROR: languages['%s'] --> %s" % (code, languages[code]))
continue
emoji = lang2emoji.get(locale.territory.lower())
if emoji:
return emoji
flag = get_unicode_flag(code) or ''
item = (
code,
languages[code]['name'].split(' (')[0],
get_territory_name(code) or '',
languages[code].get('english_name') or '',
UnicodeEscape(flag),
)
try:
c1 = lookup('REGIONAL INDICATOR SYMBOL LETTER ' + locale.territory[0])
c2 = lookup('REGIONAL INDICATOR SYMBOL LETTER ' + locale.territory[1])
# print("OK : %s --> %s%s" % (locale, c1, c2))
except KeyError as exc:
print("ERROR: %s --> %s" % (locale, exc))
return None
language_codes.append(item)
language_codes = tuple(language_codes)
with open(languages_file, 'w', encoding='utf-8') as new_file:
file_content = "{file_headers} {language_codes},\n)\n".format(
# fmt: off
file_headers = '\n'.join(file_headers),
language_codes = pformat(language_codes, indent=4)[1:-1]
# fmt: on
)
new_file.write(file_content)
new_file.close()
def main():
load_engines(settings['engines'])
traits_map = fetch_traits_map()
joined_languages_map = join_language_lists(traits_map)
filtered_languages = filter_language_list(joined_languages_map)
write_languages_file(filtered_languages)
return c1 + c2
if __name__ == "__main__":