mirror of
https://github.com/searxng/searxng.git
synced 2025-08-03 10:32:21 +02:00
[fix] spelling
This commit is contained in:
parent
a9b6963971
commit
223b3487c3
50 changed files with 98 additions and 98 deletions
|
@ -92,7 +92,7 @@ def response(resp):
|
|||
|
||||
|
||||
def fetch_traits(engine_traits: EngineTraits):
|
||||
"""Fetch languages from Archlinix-Wiki. The location of the Wiki address of a
|
||||
"""Fetch languages from Archlinux-Wiki. The location of the Wiki address of a
|
||||
language is mapped in a :py:obj:`custom field
|
||||
<searx.enginelib.traits.EngineTraits.custom>` (``wiki_netloc``). Depending
|
||||
on the location, the ``title`` argument in the request is translated.
|
||||
|
@ -128,7 +128,7 @@ def fetch_traits(engine_traits: EngineTraits):
|
|||
|
||||
resp = get('https://wiki.archlinux.org/')
|
||||
if not resp.ok: # type: ignore
|
||||
print("ERROR: response from wiki.archlinix.org is not OK.")
|
||||
print("ERROR: response from wiki.archlinux.org is not OK.")
|
||||
|
||||
dom = lxml.html.fromstring(resp.text) # type: ignore
|
||||
for a in eval_xpath_list(dom, "//a[@class='interlanguage-link-target']"):
|
||||
|
|
|
@ -138,7 +138,7 @@ def fetch_traits(engine_traits: EngineTraits):
|
|||
|
||||
The :py:obj:`description <searx.engines.bing_news.bing_traits_url>` of the
|
||||
first table says *"query parameter when calling the Video Search API."*
|
||||
.. thats why I use the 4. table "News Category API markets" for the
|
||||
.. that's why I use the 4. table "News Category API markets" for the
|
||||
``xpath_market_codes``.
|
||||
|
||||
"""
|
||||
|
|
|
@ -37,7 +37,7 @@ Brave regions
|
|||
=============
|
||||
|
||||
Brave uses two-digit tags for the regions like ``ca`` while SearXNG deals with
|
||||
locales. To get a mapping, all *officatl de-facto* languages of the Brave
|
||||
locales. To get a mapping, all *official de-facto* languages of the Brave
|
||||
region are mapped to regions in SearXNG (see :py:obj:`babel
|
||||
<babel.languages.get_official_languages>`):
|
||||
|
||||
|
@ -63,10 +63,10 @@ region are mapped to regions in SearXNG (see :py:obj:`babel
|
|||
Brave languages
|
||||
===============
|
||||
|
||||
Brave's language support is limited to the UI (menues, area local notations,
|
||||
Brave's language support is limited to the UI (menus, area local notations,
|
||||
etc). Brave's index only seems to support a locale, but it does not seem to
|
||||
support any languages in its index. The choice of available languages is very
|
||||
small (and its not clear to me where the differencee in UI is when switching
|
||||
small (and it's not clear to me where the difference in UI is when switching
|
||||
from en-us to en-ca or en-gb).
|
||||
|
||||
In the :py:obj:`EngineTraits object <searx.enginelib.traits.EngineTraits>` the
|
||||
|
@ -264,7 +264,7 @@ def _parse_search(resp):
|
|||
)
|
||||
if video_tag is not None:
|
||||
|
||||
# In my tests a video tag in the WEB search was mostoften not a
|
||||
# In my tests a video tag in the WEB search was most often not a
|
||||
# video, except the ones from youtube ..
|
||||
|
||||
iframe_src = _get_iframe_src(url)
|
||||
|
@ -405,7 +405,7 @@ def fetch_traits(engine_traits: EngineTraits):
|
|||
# country_name = extract_text(flag.xpath('./following-sibling::*')[0])
|
||||
country_tag = re.search(r'flag-([^\s]*)\s', flag.xpath('./@class')[0]).group(1) # type: ignore
|
||||
|
||||
# add offical languages of the country ..
|
||||
# add official languages of the country ..
|
||||
for lang_tag in babel.languages.get_official_languages(country_tag, de_facto=True):
|
||||
lang_tag = lang_map.get(lang_tag, lang_tag)
|
||||
sxng_tag = region_tag(babel.Locale.parse('%s_%s' % (lang_tag, country_tag.upper())))
|
||||
|
|
|
@ -67,11 +67,11 @@ or ``time``.
|
|||
|
||||
.. hint::
|
||||
|
||||
When *time_range* is activate, the results always orderd by ``time``.
|
||||
When *time_range* is activated, the results are always ordered by ``time``.
|
||||
"""
|
||||
|
||||
bt4g_category = 'all'
|
||||
"""BT$G offers categoies: ``all`` (default), ``audio``, ``movie``, ``doc``,
|
||||
"""BT$G offers categories: ``all`` (default), ``audio``, ``movie``, ``doc``,
|
||||
``app`` and ``other``.
|
||||
"""
|
||||
|
||||
|
|
|
@ -105,7 +105,7 @@ def get_vqd(query):
|
|||
def get_ddg_lang(eng_traits: EngineTraits, sxng_locale, default='en_US'):
|
||||
"""Get DuckDuckGo's language identifier from SearXNG's locale.
|
||||
|
||||
DuckDuckGo defines its lanaguages by region codes (see
|
||||
DuckDuckGo defines its languages by region codes (see
|
||||
:py:obj:`fetch_traits`).
|
||||
|
||||
To get region and language of a DDG service use:
|
||||
|
@ -338,7 +338,7 @@ def fetch_traits(engine_traits: EngineTraits):
|
|||
``Accept-Language`` HTTP header. The value in ``engine_traits.all_locale``
|
||||
is ``wt-wt`` (the region).
|
||||
|
||||
Beside regions DuckDuckGo also defines its lanaguages by region codes. By
|
||||
Beside regions DuckDuckGo also defines its languages by region codes. By
|
||||
example these are the english languages in DuckDuckGo:
|
||||
|
||||
- en_US
|
||||
|
|
|
@ -22,7 +22,7 @@ import babel.core
|
|||
import babel.languages
|
||||
|
||||
from searx.utils import extract_text, eval_xpath, eval_xpath_list, eval_xpath_getindex
|
||||
from searx.locales import language_tag, region_tag, get_offical_locales
|
||||
from searx.locales import language_tag, region_tag, get_official_locales
|
||||
from searx.network import get # see https://github.com/searxng/searxng/issues/762
|
||||
from searx.exceptions import SearxEngineCaptchaException
|
||||
from searx.enginelib.traits import EngineTraits
|
||||
|
@ -184,8 +184,8 @@ def get_google_info(params, eng_traits):
|
|||
#
|
||||
# To select 'all' languages an empty 'lr' value is used.
|
||||
#
|
||||
# Different to other google services, Google Schloar supports to select more
|
||||
# than one language. The languages are seperated by a pipe '|' (logical OR).
|
||||
# Different to other google services, Google Scholar supports to select more
|
||||
# than one language. The languages are separated by a pipe '|' (logical OR).
|
||||
# By example: &lr=lang_zh-TW%7Clang_de selects articles written in
|
||||
# traditional chinese OR german language.
|
||||
|
||||
|
@ -204,7 +204,7 @@ def get_google_info(params, eng_traits):
|
|||
if len(sxng_locale.split('-')) > 1:
|
||||
ret_val['params']['cr'] = 'country' + country
|
||||
|
||||
# gl parameter: (mandatory by Geeogle News)
|
||||
# gl parameter: (mandatory by Google News)
|
||||
# The gl parameter value is a two-letter country code. For WebSearch
|
||||
# results, the gl parameter boosts search results whose country of origin
|
||||
# matches the parameter value. See the Country Codes section for a list of
|
||||
|
@ -465,7 +465,7 @@ def fetch_traits(engine_traits: EngineTraits, add_domains: bool = True):
|
|||
engine_traits.all_locale = 'ZZ'
|
||||
continue
|
||||
|
||||
sxng_locales = get_offical_locales(eng_country, engine_traits.languages.keys(), regional=True)
|
||||
sxng_locales = get_official_locales(eng_country, engine_traits.languages.keys(), regional=True)
|
||||
|
||||
if not sxng_locales:
|
||||
print("ERROR: can't map from google country %s (%s) to a babel region." % (x.get('data-name'), eng_country))
|
||||
|
|
|
@ -72,7 +72,7 @@ time_range_support = False
|
|||
# Google-News results are always *SafeSearch*. Option 'safesearch' is set to
|
||||
# False here, otherwise checker will report safesearch-errors::
|
||||
#
|
||||
# safesearch : results are identitical for safesearch=0 and safesearch=2
|
||||
# safesearch : results are identical for safesearch=0 and safesearch=2
|
||||
safesearch = True
|
||||
# send_accept_language_header = True
|
||||
|
||||
|
@ -155,7 +155,7 @@ def response(resp):
|
|||
|
||||
title = extract_text(eval_xpath(result, './article/h3[1]'))
|
||||
|
||||
# The pub_date is mostly a string like 'yesertday', not a real
|
||||
# The pub_date is mostly a string like 'yesterday', not a real
|
||||
# timezone date or time. Therefore we can't use publishedDate.
|
||||
pub_date = extract_text(eval_xpath(result, './article//time'))
|
||||
pub_origin = extract_text(eval_xpath(result, './article//a[@data-n-tid]'))
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
# SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
# lint: pylint
|
||||
"""Odysee_ is a decentralised video hosting platform.
|
||||
"""Odysee_ is a decentralized video hosting platform.
|
||||
|
||||
.. _Odysee: https://github.com/OdyseeTeam/odysee-frontend
|
||||
"""
|
||||
|
|
|
@ -162,7 +162,7 @@ def response(resp):
|
|||
}
|
||||
)
|
||||
|
||||
# simplify the code below: make sure extratags is a dictionnary
|
||||
# simplify the code below: make sure extratags is a dictionary
|
||||
for result in nominatim_json:
|
||||
if not isinstance(result.get('extratags'), dict):
|
||||
result["extratags"] = {}
|
||||
|
@ -445,7 +445,7 @@ def get_key_label(key_name, lang):
|
|||
if key_name.startswith('currency:'):
|
||||
# currency:EUR --> get the name from the CURRENCIES variable
|
||||
# see https://wiki.openstreetmap.org/wiki/Key%3Acurrency
|
||||
# and for exampe https://taginfo.openstreetmap.org/keys/currency:EUR#values
|
||||
# and for example https://taginfo.openstreetmap.org/keys/currency:EUR#values
|
||||
# but there is also currency=EUR (currently not handled)
|
||||
# https://taginfo.openstreetmap.org/keys/currency#values
|
||||
currency = key_name.split(':')
|
||||
|
|
|
@ -72,7 +72,7 @@ paging = True
|
|||
backend_url: list | str = "https://pipedapi.kavin.rocks"
|
||||
"""Piped-Backend_: The core component behind Piped. The value is an URL or a
|
||||
list of URLs. In the latter case instance will be selected randomly. For a
|
||||
complete list of offical instances see Piped-Instances (`JSON
|
||||
complete list of official instances see Piped-Instances (`JSON
|
||||
<https://piped-instances.kavin.rocks/>`__)
|
||||
|
||||
.. _Piped-Instances: https://github.com/TeamPiped/Piped/wiki/Instances
|
||||
|
|
|
@ -76,7 +76,7 @@ about = {
|
|||
categories = []
|
||||
paging = True
|
||||
qwant_categ = None
|
||||
"""One of ``web``, ``news``, ``images`` or ``videos``"""
|
||||
"""One of ``web-lite`` (or ``web``), ``news``, ``images`` or ``videos``"""
|
||||
|
||||
safesearch = True
|
||||
# safe_search_map = {0: '&safesearch=0', 1: '&safesearch=1', 2: '&safesearch=2'}
|
||||
|
|
|
@ -10,7 +10,7 @@ Example
|
|||
|
||||
To demonstrate the power of database engines, here is a more complex example
|
||||
which reads from a MediathekView_ (DE) movie database. For this example of the
|
||||
SQlite engine download the database:
|
||||
SQLite engine download the database:
|
||||
|
||||
- https://liste.mediathekview.de/filmliste-v2.db.bz2
|
||||
|
||||
|
|
|
@ -23,7 +23,7 @@ The engine has the following settings:
|
|||
Prowlarr-categories_ or Jackett-categories_ for more information.
|
||||
|
||||
``show_torrent_files``:
|
||||
Whether to show the torrent file in the search results. Be carful as using
|
||||
Whether to show the torrent file in the search results. Be careful as using
|
||||
this with Prowlarr_ or Jackett_ leaks the API key. This should be used only
|
||||
if you are querying a Torznab endpoint without authentication or if the
|
||||
instance is private. Be aware that private trackers may ban you if you share
|
||||
|
|
|
@ -287,7 +287,7 @@ def get_results(attribute_result, attributes, language):
|
|||
elif attribute_type == WDGeoAttribute:
|
||||
# geocoordinate link
|
||||
# use the area to get the OSM zoom
|
||||
# Note: ignre the unit (must be km² otherwise the calculation is wrong)
|
||||
# Note: ignore the unit (must be km² otherwise the calculation is wrong)
|
||||
# Should use normalized value p:P2046/psn:P2046/wikibase:quantityAmount
|
||||
area = attribute_result.get('P2046')
|
||||
osm_zoom = area_to_osm_zoom(area) if area else 19
|
||||
|
|
|
@ -86,7 +86,7 @@ Replacements are:
|
|||
Search terms from user.
|
||||
|
||||
``{pageno}``:
|
||||
Page number if engine supports pagging :py:obj:`paging`
|
||||
Page number if engine supports paging :py:obj:`paging`
|
||||
|
||||
``{lang}``:
|
||||
ISO 639-1 language code (en, de, fr ..)
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue