Merge pull request #1814 from return42/fix-typos
[fix] typos / reported by @kianmeng in searx PR-3366
Commit a3148e5115
52 changed files with 96 additions and 96 deletions
@@ -275,12 +275,12 @@ def is_engine_active(engine: Engine):
 
 def register_engine(engine: Engine):
     if engine.name in engines:
-        logger.error('Engine config error: ambigious name: {0}'.format(engine.name))
+        logger.error('Engine config error: ambiguous name: {0}'.format(engine.name))
         sys.exit(1)
     engines[engine.name] = engine
 
     if engine.shortcut in engine_shortcuts:
-        logger.error('Engine config error: ambigious shortcut: {0}'.format(engine.shortcut))
+        logger.error('Engine config error: ambiguous shortcut: {0}'.format(engine.shortcut))
         sys.exit(1)
     engine_shortcuts[engine.shortcut] = engine.name
 
@@ -79,7 +79,7 @@ def response(resp):
     # * book / performing art / film / television / media franchise / concert tour / playwright
     # * prepared food
     # * website / software / os / programming language / file format / software engineer
-    # * compagny
+    # * company
 
     content = ''
     heading = search_res.get('Heading', '')
 
@@ -40,7 +40,7 @@ def response(resp):
 
     search_res = loads(resp.text)
 
-    # check if items are recieved
+    # check if items are received
     if 'items' not in search_res:
         return []
 
@@ -326,14 +326,14 @@ def response(resp):
 
         # google *sections*
         if extract_text(eval_xpath(result, g_section_with_header)):
-            logger.debug("ingoring <g-section-with-header>")
+            logger.debug("ignoring <g-section-with-header>")
             continue
 
         try:
             title_tag = eval_xpath_getindex(result, title_xpath, 0, default=None)
             if title_tag is None:
                 # this not one of the common google results *section*
-                logger.debug('ingoring item from the result_xpath list: missing title')
+                logger.debug('ignoring item from the result_xpath list: missing title')
                 continue
             title = extract_text(title_tag)
             url = eval_xpath_getindex(result, href_xpath, 0, None)
 
@@ -341,7 +341,7 @@ def response(resp):
                 continue
             content = extract_text(eval_xpath_getindex(result, content_xpath, 0, default=None), allow_none=True)
             if content is None:
-                logger.debug('ingoring item from the result_xpath list: missing content of title "%s"', title)
+                logger.debug('ignoring item from the result_xpath list: missing content of title "%s"', title)
                 continue
 
             logger.debug('add link to results: %s', title)
 
@@ -141,7 +141,7 @@ def response(resp):
             padding = (4 - (len(jslog) % 4)) * "="
             jslog = b64decode(jslog + padding)
         except binascii.Error:
-            # URL cant be read, skip this result
+            # URL can't be read, skip this result
             continue
 
         # now we have : b'[null, ... null,"https://www.cnn.com/.../index.html"]'
 
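The padding arithmetic in this hunk is easy to get wrong, so here is a small, self-contained sketch of the same idea (the helper name and the sample payload are illustrative, not from the engine): base64 values embedded in URLs usually arrive without their trailing '=' characters, and `-len(payload) % 4` restores the 0-3 characters that are missing before decoding.

    import base64
    import binascii
    from typing import Optional

    def decode_embedded_b64(payload: str) -> Optional[bytes]:
        # restore the 0-3 '=' characters stripped from the URL-embedded value
        padding = "=" * (-len(payload) % 4)
        try:
            return base64.b64decode(payload + padding)
        except binascii.Error:
            # URL can't be read, the caller skips this result
            return None

    print(decode_embedded_b64("aHR0cHM6Ly93d3cuY25uLmNvbQ"))  # b'https://www.cnn.com'
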
@@ -150,7 +150,7 @@ def response(resp):
 
         # ignore google *sections*
         if extract_text(eval_xpath(result, g_section_with_header)):
-            logger.debug("ingoring <g-section-with-header>")
+            logger.debug("ignoring <g-section-with-header>")
             continue
 
         # ingnore articles without an image id / e.g. news articles
 
@@ -70,7 +70,7 @@ def response(resp):
         elif properties.get('osm_type') == 'R':
             osm_type = 'relation'
         else:
-            # continue if invalide osm-type
+            # continue if invalid osm-type
             continue
 
         url = result_base_url.format(osm_type=osm_type, osm_id=properties.get('osm_id'))
 
@@ -209,7 +209,7 @@ def _fetch_supported_languages(resp):
     # native name, the English name of the writing script used by the language,
     # or occasionally something else entirely.
 
-    # this cases are so special they need to be hardcoded, a couple of them are mispellings
+    # this cases are so special they need to be hardcoded, a couple of them are misspellings
     language_names = {
         'english_uk': 'en-GB',
         'fantizhengwen': ['zh-TW', 'zh-HK'],
 
@@ -50,7 +50,7 @@ WIKIDATA_PROPERTIES = {
 # SERVICE wikibase:label: https://en.wikibooks.org/wiki/SPARQL/SERVICE_-_Label#Manual_Label_SERVICE
 # https://en.wikibooks.org/wiki/SPARQL/WIKIDATA_Precision,_Units_and_Coordinates
 # https://www.mediawiki.org/wiki/Wikibase/Indexing/RDF_Dump_Format#Data_model
-# optmization:
+# optimization:
 # * https://www.wikidata.org/wiki/Wikidata:SPARQL_query_service/query_optimization
 # * https://github.com/blazegraph/database/wiki/QueryHints
 QUERY_TEMPLATE = """
 
@@ -386,7 +386,7 @@ def get_attributes(language):
     add_amount('P2046') # area
     add_amount('P281') # postal code
     add_label('P38') # currency
-    add_amount('P2048') # heigth (building)
+    add_amount('P2048') # height (building)
 
     # Media
     for p in [
 
@@ -50,7 +50,7 @@ def request(query, params):
 # replace private user area characters to make text legible
 def replace_pua_chars(text):
     pua_chars = {
-        '\uf522': '\u2192', # rigth arrow
+        '\uf522': '\u2192', # right arrow
         '\uf7b1': '\u2115', # set of natural numbers
         '\uf7b4': '\u211a', # set of rational numbers
         '\uf7b5': '\u211d', # set of real numbers
 
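For context, a minimal sketch of how a mapping like `pua_chars` is typically applied; this is an illustration only, the full body of `replace_pua_chars` is not part of the hunk above.

    def replace_pua_chars(text: str) -> str:
        # substitute private-use-area codepoints with their visible equivalents
        pua_chars = {
            '\uf522': '\u2192',  # right arrow
            '\uf7b1': '\u2115',  # set of natural numbers
        }
        for pua, replacement in pua_chars.items():
            text = text.replace(pua, replacement)
        return text

    print(replace_pua_chars('natural numbers: \uf7b1'))  # 'natural numbers: ℕ'
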
@@ -53,7 +53,7 @@ Replacements are:
 
     0: none, 1: moderate, 2:strict
 
-If not supported, the URL paramter is an empty string.
+If not supported, the URL parameter is an empty string.
 
 """
 
@@ -114,7 +114,7 @@ time_range_support = False
 
 time_range_url = '&hours={time_range_val}'
 '''Time range URL parameter in the in :py:obj:`search_url`. If no time range is
-requested by the user, the URL paramter is an empty string. The
+requested by the user, the URL parameter is an empty string. The
 ``{time_range_val}`` replacement is taken from the :py:obj:`time_range_map`.
 
 .. code:: yaml
 
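The docstring above describes the empty-string fallback; a rough sketch of how such a parameter is assembled (the `time_range_map` values below are made up for illustration, not taken from any engine's configuration):

    time_range_url = '&hours={time_range_val}'
    time_range_map = {'day': 24, 'week': 24 * 7, 'month': 24 * 30, 'year': 24 * 365}

    def time_range_args(time_range: str) -> str:
        # if no (supported) time range is requested, the URL parameter is an empty string
        if time_range in time_range_map:
            return time_range_url.format(time_range_val=time_range_map[time_range])
        return ''

    print(time_range_args('week'))  # '&hours=168'
    print(time_range_args(''))      # ''
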
@@ -30,7 +30,7 @@ def get_external_url(url_id, item_id, alternative="default"):
     """Return an external URL or None if url_id is not found.
 
     url_id can take value from data/external_urls.json
-    The "imdb_id" value is automaticaly converted according to the item_id value.
+    The "imdb_id" value is automatically converted according to the item_id value.
 
     If item_id is None, the raw URL with the $1 is returned.
     """
 
@@ -17,7 +17,7 @@ along with searx. If not, see < http://www.gnu.org/licenses/ >.
 from flask_babel import gettext
 import re
 
-name = gettext('Self Informations')
+name = gettext('Self Information')
 description = gettext('Displays your IP if the query is "ip" and your user agent if the query contains "user agent".')
 default_on = True
 preference_section = 'query'
 
@@ -52,7 +52,7 @@ class Setting:
         return self.value
 
     def save(self, name: str, resp: flask.Response):
-        """Save cookie ``name`` in the HTTP reponse obect
+        """Save cookie ``name`` in the HTTP response object
 
         If needed, its overwritten in the inheritance."""
         resp.set_cookie(name, self.value, max_age=COOKIE_MAX_AGE)
 
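The `save()` docstrings corrected in this and the following hunks all describe the same pattern: a setting serialises itself into a cookie on the outgoing `flask.Response`. A minimal standalone sketch of that pattern (the route, cookie name, value and max age are assumptions for the example, not searxng's actual configuration):

    import flask

    COOKIE_MAX_AGE = 60 * 60 * 24 * 365  # assumed lifetime for this sketch

    app = flask.Flask(__name__)

    @app.route('/save')
    def save_preferences():
        resp = flask.Response('preferences saved')
        # a MultipleChoiceSetting-style value: a list joined into one cookie
        resp.set_cookie('categories', ','.join(['general', 'images']), max_age=COOKIE_MAX_AGE)
        return resp
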
@@ -113,7 +113,7 @@ class MultipleChoiceSetting(Setting):
             self.value.append(choice)
 
     def save(self, name: str, resp: flask.Response):
-        """Save cookie ``name`` in the HTTP reponse obect"""
+        """Save cookie ``name`` in the HTTP response object"""
         resp.set_cookie(name, ','.join(self.value), max_age=COOKIE_MAX_AGE)
 
 
@@ -146,7 +146,7 @@ class SetSetting(Setting):
         self.values = set(elements)
 
     def save(self, name: str, resp: flask.Response):
-        """Save cookie ``name`` in the HTTP reponse obect"""
+        """Save cookie ``name`` in the HTTP response object"""
         resp.set_cookie(name, ','.join(self.values), max_age=COOKIE_MAX_AGE)
 
 
@@ -193,7 +193,7 @@ class MapSetting(Setting):
         self.key = data # pylint: disable=attribute-defined-outside-init
 
     def save(self, name: str, resp: flask.Response):
-        """Save cookie ``name`` in the HTTP reponse obect"""
+        """Save cookie ``name`` in the HTTP response object"""
         if hasattr(self, 'key'):
             resp.set_cookie(name, self.key, max_age=COOKIE_MAX_AGE)
 
@@ -239,7 +239,7 @@ class BooleanChoices:
         return (k for k, v in self.choices.items() if not v)
 
     def save(self, resp: flask.Response):
-        """Save cookie in the HTTP reponse obect"""
+        """Save cookie in the HTTP response object"""
         disabled_changed = (k for k in self.disabled if self.default_choices[k])
         enabled_changed = (k for k in self.enabled if not self.default_choices[k])
         resp.set_cookie('disabled_{0}'.format(self.name), ','.join(disabled_changed), max_age=COOKIE_MAX_AGE)
 
@@ -496,7 +496,7 @@ class Preferences:
         return ret_val
 
     def save(self, resp: flask.Response):
-        """Save cookie in the HTTP reponse obect"""
+        """Save cookie in the HTTP response object"""
         for user_setting_name, user_setting in self.key_value_settings.items():
             # pylint: disable=unnecessary-dict-index-lookup
             if self.key_value_settings[user_setting_name].locked:
 
@@ -198,10 +198,10 @@ class BangParser(QueryPartParser):
             self.raw_text_query.enginerefs.append(EngineRef(value, 'none'))
             return True
 
-        # check if prefix is equal with categorie name
+        # check if prefix is equal with category name
         if value in categories:
             # using all engines for that search, which
-            # are declared under that categorie name
+            # are declared under that category name
             self.raw_text_query.enginerefs.extend(
                 EngineRef(engine.name, value)
                 for engine in categories[value]
 
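A simplified illustration of the category expansion in this hunk; the `categories` data and the namedtuple stand-ins are invented for the example, searxng's real EngineRef and engine objects are richer.

    from collections import namedtuple

    EngineRef = namedtuple('EngineRef', ['name', 'category'])
    Engine = namedtuple('Engine', ['name'])

    categories = {'images': [Engine('bing images'), Engine('google images')]}

    def expand_category_bang(value: str) -> list:
        # using all engines declared under that category name
        if value in categories:
            return [EngineRef(engine.name, value) for engine in categories[value]]
        return []

    print(expand_category_bang('images'))
    # [EngineRef(name='bing images', category='images'), EngineRef(name='google images', category='images')]
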
@@ -219,7 +219,7 @@ class BangParser(QueryPartParser):
             self._add_autocomplete(first_char + suggestion)
             return
 
-        # check if query starts with categorie name
+        # check if query starts with category name
         for category in categories:
             if category.startswith(value):
                 self._add_autocomplete(first_char + category.replace(' ', '_'))
 
@@ -311,7 +311,7 @@ class RawTextQuery:
 
     def getFullQuery(self):
         """
-        get full querry including whitespaces
+        get full query including whitespaces
         """
         return '{0} {1}'.format(' '.join(self.query_parts), self.getQuery()).strip()
 
@@ -134,9 +134,9 @@ def result_score(result):
     if hasattr(engines[result_engine], 'weight'):
         weight *= float(engines[result_engine].weight)
 
-    occurences = len(result['positions'])
+    occurrences = len(result['positions'])
 
-    return sum((occurences * weight) / position for position in result['positions'])
+    return sum((occurrences * weight) / position for position in result['positions'])
 
 
 class Timing(NamedTuple):
 
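The scoring line touched above is compact; here is a standalone sketch with a worked example, with the engine-weight handling stripped down to a single float:

    def result_score(positions, weight=1.0):
        # a result reported at positions [1, 3] with weight 1.0 scores 2/1 + 2/3 ≈ 2.67
        occurrences = len(positions)
        return sum((occurrences * weight) / position for position in positions)

    print(result_score([1, 3]))  # 2.666...
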
@@ -286,7 +286,7 @@ class ResultContainer:
         if 'template' not in result:
             result['template'] = 'default.html'
 
-        # strip multiple spaces and cariage returns from content
+        # strip multiple spaces and carriage returns from content
         if result.get('content'):
             result['content'] = WHITESPACE_REGEX.sub(' ', result['content'])
 
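A small sketch of the normalisation mentioned in that comment; the exact pattern behind `WHITESPACE_REGEX` is not shown in the hunk, so `\s+` is used here as a stand-in:

    import re

    WHITESPACE_REGEX = re.compile(r'\s+')  # stand-in pattern for this sketch

    content = 'first line\r\n  second   line'
    print(WHITESPACE_REGEX.sub(' ', content))  # 'first line second line'
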
@@ -315,7 +315,7 @@ class ResultContainer:
             return merged_result
         else:
             # it's an image
-            # it's a duplicate if the parsed_url, template and img_src are differents
+            # it's a duplicate if the parsed_url, template and img_src are different
             if result.get('img_src', '') == merged_result.get('img_src', ''):
                 return merged_result
         return None
 
@@ -70,7 +70,7 @@ def run(engine_name_list, verbose):
         stderr.write(f'{BOLD_SEQ}Engine {name:30}{RESET_SEQ}Checking\n')
         checker = searx.search.checker.Checker(processor)
         checker.run()
-        if checker.test_results.succesfull:
+        if checker.test_results.successful:
             stdout.write(f'{BOLD_SEQ}Engine {name:30}{RESET_SEQ}{GREEN}OK{RESET_SEQ}\n')
             if verbose:
                 stdout.write(f' {"found languages":15}: {" ".join(sorted(list(checker.test_results.languages)))}\n')
 
@@ -107,7 +107,7 @@ def run():
         logger.debug('Checking %s engine', name)
         checker = Checker(processor)
         checker.run()
-        if checker.test_results.succesfull:
+        if checker.test_results.successful:
             result['engines'][name] = {'success': True}
         else:
             result['engines'][name] = {'success': False, 'errors': checker.test_results.errors}
 
@@ -174,7 +174,7 @@ class TestResults:
         self.languages.add(language)
 
     @property
-    def succesfull(self):
+    def successful(self):
         return len(self.errors) == 0
 
     def __iter__(self):
 
@@ -317,7 +317,7 @@ class ResultContainerTests:
         self._record_error('No result')
 
     def one_title_contains(self, title: str):
-        """Check one of the title contains `title` (case insensitive comparaison)"""
+        """Check one of the title contains `title` (case insensitive comparison)"""
         title = title.lower()
         for result in self.result_container.get_ordered_results():
             if title in result['title'].lower():
 
@@ -75,7 +75,7 @@ class OnlineProcessor(EngineProcessor):
 
     def _send_http_request(self, params):
         # create dictionary which contain all
-        # informations about the request
+        # information about the request
         request_args = dict(
             headers=params['headers'], cookies=params['cookies'], verify=params['verify'], auth=params['auth']
         )
 
@@ -52,7 +52,7 @@ search:
 
 server:
   # If you change port, bind_address or base_url don't forget to rebuild
-  # instance's enviroment (make buildenv)
+  # instance's environment (make buildenv)
   port: 8888
   bind_address: "127.0.0.1"
   base_url: false # Possible values: false or "https://example.org/location".
 
@@ -135,7 +135,7 @@ outgoing:
   request_timeout: 3.0
   # the maximum timeout in seconds
   # max_request_timeout: 10.0
-  # suffix of searx_useragent, could contain informations like an email address
+  # suffix of searx_useragent, could contain information like an email address
   # to the administrator
   useragent_suffix: ""
   # The maximum number of concurrent connections that may be established.
 
@@ -183,7 +183,7 @@ outgoing:
 #  # these plugins are enabled if nothing is configured ..
 #  - 'Hash plugin'
 #  - 'Search on category select'
-#  - 'Self Informations'
+#  - 'Self Information'
 #  - 'Tracker URL remover'
 #  - 'Ahmia blacklist' # activation depends on outgoing.using_tor_proxy
 #  # these plugins are disabled if nothing is configured ..
 
@@ -121,10 +121,10 @@ def is_use_default_settings(user_settings):
         raise ValueError('Invalid value for use_default_settings')
 
 
-def load_settings(load_user_setttings=True):
+def load_settings(load_user_settings=True):
     default_settings_path = get_default_settings_path()
     user_settings_path = get_user_settings_path()
-    if user_settings_path is None or not load_user_setttings:
+    if user_settings_path is None or not load_user_settings:
         # no user settings
         return (load_yaml(default_settings_path), 'load the default settings from {}'.format(default_settings_path))
 
@@ -136,7 +136,7 @@ def load_settings(load_user_setttings=True):
     update_settings(default_settings, user_settings)
     return (
         default_settings,
-        'merge the default settings ( {} ) and the user setttings ( {} )'.format(
+        'merge the default settings ( {} ) and the user settings ( {} )'.format(
             default_settings_path, user_settings_path
         ),
     )
 
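The message fixed here describes merging the default settings with the user settings; below is a shallow, hedged stand-in for that merge (the real `update_settings` in searxng is more involved than this recursive sketch):

    def update_settings(default_settings: dict, user_settings: dict) -> dict:
        # user values win; nested dicts are merged key by key
        for key, value in user_settings.items():
            if isinstance(value, dict) and isinstance(default_settings.get(key), dict):
                update_settings(default_settings[key], value)
            else:
                default_settings[key] = value
        return default_settings

    defaults = {'server': {'port': 8888, 'bind_address': '127.0.0.1'}}
    user = {'server': {'port': 8080}}
    print(update_settings(defaults, user))
    # {'server': {'port': 8080, 'bind_address': '127.0.0.1'}}
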
@@ -261,7 +261,7 @@
                     <option value="GET" {% if method == 'GET' %}selected="selected"{% endif %}>GET</option>
                 </select>
             </p>
-            <div class="description">{{ _('Change how forms are submited, <a href="http://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol#Request_methods" rel="external">learn more about request methods</a>') }}</div>
+            <div class="description">{{ _('Change how forms are submitted, <a href="http://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol#Request_methods" rel="external">learn more about request methods</a>') }}</div>
         </fieldset>
         {% endif %}
         {% if 'image_proxy' not in locked_preferences %}
 
@@ -273,7 +273,7 @@ def extract_url(xpath_results, base_url) -> str:
     raise ValueError('URL not found')
 
 
-def dict_subset(dictionnary: MutableMapping, properties: Set[str]) -> Dict:
+def dict_subset(dictionary: MutableMapping, properties: Set[str]) -> Dict:
     """Extract a subset of a dict
 
     Examples:
 
@@ -282,7 +282,7 @@ def dict_subset(dictionnary: MutableMapping, properties: Set[str]) -> Dict:
         >>> >> dict_subset({'A': 'a', 'B': 'b', 'C': 'c'}, ['A', 'D'])
         {'A': 'a'}
     """
-    return {k: dictionnary[k] for k in properties if k in dictionnary}
+    return {k: dictionary[k] for k in properties if k in dictionary}
 
 
 def get_torrent_size(filesize: str, filesize_multiplier: str) -> Optional[int]:
 
@@ -54,7 +54,7 @@ def parse_lang(preferences: Preferences, form: Dict[str, str], raw_text_query: R
         return preferences.get_value('language')
     # get language
     # set specific language if set on request, query or preferences
-    # TODO support search with multible languages
+    # TODO support search with multiple languages
     if len(raw_text_query.languages):
         query_lang = raw_text_query.languages[-1]
     elif 'language' in form:
 
@@ -223,7 +223,7 @@ def get_search_query_from_webapp(
     disabled_engines = preferences.engines.get_disabled()
 
     # parse query, if tags are set, which change
-    # the serch engine or search-language
+    # the search engine or search-language
     raw_text_query = RawTextQuery(form['q'], disabled_engines)
 
     # set query
 
@@ -238,7 +238,7 @@ def get_search_query_from_webapp(
 
     if not is_locked('categories') and raw_text_query.specific:
         # if engines are calculated from query,
-        # set categories by using that informations
+        # set categories by using that information
         query_engineref_list = raw_text_query.enginerefs
     else:
         # otherwise, using defined categories to
 
@@ -244,7 +244,7 @@ def code_highlighter(codelines, language=None):
         language = 'text'
 
     try:
-        # find lexer by programing language
+        # find lexer by programming language
         lexer = get_lexer_by_name(language, stripall=True)
 
     except Exception as e: # pylint: disable=broad-except
 
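For reference, the pygments lookup used above in a standalone form; the fallback-to-text behaviour mirrors what the surrounding `try`/`except` suggests, but the helper name is invented for this sketch.

    from pygments.lexers import get_lexer_by_name
    from pygments.util import ClassNotFound

    def find_lexer(language: str):
        try:
            # find lexer by programming language
            return get_lexer_by_name(language, stripall=True)
        except ClassNotFound:
            return get_lexer_by_name('text', stripall=True)

    print(find_lexer('python'))        # a PythonLexer instance
    print(find_lexer('no-such-lang'))  # falls back to the plain-text lexer
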
@@ -42,7 +42,7 @@ class UnicodeWriter:
         # Fetch UTF-8 output from the queue ...
         data = self.queue.getvalue()
         data = data.strip('\x00')
-        # ... and reencode it into the target encoding
+        # ... and re-encode it into the target encoding
         data = self.encoder.encode(data)
         # write to the target stream
         self.stream.write(data.decode())