mirror of
https://github.com/searxng/searxng.git
synced 2025-09-08 19:28:32 +02:00
[mod] addition of various type hints / tbc
- pyright configuration [1]_ - stub files: types-lxml [2]_ - addition of various type hints - enable use of new type system features on older Python versions [3]_ - ``.tool-versions`` - set python to the lowest version we support (3.10.18) [4]_: Older Python versions typically lack some typing features found in newer releases. Therefore, for local type checking (before commit), it is necessary to use the older Python interpreter. .. [1] https://docs.basedpyright.com/v1.20.0/configuration/config-files/ .. [2] https://pypi.org/project/types-lxml/ .. [3] https://typing-extensions.readthedocs.io/en/latest/# .. [4] https://mise.jdx.dev/configuration.html#tool-versions Signed-off-by: Markus Heiser <markus.heiser@darmarit.de> Format: reST
This commit is contained in:
parent
09500459fe
commit
57b9673efb
107 changed files with 1205 additions and 1251 deletions
|
@ -32,18 +32,24 @@ Implementations
|
|||
===============
|
||||
|
||||
"""
|
||||
import typing as t
|
||||
|
||||
from typing import List, Dict, Any, Optional
|
||||
from urllib.parse import urlencode
|
||||
from lxml import html
|
||||
from lxml.etree import ElementBase
|
||||
|
||||
from searx.utils import extract_text, eval_xpath, eval_xpath_getindex, eval_xpath_list
|
||||
from searx.enginelib.traits import EngineTraits
|
||||
from searx.data import ENGINE_TRAITS
|
||||
from searx.exceptions import SearxEngineXPathException
|
||||
|
||||
from searx.result_types import EngineResults
|
||||
|
||||
if t.TYPE_CHECKING:
|
||||
from searx.extended_types import SXNG_Response
|
||||
|
||||
# about
|
||||
about: Dict[str, Any] = {
|
||||
about: dict[str, t.Any] = {
|
||||
"website": "https://annas-archive.org/",
|
||||
"wikidata_id": "Q115288326",
|
||||
"official_api_documentation": None,
|
||||
|
@ -53,7 +59,7 @@ about: Dict[str, Any] = {
|
|||
}
|
||||
|
||||
# engine dependent config
|
||||
categories: List[str] = ["files"]
|
||||
categories: list[str] = ["files"]
|
||||
paging: bool = True
|
||||
|
||||
# search-url
|
||||
|
@ -85,7 +91,7 @@ aa_ext: str = ''
|
|||
"""
|
||||
|
||||
|
||||
def init(engine_settings=None): # pylint: disable=unused-argument
|
||||
def init(engine_settings: dict[str, t.Any]) -> None: # pylint: disable=unused-argument
|
||||
"""Check of engine's settings."""
|
||||
traits = EngineTraits(**ENGINE_TRAITS['annas archive'])
|
||||
|
||||
|
@ -99,8 +105,8 @@ def init(engine_settings=None): # pylint: disable=unused-argument
|
|||
raise ValueError(f'invalid setting ext: {aa_ext}')
|
||||
|
||||
|
||||
def request(query, params: Dict[str, Any]) -> Dict[str, Any]:
|
||||
lang = traits.get_language(params["language"], traits.all_locale) # type: ignore
|
||||
def request(query: str, params: dict[str, t.Any]) -> None:
|
||||
lang = traits.get_language(params["language"], traits.all_locale)
|
||||
args = {
|
||||
'lang': lang,
|
||||
'content': aa_content,
|
||||
|
@ -112,11 +118,10 @@ def request(query, params: Dict[str, Any]) -> Dict[str, Any]:
|
|||
# filter out None and empty values
|
||||
filtered_args = dict((k, v) for k, v in args.items() if v)
|
||||
params["url"] = f"{base_url}/search?{urlencode(filtered_args)}"
|
||||
return params
|
||||
|
||||
|
||||
def response(resp) -> List[Dict[str, Optional[str]]]:
|
||||
results: List[Dict[str, Optional[str]]] = []
|
||||
def response(resp: "SXNG_Response") -> EngineResults:
|
||||
res = EngineResults()
|
||||
dom = html.fromstring(resp.text)
|
||||
|
||||
# The rendering of the WEB page is strange; positions of Anna's result page
|
||||
|
@ -126,16 +131,17 @@ def response(resp) -> List[Dict[str, Optional[str]]]:
|
|||
|
||||
for item in eval_xpath_list(dom, '//main//div[contains(@class, "js-aarecord-list-outer")]/div'):
|
||||
try:
|
||||
results.append(_get_result(item))
|
||||
kwargs: dict[str, t.Any] = _get_result(item)
|
||||
except SearxEngineXPathException:
|
||||
pass
|
||||
return results
|
||||
continue
|
||||
res.add(res.types.LegacyResult(**kwargs))
|
||||
return res
|
||||
|
||||
|
||||
def _get_result(item):
|
||||
def _get_result(item: ElementBase) -> dict[str, t.Any]:
|
||||
return {
|
||||
'template': 'paper.html',
|
||||
'url': base_url + extract_text(eval_xpath_getindex(item, './a/@href', 0)),
|
||||
'url': base_url + eval_xpath_getindex(item, './a/@href', 0),
|
||||
'title': extract_text(eval_xpath(item, './div//a[starts-with(@href, "/md5")]')),
|
||||
'authors': [extract_text(eval_xpath_getindex(item, './/a[starts-with(@href, "/search")]', 0))],
|
||||
'publisher': extract_text(
|
||||
|
@ -160,9 +166,9 @@ def fetch_traits(engine_traits: EngineTraits):
|
|||
engine_traits.custom['sort'] = []
|
||||
|
||||
resp = get(base_url + '/search')
|
||||
if not resp.ok: # type: ignore
|
||||
if not resp.ok:
|
||||
raise RuntimeError("Response from Anna's search page is not OK.")
|
||||
dom = html.fromstring(resp.text) # type: ignore
|
||||
dom = html.fromstring(resp.text)
|
||||
|
||||
# supported language codes
|
||||
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue