[mod] isolation of botdetection from the limiter

This patch was inspired by the discussion around PR-2882 [2].  The goals of this
patch are:

1. Convert plugin searx.plugin.limiter to normal code [1]
2. isolation of botdetection from the limiter [2]
3. searx/{tools => botdetection}/config.py and drop searx.tools
4. in URL /config, 'limiter.enabled' is true only if the limiter is really
   enabled (Redis is available).

This patch moves all the code that belongs to botdetection into namespace
searx.botdetection and code that belongs to limiter is placed in namespace
searx.limiter.

The limiter used to be a plugin; when botdetection was added at some point, it
was not made a plugin.  The modularization of these two components was long
overdue.  With the clear modularization, the documentation can now also be
organized according to the architecture.

[1] https://github.com/searxng/searxng/pull/2882
[2] https://github.com/searxng/searxng/pull/2882#issuecomment-1741716891

To test:

- check the app works without the limiter, check `/config`
- check the app works with the limiter and with the token, check `/config`
  (see the sketch after this list)
- make docs.live .. and read
  - http://0.0.0.0:8000/admin/searx.limiter.html
  - http://0.0.0.0:8000/src/searx.botdetection.html#botdetection
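
A minimal sketch of the `/config` check mentioned above (not part of the patch;
it assumes an instance listening on 127.0.0.1:8888 and that the limiter state
shows up in the JSON of `/config` as described in goal 4 -- adapt the key
lookup to the actual JSON layout):

.. code:: python

   import json
   import urllib.request

   def limiter_enabled(base_url="http://127.0.0.1:8888"):
       """Return True if /config reports the limiter as enabled."""
       with urllib.request.urlopen(f"{base_url}/config") as resp:
           data = json.load(resp)
       limiter = data.get("limiter", False)
       # depending on the version this may be a plain bool or a small sub-object
       if isinstance(limiter, dict):
           return bool(limiter.get("enabled", False))
       return bool(limiter)

   print("limiter enabled:", limiter_enabled())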

Signed-off-by: Markus Heiser <markus.heiser@darmarit.de>
Author:  Markus Heiser, 2023-10-02 16:36:07 +02:00 (committed by Markus Heiser)
Parent:  b05a15540e
Commit:  fd814aac86
22 changed files with 180 additions and 125 deletions


@@ -2,43 +2,22 @@
# lint: pylint
""".. _botdetection src:
The :ref:`limiter <limiter src>` implements several methods to block bots:

a. Analysis of the HTTP header in the request / can be easily bypassed.

b. Block and pass lists in which IPs are listed / difficult to maintain, since
   the IPs of bots are not all known and change over time.

c. Detection of bots based on the behavior of the requests and blocking and, if
   necessary, unblocking of the IPs via a dynamically changeable IP block list.

For dynamically changeable IP lists a Redis database is needed and for any kind
of IP list the determination of the IP of the client is essential.  The IP of
the client is determined via the X-Forwarded-For_ HTTP header.

.. _X-Forwarded-For:
   https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For

X-Forwarded-For
===============

.. attention::

   A correct setup of the HTTP request headers ``X-Forwarded-For`` and
   ``X-Real-IP`` is essential to be able to assign a request to an IP correctly:

   - `NGINX RequestHeader`_
   - `Apache RequestHeader`_

.. _NGINX RequestHeader:
   https://docs.searxng.org/admin/installation-nginx.html#nginx-s-searxng-site
.. _Apache RequestHeader:
   https://docs.searxng.org/admin/installation-apache.html#apache-s-searxng-site

.. autofunction:: searx.botdetection.get_real_ip

Implementations used for bot detection.
"""
from ._helpers import dump_request
from ._helpers import get_real_ip
from ._helpers import get_network
from ._helpers import too_many_requests
__all__ = ['dump_request', 'get_network', 'get_real_ip', 'too_many_requests']
redis_client = None
cfg = None
def init(_cfg, _redis_client):
    global redis_client, cfg  # pylint: disable=global-statement
    redis_client = _redis_client
    cfg = _cfg
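
With this split the caller (after this patch, the new searx.limiter module) has
to initialize the module-level ``cfg`` and ``redis_client`` before the helpers
are used.  A hedged sketch of that wiring -- the Flask app and the config paths
are assumptions made for the illustration, while ``init()``, ``get_real_ip()``
and ``config.Config.from_toml()`` are the functions shown in this patch:

.. code:: python

   import pathlib
   import flask

   from searx import botdetection, redisdb
   from searx.botdetection import config, get_real_ip

   # illustrative paths: the schema ships with the sources, the local file may
   # be absent (Config.from_toml() then only logs a warning)
   SCHEMA = pathlib.Path("limiter.toml")
   LOCAL = pathlib.Path("/etc/searxng/limiter.toml")

   app = flask.Flask(__name__)

   # set the module-level cfg / redis_client used by the helpers; assumes the
   # Redis connection has been initialized elsewhere
   botdetection.init(config.Config.from_toml(SCHEMA, LOCAL, {}), redisdb.client())

   @app.before_request
   def tag_real_ip():
       # get_real_ip() evaluates X-Forwarded-For / X-Real-IP according to cfg
       flask.request.environ["searxng.real_ip"] = get_real_ip(flask.request)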


@@ -13,8 +13,8 @@ from ipaddress import (
import flask
import werkzeug
from searx.tools import config
from searx import logger
from . import config
logger = logger.getChild('botdetection')
@@ -104,10 +104,10 @@ def get_real_ip(request: flask.Request) -> str:
    if not forwarded_for:
        _log_error_only_once("X-Forwarded-For header is not set!")
    else:
        from .limiter import get_cfg  # pylint: disable=import-outside-toplevel, cyclic-import
        from . import cfg  # pylint: disable=import-outside-toplevel, cyclic-import
        forwarded_for = [x.strip() for x in forwarded_for.split(',')]
        x_for: int = get_cfg()['real_ip.x_for']  # type: ignore
        x_for: int = cfg['real_ip.x_for']  # type: ignore
        forwarded_for = forwarded_for[-min(len(forwarded_for), x_for)]
    if not real_ip:


@@ -0,0 +1,377 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
# lint: pylint
"""Configuration class :py:class:`Config` with deep-update, schema validation
and deprecated names.
The :py:class:`Config` class implements a configuration that is based on
structured dictionaries. The configuration schema is defined in a dictionary
structure and the configuration data is given in a dictionary structure.
"""
from __future__ import annotations
from typing import Any
import copy
import typing
import logging
import pathlib
import pytomlpp as toml
__all__ = ['Config', 'UNSET', 'SchemaIssue']
log = logging.getLogger(__name__)
class FALSE:
    """Class of ``False`` singleton"""

    # pylint: disable=multiple-statements
    def __init__(self, msg):
        self.msg = msg

    def __bool__(self):
        return False

    def __str__(self):
        return self.msg

    __repr__ = __str__


UNSET = FALSE('<UNSET>')


class SchemaIssue(ValueError):
    """Exception to store and/or raise a message from a schema issue."""

    def __init__(self, level: typing.Literal['warn', 'invalid'], msg: str):
        self.level = level
        super().__init__(msg)

    def __str__(self):
        return f"[cfg schema {self.level}] {self.args[0]}"
class Config:
    """Base class used for configuration"""

    UNSET = UNSET

    @classmethod
    def from_toml(cls, schema_file: pathlib.Path, cfg_file: pathlib.Path, deprecated: dict) -> Config:

        # init schema
        log.debug("load schema file: %s", schema_file)
        cfg = cls(cfg_schema=toml.load(schema_file), deprecated=deprecated)
        if not cfg_file.exists():
            log.warning("missing config file: %s", cfg_file)
            return cfg

        # load configuration
        log.debug("load config file: %s", cfg_file)
        try:
            upd_cfg = toml.load(cfg_file)
        except toml.DecodeError as exc:
            msg = str(exc).replace('\t', '').replace('\n', ' ')
            log.error("%s: %s", cfg_file, msg)
            raise

        is_valid, issue_list = cfg.validate(upd_cfg)
        for msg in issue_list:
            log.error(str(msg))
        if not is_valid:
            raise TypeError(f"schema of {cfg_file} is invalid!")
        cfg.update(upd_cfg)
        return cfg

    def __init__(self, cfg_schema: typing.Dict, deprecated: typing.Dict[str, str]):
        """Constructor of class Config.

        :param cfg_schema: Schema of the configuration
        :param deprecated: dictionary that maps deprecated configuration names to a message

        These values are needed for validation, see :py:obj:`validate`.
        """
        self.cfg_schema = cfg_schema
        self.deprecated = deprecated
        self.cfg = copy.deepcopy(cfg_schema)

    def __getitem__(self, key: str) -> Any:
        return self.get(key)
    def validate(self, cfg: dict):
        """Validation of dictionary ``cfg`` on :py:obj:`Config.SCHEMA`.
        Validation is done by :py:obj:`validate`."""
        return validate(self.cfg_schema, cfg, self.deprecated)

    def update(self, upd_cfg: dict):
        """Update this configuration by ``upd_cfg``."""
        dict_deepupdate(self.cfg, upd_cfg)

    def default(self, name: str):
        """Returns default value of field ``name`` in ``self.cfg_schema``."""
        return value(name, self.cfg_schema)

    def get(self, name: str, default: Any = UNSET, replace: bool = True) -> Any:
        """Returns the value to which ``name`` points in the configuration.

        If there is no such ``name`` in the config and the ``default`` is
        :py:obj:`UNSET`, a :py:obj:`KeyError` is raised.
        """
        parent = self._get_parent_dict(name)
        val = parent.get(name.split('.')[-1], UNSET)
        if val is UNSET:
            if default is UNSET:
                raise KeyError(name)
            val = default
        if replace and isinstance(val, str):
            val = val % self
        return val

    def set(self, name: str, val):
        """Set the value to which ``name`` points in the configuration.

        If there is no such ``name`` in the config, a :py:obj:`KeyError` is
        raised.
        """
        parent = self._get_parent_dict(name)
        parent[name.split('.')[-1]] = val

    def _get_parent_dict(self, name):
        parent_name = '.'.join(name.split('.')[:-1])
        if parent_name:
            parent = value(parent_name, self.cfg)
        else:
            parent = self.cfg
        if (parent is UNSET) or (not isinstance(parent, dict)):
            raise KeyError(parent_name)
        return parent

    def path(self, name: str, default=UNSET):
        """Get a :py:class:`pathlib.Path` object from a config string."""
        val = self.get(name, default)
        if val is UNSET:
            if default is UNSET:
                raise KeyError(name)
            return default
        return pathlib.Path(str(val))

    def pyobj(self, name, default=UNSET):
        """Get a python object referred to by its fully qualified name (FQN) in
        the config string."""
        fqn = self.get(name, default)
        if fqn is UNSET:
            if default is UNSET:
                raise KeyError(name)
            return default
        (modulename, name) = str(fqn).rsplit('.', 1)
        m = __import__(modulename, {}, {}, [name], 0)
        return getattr(m, name)
# working with dictionaries


def value(name: str, data_dict: dict):
    """Returns the value to which ``name`` points in the ``data_dict``.

    .. code:: python

       >>> data_dict = {
               "foo": {"bar": 1 },
               "bar": {"foo": 2 },
               "foobar": [1, 2, 3],
           }
       >>> value('foobar', data_dict)
       [1, 2, 3]
       >>> value('foo.bar', data_dict)
       1
       >>> value('foo.bar.xxx', data_dict)
       <UNSET>

    """
    ret_val = data_dict
    for part in name.split('.'):
        if isinstance(ret_val, dict):
            ret_val = ret_val.get(part, UNSET)
        if ret_val is UNSET:
            break
    return ret_val
def validate(
    schema_dict: typing.Dict, data_dict: typing.Dict, deprecated: typing.Dict[str, str]
) -> typing.Tuple[bool, list]:
    """Deep validation of dictionary in ``data_dict`` against dictionary in
    ``schema_dict``.  Argument ``deprecated`` is a dictionary that maps
    deprecated configuration names to a message::

        deprecated = {
            "foo.bar" : "config 'foo.bar' is deprecated, use 'bar.foo'",
            "..." : "..."
        }

    The function returns a python tuple ``(is_valid, issue_list)``:

    ``is_valid``:
      A bool value indicating whether ``data_dict`` is valid or not.

    ``issue_list``:
      A list of messages (:py:obj:`SchemaIssue`) from the validation::

          [schema warn] data_dict: deprecated 'fontlib.foo': <DEPRECATED['foo.bar']>
          [schema invalid] data_dict: key unknown 'fontlib.foo'
          [schema invalid] data_dict: type mismatch 'fontlib.foo': expected ..., is ...

    If ``schema_dict`` or ``data_dict`` is not a dictionary type a
    :py:obj:`SchemaIssue` is raised.
    """
    names = []
    is_valid = True
    issue_list = []

    if not isinstance(schema_dict, dict):
        raise SchemaIssue('invalid', "schema_dict is not a dict type")
    if not isinstance(data_dict, dict):
        raise SchemaIssue('invalid', f"data_dict issue{'.'.join(names)} is not a dict type")

    is_valid, issue_list = _validate(names, issue_list, schema_dict, data_dict, deprecated)
    return is_valid, issue_list
def _validate(
    names: typing.List,
    issue_list: typing.List,
    schema_dict: typing.Dict,
    data_dict: typing.Dict,
    deprecated: typing.Dict[str, str],
) -> typing.Tuple[bool, typing.List]:

    is_valid = True

    for key, data_value in data_dict.items():

        names.append(key)
        name = '.'.join(names)

        deprecated_msg = deprecated.get(name)
        # print("XXX %s: key %s // data_value: %s" % (name, key, data_value))
        if deprecated_msg:
            issue_list.append(SchemaIssue('warn', f"data_dict '{name}': deprecated - {deprecated_msg}"))

        schema_value = value(name, schema_dict)
        # print("YYY %s: key %s // schema_value: %s" % (name, key, schema_value))
        if schema_value is UNSET:
            if not deprecated_msg:
                issue_list.append(SchemaIssue('invalid', f"data_dict '{name}': key unknown in schema_dict"))
                is_valid = False

        elif type(schema_value) != type(data_value):  # pylint: disable=unidiomatic-typecheck
            issue_list.append(
                SchemaIssue(
                    'invalid',
                    (f"data_dict: type mismatch '{name}':" f" expected {type(schema_value)}, is: {type(data_value)}"),
                )
            )
            is_valid = False

        elif isinstance(data_value, dict):
            _valid, _ = _validate(names, issue_list, schema_dict, data_value, deprecated)
            is_valid = is_valid and _valid

        names.pop()

    return is_valid, issue_list
def dict_deepupdate(base_dict: dict, upd_dict: dict, names=None):
    """Deep-update of dictionary in ``base_dict`` by dictionary in ``upd_dict``.

    For each ``upd_key`` & ``upd_val`` pair in ``upd_dict``:

    0. If types of ``base_dict[upd_key]`` and ``upd_val`` do not match raise a
       :py:obj:`TypeError`.

    1. If ``base_dict[upd_key]`` is a dict: recursively deep-update it by ``upd_val``.

    2. If ``base_dict[upd_key]`` does not exist: set ``base_dict[upd_key]`` from a
       (deep-) copy of ``upd_val``.

    3. If ``upd_val`` is a list, extend list in ``base_dict[upd_key]`` by the
       list in ``upd_val``.

    4. If ``upd_val`` is a set, update set in ``base_dict[upd_key]`` by set in
       ``upd_val``.
    """
    # pylint: disable=too-many-branches
    if not isinstance(base_dict, dict):
        raise TypeError("argument 'base_dict' is not a dictionary type")
    if not isinstance(upd_dict, dict):
        raise TypeError("argument 'upd_dict' is not a dictionary type")

    if names is None:
        names = []

    for upd_key, upd_val in upd_dict.items():
        # For each upd_key & upd_val pair in upd_dict:

        if isinstance(upd_val, dict):

            if upd_key in base_dict:
                # if base_dict[upd_key] exists, recursively deep-update it
                if not isinstance(base_dict[upd_key], dict):
                    raise TypeError(f"type mismatch {'.'.join(names)}: is not a dict type in base_dict")
                dict_deepupdate(
                    base_dict[upd_key],
                    upd_val,
                    names
                    + [
                        upd_key,
                    ],
                )

            else:
                # if base_dict[upd_key] does not exist, set base_dict[upd_key] from a deepcopy of upd_val
                base_dict[upd_key] = copy.deepcopy(upd_val)

        elif isinstance(upd_val, list):

            if upd_key in base_dict:
                # if base_dict[upd_key] exists, base_dict[upd_key] is extended by
                # the list from upd_val
                if not isinstance(base_dict[upd_key], list):
                    raise TypeError(f"type mismatch {'.'.join(names)}: is not a list type in base_dict")
                base_dict[upd_key].extend(upd_val)

            else:
                # if base_dict[upd_key] doesn't exist, set base_dict[upd_key] from a deepcopy of the
                # list in upd_val.
                base_dict[upd_key] = copy.deepcopy(upd_val)

        elif isinstance(upd_val, set):

            if upd_key in base_dict:
                # if base_dict[upd_key] exists, base_dict[upd_key] is updated by the set in upd_val
                if not isinstance(base_dict[upd_key], set):
                    raise TypeError(f"type mismatch {'.'.join(names)}: is not a set type in base_dict")
                base_dict[upd_key].update(upd_val.copy())

            else:
                # if base_dict[upd_key] doesn't exist, set base_dict[upd_key] from a copy of the
                # set in upd_val
                base_dict[upd_key] = upd_val.copy()

        else:
            # for any other type of upd_val replace or add base_dict[upd_key] by a copy
            # of upd_val
            base_dict[upd_key] = copy.copy(upd_val)
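
A small, self-contained usage sketch of the class above (schema and data are
made up for the example; only the API defined in this file is used):

.. code:: python

   from searx.botdetection.config import Config

   schema = {
       "real_ip": {"x_for": 1, "ipv4_prefix": 32},
       "botdetection": {"ip_limit": {"link_token": False}},
   }
   cfg = Config(cfg_schema=schema, deprecated={})

   # validate a user supplied dict against the schema, then deep-update
   user_cfg = {"botdetection": {"ip_limit": {"link_token": True}}}
   is_valid, issues = cfg.validate(user_cfg)
   if is_valid:
       cfg.update(user_cfg)

   print(cfg.get("real_ip.x_for"))                   # 1, the schema default
   print(cfg["botdetection.ip_limit.link_token"])    # True, from the deep-update
   print(cfg.get("real_ip.unknown", default=None))   # None instead of a KeyError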


@@ -24,7 +24,7 @@ from ipaddress import (
import flask
import werkzeug
from searx.tools import config
from . import config
from ._helpers import too_many_requests


@@ -25,7 +25,7 @@ from ipaddress import (
import flask
import werkzeug
from searx.tools import config
from . import config
from ._helpers import too_many_requests


@@ -21,7 +21,7 @@ from ipaddress import (
import flask
import werkzeug
from searx.tools import config
from . import config
from ._helpers import too_many_requests


@@ -22,7 +22,7 @@ from ipaddress import (
import flask
import werkzeug
from searx.tools import config
from . import config
from ._helpers import too_many_requests


@@ -24,7 +24,7 @@ from ipaddress import (
import flask
import werkzeug
from searx.tools import config
from . import config
from ._helpers import too_many_requests


@@ -13,8 +13,7 @@ and at least for a maximum of 10 minutes.
The :py:obj:`.link_token` method can be used to investigate whether a request is
*suspicious*. To activate the :py:obj:`.link_token` method in the
:py:obj:`.ip_limit` method add the following to your
``/etc/searxng/limiter.toml``:
:py:obj:`.ip_limit` method add the following configuration:
.. code:: toml
@@ -46,13 +45,13 @@ from ipaddress import (
import flask
import werkzeug
from searx.tools import config
from searx import settings
from searx import redisdb
from searx.redislib import incr_sliding_window, drop_counter
from . import link_token
from . import config
from ._helpers import (
    too_many_requests,
    logger,


@@ -33,7 +33,7 @@ from ipaddress import (
    IPv6Address,
)
from searx.tools import config
from . import config
from ._helpers import logger
logger = logger.getChild('ip_limit')


@@ -1,145 +0,0 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
# lint: pylint
""".. _limiter src:

Limiter
=======

.. sidebar:: info

   The limiter requires a :ref:`Redis <settings redis>` database.

Bot protection / IP rate limitation.  The intention of rate limitation is to
limit suspicious requests from an IP.  The motivation behind this is the fact
that SearXNG passes through requests from bots and is thus classified as a bot
itself.  As a result, the SearXNG engine then receives a CAPTCHA or is blocked
by the search engine (the origin) in some other way.

To avoid blocking, the requests from bots to SearXNG must also be blocked, this
is the task of the limiter.  To perform this task, the limiter uses the methods
from the :py:obj:`searx.botdetection`.

To enable the limiter activate:

.. code:: yaml

   server:
     ...
     limiter: true  # rate limit the number of request on the instance, block some bots

and set the redis-url connection. Check the value, it depends on your redis DB
(see :ref:`settings redis`), by example:

.. code:: yaml

   redis:
     url: unix:///usr/local/searxng-redis/run/redis.sock?db=0

"""

from __future__ import annotations

from pathlib import Path
from ipaddress import ip_address
import flask
import werkzeug

from searx.tools import config
from searx import logger

from . import (
    http_accept,
    http_accept_encoding,
    http_accept_language,
    http_user_agent,
    ip_limit,
    ip_lists,
)

from ._helpers import (
    get_network,
    get_real_ip,
    dump_request,
)

logger = logger.getChild('botdetection.limiter')

CFG: config.Config = None  # type: ignore

LIMITER_CFG_SCHEMA = Path(__file__).parent / "limiter.toml"
"""Base configuration (schema) of the botdetection."""

LIMITER_CFG = Path('/etc/searxng/limiter.toml')
"""Local Limiter configuration."""

CFG_DEPRECATED = {
    # "dummy.old.foo": "config 'dummy.old.foo' exists only for tests. Don't use it in your real project config."
}


def get_cfg() -> config.Config:
    global CFG  # pylint: disable=global-statement
    if CFG is None:
        CFG = config.Config.from_toml(LIMITER_CFG_SCHEMA, LIMITER_CFG, CFG_DEPRECATED)
    return CFG


def filter_request(request: flask.Request) -> werkzeug.Response | None:
    # pylint: disable=too-many-return-statements

    cfg = get_cfg()
    real_ip = ip_address(get_real_ip(request))
    network = get_network(real_ip, cfg)

    if request.path == '/healthz':
        return None

    # link-local
    if network.is_link_local:
        return None

    # block- & pass- lists
    #
    # 1. The IP of the request is first checked against the pass-list; if the IP
    #    matches an entry in the list, the request is not blocked.
    # 2. If no matching entry is found in the pass-list, then a check is made against
    #    the block list; if the IP matches an entry in the list, the request is
    #    blocked.
    # 3. If the IP is not in either list, the request is not blocked.

    match, msg = ip_lists.pass_ip(real_ip, cfg)
    if match:
        logger.warning("PASS %s: matched PASSLIST - %s", network.compressed, msg)
        return None

    match, msg = ip_lists.block_ip(real_ip, cfg)
    if match:
        logger.error("BLOCK %s: matched BLOCKLIST - %s", network.compressed, msg)
        return flask.make_response(('IP is on BLOCKLIST - %s' % msg, 429))

    # methods applied on /

    for func in [
        http_user_agent,
    ]:
        val = func.filter_request(network, request, cfg)
        if val is not None:
            return val

    # methods applied on /search

    if request.path == '/search':

        for func in [
            http_accept,
            http_accept_encoding,
            http_accept_language,
            http_user_agent,
            ip_limit,
        ]:
            val = func.filter_request(network, request, cfg)
            if val is not None:
                return val

    logger.debug(f"OK {network}: %s", dump_request(flask.request))
    return None


@@ -1,40 +0,0 @@
[real_ip]
# Number of values to trust for X-Forwarded-For.
x_for = 1
# The prefix defines the number of leading bits in an address that are compared
# to determine whether or not an address is part of a (client) network.
ipv4_prefix = 32
ipv6_prefix = 48
[botdetection.ip_limit]
# To get unlimited access in a local network, by default link-local addresses
# (networks) are not monitored by the ip_limit
filter_link_local = false
# activate link_token method in the ip_limit method
link_token = false
[botdetection.ip_lists]
# In the limiter, the ip_lists method has priority over all other methods -> if
# an IP is in the pass_ip list, it has unrestricted access and it is also not
# checked if e.g. the "user agent" suggests a bot (e.g. curl).
block_ip = [
# '93.184.216.34', # IPv4 of example.org
# '257.1.1.1', # invalid IP --> will be ignored, logged in ERROR class
]
pass_ip = [
# '192.168.0.0/16', # IPv4 private network
# 'fe80::/10' # IPv6 linklocal / wins over botdetection.ip_limit.filter_link_local
]
# Activate passlist of (hardcoded) IPs from the SearXNG organization,
# e.g. `check.searx.space`.
pass_searxng_org = true
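
The tables and keys above are addressed in the code through the dotted names of
the ``Config`` class.  A sketch, assuming the file shown above is saved as
``limiter.toml`` next to the script (after this patch the schema file ships
with the limiter package):

.. code:: python

   import pathlib
   from searx.botdetection.config import Config

   schema = pathlib.Path("limiter.toml")               # the schema shown above
   local = pathlib.Path("/etc/searxng/limiter.toml")   # optional local overrides

   cfg = Config.from_toml(schema, local, deprecated={})
   print(cfg.get("real_ip.x_for"))                     # 1
   print(cfg.get("botdetection.ip_limit.link_token"))  # False, unless overridden locally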


@@ -99,15 +99,13 @@ def ping(request: flask.Request, token: str):
    The expire time of this ping-key is :py:obj:`PING_LIVE_TIME`.
    """
    from . import limiter  # pylint: disable=import-outside-toplevel, cyclic-import
    from . import redis_client, cfg  # pylint: disable=import-outside-toplevel, cyclic-import
    redis_client = redisdb.client()
    if not redis_client:
        return
    if not token_is_valid(token):
        return
    cfg = limiter.get_cfg()
    real_ip = ip_address(get_real_ip(request))
    network = get_network(real_ip, cfg)