Mirror of https://github.com/searxng/searxng.git, synced 2025-08-02 01:52:21 +02:00

[fix] sh: apply format
Related: https://github.com/searxng/searxng/issues/4803
commit: e9ecdcc350
parent: 2311d16497
18 changed files with 644 additions and 548 deletions
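The hunks below are mostly whitespace and layout normalisation (redirect spacing, case label spacing, subshell and pipeline layout, indentation), plus a few small equivalent rewrites such as dropping redundant quotes inside [[ ]]. The resulting style is what an automatic shell formatter produces. As a sketch only — the commit does not name the tool, so shfmt and the flags below are an assumption, not taken from this change set:

    # hypothetical formatting pass over the repository's shell scripts;
    # tool choice and flags are assumptions, not part of this commit
    git ls-files '*.sh' manage | xargs shfmt -w -i 4 -ci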
|
@@ -37,16 +37,16 @@ setup_ownership() {
     local type="$2"

     case "$type" in
-        file | directory) ;;
-        *)
-            cat <<EOF
+        file | directory) ;;
+        *)
+            cat <<EOF
 !!!
 !!! ERROR
 !!! "$type" is not a valid type, exiting...
 !!!
 EOF
-            exit 1
-            ;;
+            exit 1
+            ;;
     esac

     target_ownership=$(stat -c %U:%G "$target")
manage: 58 changed lines
@@ -65,7 +65,7 @@ while IFS= read -r line; do
     if [ "$line" != "tests/unit/settings/syntaxerror_settings.yml" ]; then
         YAMLLINT_FILES+=("$line")
     fi
-done <<< "$(git ls-files './tests/*.yml' './searx/*.yml' './utils/templates/etc/searxng/*.yml' '.github/*.yml' '.github/*/*.yml')"
+done <<<"$(git ls-files './tests/*.yml' './searx/*.yml' './utils/templates/etc/searxng/*.yml' '.github/*.yml' '.github/*/*.yml')"

 RST_FILES=(
     'README.rst'
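The only change in this hunk is the here-string: the formatter drops the space between <<< and its word. Both spellings are equivalent in Bash; a minimal sketch of the construct (the file glob is illustrative, not taken from the repo):

    # feed a command's output line by line into a loop via a here-string;
    # <<<"$(...)" and <<< "$(...)" behave identically, the spacing is pure style
    files=()
    while IFS= read -r line; do
        files+=("$line")
    done <<<"$(git ls-files '*.yml')"
    printf '%s\n' "${files[@]}"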
@@ -113,7 +113,6 @@ environment ...
 EOF
 }

-
 if [ "$VERBOSE" = "1" ]; then
     SPHINX_VERBOSE="-v"
     PYLINT_VERBOSE="-v"
@@ -126,14 +125,14 @@ webapp.run() {
     local parent_proc="$$"
     (
         if [ "${LIVE_THEME}" ]; then
-            ( themes.live "${LIVE_THEME}" )
+            (themes.live "${LIVE_THEME}")
             kill $parent_proc
         fi
-    )&
+    ) &
     (
         sleep 3
         xdg-open http://127.0.0.1:8888/
-    )&
+    ) &
     SEARXNG_DEBUG=1 pyenv.cmd python -m searx.webapp
 }

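)& and ) & are equivalent; the reformat only adds a space before the background operator, and the inner subshell also loses the padding spaces just inside its parentheses. A self-contained sketch of the same pattern (placeholder commands, not the repo's helpers):

    # run a helper in a backgrounded subshell; "$$" is the parent's PID,
    # so the subshell could signal the parent when the helper finishes
    parent_proc="$$"
    (
        sleep 3                          # stand-in for a long-running watcher
        echo "notifying parent ${parent_proc}"
    ) &
    echo "parent continues immediately"
    wait                                 # reap the background subshell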
@@ -143,10 +142,11 @@ gecko.driver() {

     build_msg INSTALL "gecko.driver"
     # run installation in a subprocess and activate pyenv
-    ( set -e
+    (
+        set -e
         pyenv.activate

-        INSTALLED_VERSION=$(geckodriver -V 2> /dev/null | head -1 | awk '{ print "v" $2}') || INSTALLED_VERSION=""
+        INSTALLED_VERSION=$(geckodriver -V 2>/dev/null | head -1 | awk '{ print "v" $2}') || INSTALLED_VERSION=""
         set +e
         if [ "${INSTALLED_VERSION}" = "${GECKODRIVER_VERSION}" ]; then
             build_msg INSTALL "geckodriver already installed"
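A recurring change in this commit: the compact "( set -e" opener is split so the subshell parenthesis stands alone and set -e gets its own line. Behaviour is identical; set -e still only affects the subshell. A minimal sketch with placeholder steps:

    # before formatting:  ( set -e
    #                          do_step_1
    #                      )
    # after formatting: same subshell, opener on its own line
    (
        set -e                 # abort the subshell on the first failing command
        echo "step 1"
        false                  # stops the subshell here ...
        echo "never reached"
    )
    echo "parent shell keeps running; subshell exit code: $?"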
@@ -154,13 +154,13 @@ gecko.driver() {
         fi
         PLATFORM="$(python -c 'import platform; print(platform.system().lower(), platform.architecture()[0])')"
         case "$PLATFORM" in
-            "linux 32bit" | "linux2 32bit") ARCH="linux32";;
-            "linux 64bit" | "linux2 64bit") ARCH="linux64";;
-            "windows 32 bit") ARCH="win32";;
-            "windows 64 bit") ARCH="win64";;
-            "mac 64bit") ARCH="macos";;
+            "linux 32bit" | "linux2 32bit") ARCH="linux32" ;;
+            "linux 64bit" | "linux2 64bit") ARCH="linux64" ;;
+            "windows 32 bit") ARCH="win32" ;;
+            "windows 64 bit") ARCH="win64" ;;
+            "mac 64bit") ARCH="macos" ;;
         esac
-        GECKODRIVER_URL="https://github.com/mozilla/geckodriver/releases/download/$GECKODRIVER_VERSION/geckodriver-$GECKODRIVER_VERSION-$ARCH.tar.gz";
+        GECKODRIVER_URL="https://github.com/mozilla/geckodriver/releases/download/$GECKODRIVER_VERSION/geckodriver-$GECKODRIVER_VERSION-$ARCH.tar.gz"

         build_msg GECKO "Installing ${PY_ENV_BIN}/geckodriver from $GECKODRIVER_URL"

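Here the formatter normalises case branches: a space around | between alternative patterns and a space before ;;. Purely cosmetic; a small sketch of the normalised style (example patterns are illustrative):

    # normalised case layout: "pat_a | pat_b)" and a space before ";;"
    arch_of() {
        case "$1" in
            "x86_64" | "amd64") echo "amd64" ;;
            "aarch64" | "arm64") echo "arm64" ;;
            *) echo "unknown" ;;
        esac
    }
    arch_of "$(uname -m)"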
@@ -175,13 +175,14 @@ gecko.driver() {
 py.build() {
     build_msg BUILD "python package ${PYDIST}"
     pyenv.cmd python setup.py \
-        sdist -d "${PYDIST}" \
-        bdist_wheel --bdist-dir "${PYBUILD}" -d "${PYDIST}"
+        sdist -d "${PYDIST}" \
+        bdist_wheel --bdist-dir "${PYBUILD}" -d "${PYDIST}"
 }

 py.clean() {
     build_msg CLEAN pyenv
-    ( set -e
+    (
+        set -e
         pyenv.drop
         [ "$VERBOSE" = "1" ] && set -x
         rm -rf "${PYDIST}" "${PYBUILD}" "${PY_ENV}" ./.tox ./*.egg-info
@@ -192,7 +193,7 @@ py.clean() {
 }

 pyenv.check() {
-    cat <<EOF
+    cat <<EOF
 import yaml
 print('import yaml --> OK')
 EOF
@@ -201,13 +202,14 @@ EOF
 pyenv.install() {

     if ! pyenv.OK; then
-        py.clean > /dev/null
+        py.clean >/dev/null
     fi
-    if pyenv.install.OK > /dev/null; then
+    if pyenv.install.OK >/dev/null; then
         return 0
     fi

-    ( set -e
+    (
+        set -e
         pyenv
         build_msg PYENV "[install] pip install --use-pep517 --no-build-isolation -e 'searx${PY_SETUP_EXTRAS}'"
         "${PY_ENV_BIN}/python" -m pip install --use-pep517 --no-build-isolation -e ".${PY_SETUP_EXTRAS}"
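The other recurring normalisation: no space between a redirection operator and its target (>/dev/null, 2>/dev/null, &>/dev/null). The shell parses both spellings the same way; a short sketch:

    # each pair is equivalent; the formatter picks the compact form
    ls / > /dev/null                                    # old style
    ls / >/dev/null                                     # formatted style
    ls /missing 2>/dev/null || echo "stderr was discarded"
    command -v geckodriver &>/dev/null && echo "geckodriver found"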
@@ -220,8 +222,8 @@ pyenv.install() {

 pyenv.uninstall() {
     build_msg PYENV "[pyenv.uninstall] uninstall packages: ${PYOBJECTS}"
-    pyenv.cmd python setup.py develop --uninstall 2>&1 \
-        | prefix_stdout "${_Blue}PYENV ${_creset}[pyenv.uninstall] "
+    pyenv.cmd python setup.py develop --uninstall 2>&1 |
+        prefix_stdout "${_Blue}PYENV ${_creset}[pyenv.uninstall] "

 }

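Multi-line pipelines are reflowed so the | ends the first line instead of starting the continuation; the backslash is then unnecessary, because a line that ends in a pipe already continues onto the next line. A sketch with a stand-in filter (prefix_stdout is a helper from utils/lib.sh):

    # old style: explicit backslash, pipe at the start of the next line
    printf 'a\nb\n' \
        | sed 's/^/OLD /'
    # formatted style: the trailing "|" itself continues the command
    printf 'a\nb\n' |
        sed 's/^/NEW /'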
@@ -243,7 +245,7 @@ docs.prebuild() {
         set -e
         [ "$VERBOSE" = "1" ] && set -x
         mkdir -p "${DOCS_BUILD}/includes"
-        ./utils/searxng.sh searxng.doc.rst > "${DOCS_BUILD}/includes/searxng.rst"
+        ./utils/searxng.sh searxng.doc.rst >"${DOCS_BUILD}/includes/searxng.rst"
         pyenv.cmd searxng_extra/docs_prebuild
     )
     dump_return $?
@@ -253,7 +255,8 @@ docs.prebuild() {
 main() {

     local _type
-    local cmd="$1"; shift
+    local cmd="$1"
+    shift

     if [ "$cmd" == "" ]; then
         help
@@ -262,8 +265,11 @@ main() {
     fi

     case "$cmd" in
-        --getenv) var="$1"; echo "${!var}";;
-        --help) help;;
+        --getenv)
+            var="$1"
+            echo "${!var}"
+            ;;
+        --help) help ;;
         --*)
            help
            err_msg "unknown option $cmd"
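Single-line case branches that chain several commands ("--getenv) var="$1"; echo "${!var}";;") are expanded to one command per line with the ;; terminator on its own line. The ${!var} in that branch is Bash indirect expansion: it prints the value of the variable whose name is stored in var. A small standalone sketch (function and argument layout are illustrative):

    # indirect expansion: look a variable up by name
    getenv_demo() {
        case "$1" in
            --getenv)
                var="$2"
                echo "${!var}"          # "--getenv HOME" prints the value of $HOME
                ;;
            *) echo "usage: getenv_demo --getenv NAME" ;;
        esac
    }
    getenv_demo --getenv HOME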
@@ -8,11 +8,11 @@ build.env.export() {
     GIT_BRANCH="$(git branch | grep '\*' | cut -d' ' -f2-)"
     GIT_REMOTE="$(git config "branch.${GIT_BRANCH}.remote")"
     GIT_URL="$(git config --get "remote.${GIT_REMOTE}.url")"
-    if [[ "${GIT_URL}" == git@* ]]; then
+    if [[ ${GIT_URL} == git@* ]]; then
         GIT_URL="${GIT_URL/://}"
         GIT_URL="${GIT_URL/git@/https://}"
     fi
-    if [[ "${GIT_URL}" == *.git ]]; then
+    if [[ ${GIT_URL} == *.git ]]; then
         GIT_URL="${GIT_URL%.git}"
     fi

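Inside [[ ... ]] Bash performs no word splitting or globbing on the left-hand operand, so the quotes around ${GIT_URL} are redundant and the formatter drops them. Quoting still matters on the right-hand side of ==, where an unquoted operand is a pattern. Sketch:

    GIT_URL="git@github.com:searxng/searxng.git"
    # both tests behave the same inside [[ ]]; no word splitting happens here
    [[ "${GIT_URL}" == git@* ]] && echo "quoted operand: ssh remote"
    [[ ${GIT_URL} == git@* ]] && echo "unquoted operand: ssh remote"
    # the right-hand side is different: quoting it makes it a literal string
    [[ ${GIT_URL} == "git@*" ]] || echo 'a quoted "git@*" matches only that exact string'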
@@ -27,6 +27,6 @@ build.env.export() {

 }

-pushd "${REPO_ROOT}" &> /dev/null
+pushd "${REPO_ROOT}" &>/dev/null
 build.env.export
-popd &> /dev/null
+popd &>/dev/null
utils/lib.sh: 379 changed lines (file diff suppressed because it is too large)
|
@ -23,14 +23,14 @@
|
|||
# configure golang environment
|
||||
# ----------------------------
|
||||
|
||||
[[ -z "${GO_VERSION}" ]] && GO_VERSION="go1.17.3"
|
||||
[[ -z ${GO_VERSION} ]] && GO_VERSION="go1.17.3"
|
||||
|
||||
GO_DL_URL="https://golang.org/dl"
|
||||
|
||||
# implement go functions
|
||||
# -----------------------
|
||||
|
||||
go.help(){
|
||||
go.help() {
|
||||
cat <<EOF
|
||||
go.:
|
||||
ls : list golang binary archives (stable)
|
||||
|
@ -40,7 +40,7 @@ go.:
|
|||
EOF
|
||||
}
|
||||
|
||||
go.ls(){
|
||||
go.ls() {
|
||||
python <<EOF
|
||||
import sys, json, requests
|
||||
resp = requests.get("${GO_DL_URL}/?mode=json&include=all")
|
||||
|
@ -54,7 +54,7 @@ for ver in json.loads(resp.text):
|
|||
EOF
|
||||
}
|
||||
|
||||
go.ver_info(){
|
||||
go.ver_info() {
|
||||
|
||||
# print information about a golang distribution. To print filename
|
||||
# sha256 and size of the archive that fits to your OS and host:
|
||||
|
@ -84,15 +84,15 @@ EOF
|
|||
}
|
||||
|
||||
go.os() {
|
||||
local OS
|
||||
case "$(command uname -a)xx" in
|
||||
Linux\ *) OS=linux ;;
|
||||
Darwin\ *) OS=darwin ;;
|
||||
FreeBSD\ *) OS=freebsd ;;
|
||||
CYGWIN* | MSYS* | MINGW*) OS=windows ;;
|
||||
*) die 42 "OS is unknown: $(command uname -a)" ;;
|
||||
esac
|
||||
echo "${OS}"
|
||||
local OS
|
||||
case "$(command uname -a)xx" in
|
||||
Linux\ *) OS=linux ;;
|
||||
Darwin\ *) OS=darwin ;;
|
||||
FreeBSD\ *) OS=freebsd ;;
|
||||
CYGWIN* | MSYS* | MINGW*) OS=windows ;;
|
||||
*) die 42 "OS is unknown: $(command uname -a)" ;;
|
||||
esac
|
||||
echo "${OS}"
|
||||
}
|
||||
|
||||
go.arch() {
|
||||
|
@ -104,7 +104,7 @@ go.arch() {
|
|||
"armv8") ARCH=arm64 ;;
|
||||
.*386.*) ARCH=386 ;;
|
||||
ppc64*) ARCH=ppc64le ;;
|
||||
*) die 42 "ARCH is unknown: $(command uname -m)" ;;
|
||||
*) die 42 "ARCH is unknown: $(command uname -m)" ;;
|
||||
esac
|
||||
echo "${ARCH}"
|
||||
}
|
||||
|
@ -140,14 +140,14 @@ go.golang() {
|
|||
info_msg "Download go binary ${fname} (${size}B)"
|
||||
cache_download "${GO_DL_URL}/${fname}" "${fname}"
|
||||
|
||||
pushd "${CACHE}" &> /dev/null
|
||||
echo "${sha} ${fname}" > "${fname}.sha256"
|
||||
pushd "${CACHE}" &>/dev/null
|
||||
echo "${sha} ${fname}" >"${fname}.sha256"
|
||||
if ! sha256sum -c "${fname}.sha256" >/dev/null; then
|
||||
die 42 "downloaded file ${fname} checksum does not match"
|
||||
else
|
||||
info_msg "${fname} checksum OK"
|
||||
fi
|
||||
popd &> /dev/null
|
||||
popd &>/dev/null
|
||||
|
||||
info_msg "install golang"
|
||||
tee_stderr 0.1 <<EOF | sudo -i -u "${user}" | prefix_stdout "${userpr}"
|
||||
|
@ -201,7 +201,7 @@ go.bash() {
|
|||
sudo -i -u "${user}" bash --init-file "~${user}/.go_env"
|
||||
}
|
||||
|
||||
go.version(){
|
||||
go.version() {
|
||||
local user
|
||||
user="${1:-${USERNAME}}"
|
||||
sudo -i -u "${user}" <<EOF
|
||||
|
|
|
@ -17,8 +17,8 @@ declare main_cmd
|
|||
|
||||
NVM_LOCAL_FOLDER=.nvm
|
||||
|
||||
[[ -z "${NVM_GIT_URL}" ]] && NVM_GIT_URL="https://github.com/nvm-sh/nvm.git"
|
||||
[[ -z "${NVM_MIN_NODE_VER}" ]] && NVM_MIN_NODE_VER="16.13.0"
|
||||
[[ -z ${NVM_GIT_URL} ]] && NVM_GIT_URL="https://github.com/nvm-sh/nvm.git"
|
||||
[[ -z ${NVM_MIN_NODE_VER} ]] && NVM_MIN_NODE_VER="16.13.0"
|
||||
|
||||
# initialize nvm environment
|
||||
# -------------------------
|
||||
|
@ -35,7 +35,7 @@ nvm.is_installed() {
|
|||
[[ -f "${NVM_DIR}/nvm.sh" ]]
|
||||
}
|
||||
|
||||
if [[ -z "${NVM_DIR}" ]]; then
|
||||
if [[ -z ${NVM_DIR} ]]; then
|
||||
# nvm is not pre-installed in $HOME. Prepare for using nvm from <repo-root>
|
||||
NVM_DIR="$(git rev-parse --show-toplevel)/${NVM_LOCAL_FOLDER}"
|
||||
fi
|
||||
|
@ -105,7 +105,7 @@ nvm.install() {
|
|||
info_msg "install (update) NVM at ${NVM_DIR}"
|
||||
if nvm.is_installed; then
|
||||
info_msg "already cloned at: ${NVM_DIR}"
|
||||
pushd "${NVM_DIR}" &> /dev/null
|
||||
pushd "${NVM_DIR}" &>/dev/null
|
||||
git fetch --all | prefix_stdout " ${_Yellow}||${_creset} "
|
||||
else
|
||||
# delete any leftovers from previous installations
|
||||
|
@ -114,14 +114,14 @@ nvm.install() {
|
|||
fi
|
||||
info_msg "clone: ${NVM_GIT_URL}"
|
||||
git clone "${NVM_GIT_URL}" "${NVM_DIR}" 2>&1 | prefix_stdout " ${_Yellow}||${_creset} "
|
||||
pushd "${NVM_DIR}" &> /dev/null
|
||||
pushd "${NVM_DIR}" &>/dev/null
|
||||
git config --local advice.detachedHead false
|
||||
fi
|
||||
NVM_VERSION_TAG="$(git rev-list --tags --max-count=1)"
|
||||
NVM_VERSION_TAG="$(git describe --abbrev=0 --tags --match "v[0-9]*" "${NVM_VERSION_TAG}")"
|
||||
info_msg "checkout ${NVM_VERSION_TAG}"
|
||||
git checkout "${NVM_VERSION_TAG}" 2>&1 | prefix_stdout " ${_Yellow}||${_creset} "
|
||||
popd &> /dev/null
|
||||
popd &>/dev/null
|
||||
if [ -f "${REPO_ROOT}/.nvm_packages" ]; then
|
||||
cp "${REPO_ROOT}/.nvm_packages" "${NVM_DIR}/default-packages"
|
||||
fi
|
||||
|
|
|
@ -12,7 +12,7 @@ REDIS_GROUP="searxng-redis"
|
|||
REDIS_SERVICE_NAME="searxng-redis"
|
||||
REDIS_SYSTEMD_UNIT="${SYSTEMD_UNITS}/${REDIS_SERVICE_NAME}.service"
|
||||
|
||||
redis.help(){
|
||||
redis.help() {
|
||||
cat <<EOF
|
||||
redis.:
|
||||
remove : delete user (${REDIS_USER}) and remove service (${REDIS_SERVICE_NAME})
|
||||
|
@ -21,7 +21,6 @@ redis.:
|
|||
EOF
|
||||
}
|
||||
|
||||
|
||||
redis.remove() {
|
||||
sudo_or_exit
|
||||
(
|
||||
|
@ -36,7 +35,6 @@ redis.shell() {
|
|||
interactive_shell "${REDIS_USER}"
|
||||
}
|
||||
|
||||
|
||||
redis.userdel() {
|
||||
sudo_or_exit
|
||||
drop_service_account "${REDIS_USER}"
|
||||
|
|
|
@ -41,28 +41,28 @@ container.build() {
|
|||
|
||||
# Setup arch specific
|
||||
case $parch in
|
||||
"X64" | "x86_64" | "amd64")
|
||||
dockerfile="Dockerfile"
|
||||
arch="amd64"
|
||||
variant=""
|
||||
platform="linux/$arch"
|
||||
;;
|
||||
"ARM64" | "aarch64" | "arm64")
|
||||
dockerfile="Dockerfile"
|
||||
arch="arm64"
|
||||
variant=""
|
||||
platform="linux/$arch"
|
||||
;;
|
||||
"ARMV7" | "armhf" | "armv7l" | "armv7")
|
||||
dockerfile="Dockerfile"
|
||||
arch="arm"
|
||||
variant="v7"
|
||||
platform="linux/$arch/$variant"
|
||||
;;
|
||||
*)
|
||||
err_msg "Unsupported architecture; $parch"
|
||||
exit 1
|
||||
;;
|
||||
"X64" | "x86_64" | "amd64")
|
||||
dockerfile="Dockerfile"
|
||||
arch="amd64"
|
||||
variant=""
|
||||
platform="linux/$arch"
|
||||
;;
|
||||
"ARM64" | "aarch64" | "arm64")
|
||||
dockerfile="Dockerfile"
|
||||
arch="arm64"
|
||||
variant=""
|
||||
platform="linux/$arch"
|
||||
;;
|
||||
"ARMV7" | "armhf" | "armv7l" | "armv7")
|
||||
dockerfile="Dockerfile"
|
||||
arch="arm"
|
||||
variant="v7"
|
||||
platform="linux/$arch/$variant"
|
||||
;;
|
||||
*)
|
||||
err_msg "Unsupported architecture; $parch"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
info_msg "Selected platform: $platform"
|
||||
|
||||
|
@ -117,7 +117,7 @@ container.build() {
|
|||
--tag="localhost/$CONTAINER_IMAGE_ORGANIZATION/$CONTAINER_IMAGE_NAME:builder" \
|
||||
--file="./container/$dockerfile" \
|
||||
.
|
||||
build_msg CONTAINER "Image \"builder\" built"
|
||||
build_msg CONTAINER 'Image "builder" built'
|
||||
|
||||
# shellcheck disable=SC2086
|
||||
"$container_engine" $params_build \
|
||||
|
@ -161,25 +161,25 @@ container.test() {
|
|||
|
||||
# Setup arch specific
|
||||
case $parch in
|
||||
"X64" | "x86_64" | "amd64")
|
||||
arch="amd64"
|
||||
variant=""
|
||||
platform="linux/$arch"
|
||||
;;
|
||||
"ARM64" | "aarch64" | "arm64")
|
||||
arch="arm64"
|
||||
variant=""
|
||||
platform="linux/$arch"
|
||||
;;
|
||||
"ARMV7" | "armhf" | "armv7l" | "armv7")
|
||||
arch="arm"
|
||||
variant="v7"
|
||||
platform="linux/$arch/$variant"
|
||||
;;
|
||||
*)
|
||||
err_msg "Unsupported architecture; $parch"
|
||||
exit 1
|
||||
;;
|
||||
"X64" | "x86_64" | "amd64")
|
||||
arch="amd64"
|
||||
variant=""
|
||||
platform="linux/$arch"
|
||||
;;
|
||||
"ARM64" | "aarch64" | "arm64")
|
||||
arch="arm64"
|
||||
variant=""
|
||||
platform="linux/$arch"
|
||||
;;
|
||||
"ARMV7" | "armhf" | "armv7l" | "armv7")
|
||||
arch="arm"
|
||||
variant="v7"
|
||||
platform="linux/$arch/$variant"
|
||||
;;
|
||||
*)
|
||||
err_msg "Unsupported architecture; $parch"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
build_msg CONTAINER "Selected platform: $platform"
|
||||
|
||||
|
@ -224,25 +224,25 @@ container.push() {
|
|||
|
||||
for arch in "${release_archs[@]}"; do
|
||||
case $arch in
|
||||
"X64" | "x86_64" | "amd64")
|
||||
archs+=("amd64")
|
||||
variants+=("")
|
||||
platforms+=("linux/${archs[-1]}")
|
||||
;;
|
||||
"ARM64" | "aarch64" | "arm64")
|
||||
archs+=("arm64")
|
||||
variants+=("")
|
||||
platforms+=("linux/${archs[-1]}")
|
||||
;;
|
||||
"ARMV7" | "armv7" | "armhf" | "arm")
|
||||
archs+=("arm")
|
||||
variants+=("v7")
|
||||
platforms+=("linux/${archs[-1]}/${variants[-1]}")
|
||||
;;
|
||||
*)
|
||||
err_msg "Unsupported architecture; $arch"
|
||||
exit 1
|
||||
;;
|
||||
"X64" | "x86_64" | "amd64")
|
||||
archs+=("amd64")
|
||||
variants+=("")
|
||||
platforms+=("linux/${archs[-1]}")
|
||||
;;
|
||||
"ARM64" | "aarch64" | "arm64")
|
||||
archs+=("arm64")
|
||||
variants+=("")
|
||||
platforms+=("linux/${archs[-1]}")
|
||||
;;
|
||||
"ARMV7" | "armv7" | "armhf" | "arm")
|
||||
archs+=("arm")
|
||||
variants+=("v7")
|
||||
platforms+=("linux/${archs[-1]}/${variants[-1]}")
|
||||
;;
|
||||
*)
|
||||
err_msg "Unsupported architecture; $arch"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
#!/usr/bin/env bash
|
||||
# SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
|
||||
data.help(){
|
||||
data.help() {
|
||||
cat <<EOF
|
||||
data.:
|
||||
all : update searx/sxng_locales.py and searx/data/*
|
||||
|
@ -13,12 +13,13 @@ EOF
|
|||
}
|
||||
|
||||
data.all() {
|
||||
( set -e
|
||||
(
|
||||
set -e
|
||||
|
||||
pyenv.activate
|
||||
data.traits
|
||||
data.useragents
|
||||
data.locales
|
||||
data.locales
|
||||
|
||||
build_msg DATA "update searx/data/osm_keys_tags.json"
|
||||
pyenv.cmd python searxng_extra/update/update_osm_keys_tags.py
|
||||
|
@ -35,9 +36,9 @@ data.all() {
|
|||
)
|
||||
}
|
||||
|
||||
|
||||
data.traits() {
|
||||
( set -e
|
||||
(
|
||||
set -e
|
||||
pyenv.activate
|
||||
build_msg DATA "update searx/data/engine_traits.json"
|
||||
python searxng_extra/update/update_engine_traits.py
|
||||
|
@ -53,7 +54,8 @@ data.useragents() {
|
|||
}
|
||||
|
||||
data.locales() {
|
||||
( set -e
|
||||
(
|
||||
set -e
|
||||
pyenv.activate
|
||||
build_msg DATA "update searx/data/locales.json"
|
||||
python searxng_extra/update/update_locales.py
|
||||
|
@ -61,8 +63,9 @@ data.locales() {
|
|||
dump_return $?
|
||||
}
|
||||
|
||||
data.currencies(){
|
||||
( set -e
|
||||
data.currencies() {
|
||||
(
|
||||
set -e
|
||||
pyenv.activate
|
||||
build_msg DATA "update searx/data/currencies.json"
|
||||
python searxng_extra/update/update_currencies.py
|
||||
|
|
|
@ -6,7 +6,7 @@ declare _creset
|
|||
|
||||
export NODE_MINIMUM_VERSION="18.17.0"
|
||||
|
||||
node.help(){
|
||||
node.help() {
|
||||
cat <<EOF
|
||||
node.:
|
||||
env : download & install SearXNG's npm dependencies locally
|
||||
|
@ -24,7 +24,8 @@ nodejs.ensure() {
|
|||
|
||||
node.env() {
|
||||
nodejs.ensure
|
||||
( set -e
|
||||
(
|
||||
set -e
|
||||
build_msg INSTALL "[npm] ./client/simple/package.json"
|
||||
npm --prefix client/simple install
|
||||
)
|
||||
|
@ -43,20 +44,22 @@ node.clean() {
|
|||
return 0
|
||||
fi
|
||||
build_msg CLEAN "themes -- locally installed npm dependencies"
|
||||
( set -e
|
||||
npm --prefix client/simple run clean \
|
||||
| prefix_stdout "${_Blue}CLEAN ${_creset} "
|
||||
if [ "${PIPESTATUS[0]}" -ne "0" ]; then
|
||||
(
|
||||
set -e
|
||||
npm --prefix client/simple run clean |
|
||||
prefix_stdout "${_Blue}CLEAN ${_creset} "
|
||||
if [ "${PIPESTATUS[0]}" -ne "0" ]; then
|
||||
return 1
|
||||
fi
|
||||
fi
|
||||
)
|
||||
build_msg CLEAN "locally installed developer and CI tools"
|
||||
( set -e
|
||||
npm --prefix . run clean \
|
||||
| prefix_stdout "${_Blue}CLEAN ${_creset} "
|
||||
if [ "${PIPESTATUS[0]}" -ne "0" ]; then
|
||||
(
|
||||
set -e
|
||||
npm --prefix . run clean |
|
||||
prefix_stdout "${_Blue}CLEAN ${_creset} "
|
||||
if [ "${PIPESTATUS[0]}" -ne "0" ]; then
|
||||
return 1
|
||||
fi
|
||||
fi
|
||||
)
|
||||
dump_return $?
|
||||
}
|
||||
|
|
|
@ -1,7 +1,6 @@
|
|||
#!/usr/bin/env bash
|
||||
# SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
|
||||
|
||||
STATIC_BUILD_COMMIT="[build] /static"
|
||||
STATIC_BUILT_PATHS=(
|
||||
'searx/templates/simple/icons.html'
|
||||
|
@ -9,7 +8,7 @@ STATIC_BUILT_PATHS=(
|
|||
'client/simple/package-lock.json'
|
||||
)
|
||||
|
||||
static.help(){
|
||||
static.help() {
|
||||
cat <<EOF
|
||||
static.build.: ${STATIC_BUILD_COMMIT}
|
||||
commit : build & commit /static folder
|
||||
|
@ -57,8 +56,8 @@ static.build.drop() {
|
|||
|
||||
# get only last (option -n1) local commit not in remotes
|
||||
branch="$(git branch --show-current)"
|
||||
last_commit_id="$(git log -n1 "${branch}" --pretty=format:'%h'\
|
||||
--not --exclude="${branch}" --branches --remotes)"
|
||||
last_commit_id="$(git log -n1 "${branch}" --pretty=format:'%h' \
|
||||
--not --exclude="${branch}" --branches --remotes)"
|
||||
|
||||
if [ -z "${last_commit_id}" ]; then
|
||||
err_msg "there are no local commits"
|
||||
|
@ -96,7 +95,8 @@ static.build.commit() {
|
|||
# drop existing commit from previous build
|
||||
static.build.drop &>/dev/null
|
||||
|
||||
( set -e
|
||||
(
|
||||
set -e
|
||||
# fix & build the themes
|
||||
themes.fix
|
||||
themes.lint
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
#!/usr/bin/env bash
|
||||
# SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
|
||||
test.help(){
|
||||
test.help() {
|
||||
cat <<EOF
|
||||
test.:
|
||||
yamllint : lint YAML files (YAMLLINT_FILES)
|
||||
|
@ -22,13 +22,14 @@ if [ "$VERBOSE" = "1" ]; then
|
|||
fi
|
||||
|
||||
test.yamllint() {
|
||||
build_msg TEST "[yamllint] \$YAMLLINT_FILES"
|
||||
build_msg TEST "[yamllint] ${YAMLLINT_FILES[*]}"
|
||||
pyenv.cmd yamllint --strict --format parsable "${YAMLLINT_FILES[@]}"
|
||||
dump_return $?
|
||||
}
|
||||
|
||||
test.pylint() {
|
||||
( set -e
|
||||
(
|
||||
set -e
|
||||
pyenv.activate
|
||||
PYLINT_OPTIONS="--rcfile .pylintrc"
|
||||
|
||||
|
@ -41,10 +42,10 @@ test.pylint() {
|
|||
build_msg TEST "[pylint] ./searx ./searxng_extra ./tests"
|
||||
# shellcheck disable=SC2086
|
||||
pylint ${PYLINT_OPTIONS} ${PYLINT_VERBOSE} \
|
||||
--ignore=searx/engines \
|
||||
searx searx/searxng.msg \
|
||||
searxng_extra searxng_extra/docs_prebuild \
|
||||
tests
|
||||
--ignore=searx/engines \
|
||||
searx searx/searxng.msg \
|
||||
searxng_extra searxng_extra/docs_prebuild \
|
||||
tests
|
||||
)
|
||||
dump_return $?
|
||||
}
|
||||
|
@ -63,13 +64,13 @@ test.types.dev() {
|
|||
build_msg TEST "[pyright/types] suppress warnings related to intentional monkey patching"
|
||||
# We run Pyright in the virtual environment because pyright executes
|
||||
# "python" to determine the Python version.
|
||||
pyenv.cmd npx --no-install pyright -p pyrightconfig.json \
|
||||
| grep -E '\.py:[0-9]+:[0-9]+'\
|
||||
| grep -v '/engines/.*.py.* - warning: "logger" is not defined'\
|
||||
| grep -v '/plugins/.*.py.* - error: "logger" is not defined'\
|
||||
| grep -v '/engines/.*.py.* - warning: "supported_languages" is not defined' \
|
||||
| grep -v '/engines/.*.py.* - warning: "language_aliases" is not defined' \
|
||||
| grep -v '/engines/.*.py.* - warning: "categories" is not defined'
|
||||
pyenv.cmd npx --no-install pyright -p pyrightconfig.json |
|
||||
grep -E '\.py:[0-9]+:[0-9]+' |
|
||||
grep -v '/engines/.*.py.* - warning: "logger" is not defined' |
|
||||
grep -v '/plugins/.*.py.* - error: "logger" is not defined' |
|
||||
grep -v '/engines/.*.py.* - warning: "supported_languages" is not defined' |
|
||||
grep -v '/engines/.*.py.* - warning: "language_aliases" is not defined' |
|
||||
grep -v '/engines/.*.py.* - warning: "categories" is not defined'
|
||||
# ignore exit value from pyright
|
||||
# dump_return ${PIPESTATUS[0]}
|
||||
return 0
|
||||
|
@ -88,13 +89,13 @@ test.types.ci() {
|
|||
build_msg TEST "[pyright] suppress warnings related to intentional monkey patching"
|
||||
# We run Pyright in the virtual environment because pyright executes
|
||||
# "python" to determine the Python version.
|
||||
pyenv.cmd npx --no-install pyright -p pyrightconfig-ci.json \
|
||||
| grep -E '\.py:[0-9]+:[0-9]+'\
|
||||
| grep -v '/engines/.*.py.* - warning: "logger" is not defined'\
|
||||
| grep -v '/plugins/.*.py.* - error: "logger" is not defined'\
|
||||
| grep -v '/engines/.*.py.* - warning: "supported_languages" is not defined' \
|
||||
| grep -v '/engines/.*.py.* - warning: "language_aliases" is not defined' \
|
||||
| grep -v '/engines/.*.py.* - warning: "categories" is not defined'
|
||||
pyenv.cmd npx --no-install pyright -p pyrightconfig-ci.json |
|
||||
grep -E '\.py:[0-9]+:[0-9]+' |
|
||||
grep -v '/engines/.*.py.* - warning: "logger" is not defined' |
|
||||
grep -v '/plugins/.*.py.* - error: "logger" is not defined' |
|
||||
grep -v '/engines/.*.py.* - warning: "supported_languages" is not defined' |
|
||||
grep -v '/engines/.*.py.* - warning: "language_aliases" is not defined' |
|
||||
grep -v '/engines/.*.py.* - warning: "categories" is not defined'
|
||||
# ignore exit value from pyright
|
||||
# dump_return ${PIPESTATUS[0]}
|
||||
return 0
|
||||
|
@ -121,7 +122,8 @@ test.unit() {
|
|||
|
||||
test.coverage() {
|
||||
build_msg TEST 'unit test coverage'
|
||||
( set -e
|
||||
(
|
||||
set -e
|
||||
pyenv.activate
|
||||
# shellcheck disable=SC2086
|
||||
python -m nose2 ${TEST_NOSE2_VERBOSE} -C --log-capture --with-coverage --coverage searx -s tests/unit
|
||||
|
@ -142,7 +144,7 @@ test.rst() {
|
|||
build_msg TEST "[reST markup] ${RST_FILES[*]}"
|
||||
|
||||
for rst in "${RST_FILES[@]}"; do
|
||||
pyenv.cmd rst2html --halt error "$rst" > /dev/null || die 42 "fix issue in $rst"
|
||||
pyenv.cmd rst2html --halt error "$rst" >/dev/null || die 42 "fix issue in $rst"
|
||||
done
|
||||
}
|
||||
|
||||
|
@ -160,7 +162,7 @@ test.pybabel() {
|
|||
}
|
||||
|
||||
test.clean() {
|
||||
build_msg CLEAN "test stuff"
|
||||
build_msg CLEAN "test stuff"
|
||||
rm -rf geckodriver.log .coverage coverage/
|
||||
dump_return $?
|
||||
}
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
#!/usr/bin/env bash
|
||||
# SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
|
||||
themes.help(){
|
||||
themes.help() {
|
||||
cat <<EOF
|
||||
themes.:
|
||||
all : test & build all themes
|
||||
|
@ -13,14 +13,16 @@ EOF
|
|||
}
|
||||
|
||||
themes.all() {
|
||||
( set -e
|
||||
(
|
||||
set -e
|
||||
vite.simple.build
|
||||
)
|
||||
dump_return $?
|
||||
}
|
||||
|
||||
themes.simple() {
|
||||
( set -e
|
||||
(
|
||||
set -e
|
||||
build_msg SIMPLE "theme: run build (simple)"
|
||||
vite.simple.build
|
||||
)
|
||||
|
@ -28,7 +30,8 @@ themes.simple() {
|
|||
}
|
||||
|
||||
themes.fix() {
|
||||
( set -e
|
||||
(
|
||||
set -e
|
||||
build_msg SIMPLE "theme: fix (all themes)"
|
||||
vite.simple.fix
|
||||
)
|
||||
|
@ -36,7 +39,8 @@ themes.fix() {
|
|||
}
|
||||
|
||||
themes.lint() {
|
||||
( set -e
|
||||
(
|
||||
set -e
|
||||
build_msg SIMPLE "theme: lint (all themes)"
|
||||
vite.simple.lint
|
||||
)
|
||||
|
@ -44,7 +48,8 @@ themes.lint() {
|
|||
}
|
||||
|
||||
themes.test() {
|
||||
( set -e
|
||||
(
|
||||
set -e
|
||||
# we run a build to test (in CI)
|
||||
build_msg SIMPLE "theme: run build (to test)"
|
||||
vite.simple.build
|
||||
|
|
|
@ -4,7 +4,7 @@
|
|||
declare _Blue
|
||||
declare _creset
|
||||
|
||||
vite.help(){
|
||||
vite.help() {
|
||||
cat <<EOF
|
||||
vite.: .. to be done ..
|
||||
simple.:
|
||||
|
@ -30,7 +30,8 @@ VITE_SIMPLE_THEME="${REPO_ROOT}/client/simple"
|
|||
# }
|
||||
|
||||
vite.simple.build() {
|
||||
( set -e
|
||||
(
|
||||
set -e
|
||||
templates.simple.pygments
|
||||
|
||||
node.env
|
||||
|
@ -39,19 +40,21 @@ vite.simple.build() {
|
|||
pushd "${VITE_SIMPLE_THEME}"
|
||||
npm install
|
||||
npm run build
|
||||
popd &> /dev/null
|
||||
popd &>/dev/null
|
||||
)
|
||||
}
|
||||
|
||||
vite.simple.fix() {
|
||||
( set -e
|
||||
(
|
||||
set -e
|
||||
node.env
|
||||
npm --prefix client/simple run fix
|
||||
)
|
||||
}
|
||||
|
||||
vite.simple.lint() {
|
||||
( set -e
|
||||
(
|
||||
set -e
|
||||
node.env
|
||||
npm --prefix client/simple run lint
|
||||
)
|
||||
|
@ -59,8 +62,8 @@ vite.simple.lint() {
|
|||
|
||||
templates.simple.pygments() {
|
||||
build_msg PYGMENTS "searxng_extra/update/update_pygments.py"
|
||||
pyenv.cmd python searxng_extra/update/update_pygments.py \
|
||||
| prefix_stdout "${_Blue}PYGMENTS ${_creset} "
|
||||
pyenv.cmd python searxng_extra/update/update_pygments.py |
|
||||
prefix_stdout "${_Blue}PYGMENTS ${_creset} "
|
||||
if [ "${PIPESTATUS[0]}" -ne "0" ]; then
|
||||
build_msg PYGMENTS "building LESS files for pygments failed"
|
||||
return 1
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
#!/usr/bin/env bash
|
||||
# SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
|
||||
weblate.help(){
|
||||
weblate.help() {
|
||||
cat <<EOF
|
||||
weblate.:
|
||||
push.translations: push translation changes from SearXNG to Weblate's counterpart
|
||||
|
@ -19,8 +19,9 @@ weblate.translations.worktree() {
|
|||
#
|
||||
# remote weblate https://translate.codeberg.org/git/searxng/searxng/
|
||||
|
||||
( set -e
|
||||
if ! git remote get-url weblate 2> /dev/null; then
|
||||
(
|
||||
set -e
|
||||
if ! git remote get-url weblate 2>/dev/null; then
|
||||
git remote add weblate https://translate.codeberg.org/git/searxng/searxng/
|
||||
fi
|
||||
if [ -d "${TRANSLATIONS_WORKTREE}" ]; then
|
||||
|
@ -49,7 +50,8 @@ weblate.to.translations() {
|
|||
# 4. In translations worktree, merge changes of branch 'translations' from
|
||||
# remote 'weblate' and push it on branch 'translations' of 'origin'
|
||||
|
||||
( set -e
|
||||
(
|
||||
set -e
|
||||
pyenv.activate
|
||||
if [ "$(wlc lock-status)" != "locked: True" ]; then
|
||||
die 1 "weblate must be locked, currently: $(wlc lock-status)"
|
||||
|
@ -77,14 +79,18 @@ weblate.translations.commit() {
|
|||
# create a commit in the local branch (master)
|
||||
|
||||
local existing_commit_hash commit_body commit_message exitcode
|
||||
( set -e
|
||||
(
|
||||
set -e
|
||||
pyenv.activate
|
||||
# lock change on weblate
|
||||
wlc lock
|
||||
|
||||
# get translations branch in git worktree (TRANSLATIONS_WORKTREE)
|
||||
weblate.translations.worktree
|
||||
existing_commit_hash=$(cd "${TRANSLATIONS_WORKTREE}"; git log -n1 --pretty=format:'%h')
|
||||
existing_commit_hash=$(
|
||||
cd "${TRANSLATIONS_WORKTREE}"
|
||||
git log -n1 --pretty=format:'%h'
|
||||
)
|
||||
|
||||
# pull weblate commits
|
||||
weblate.to.translations
|
||||
|
@ -95,20 +101,23 @@ weblate.translations.commit() {
|
|||
# compile translations
|
||||
build_msg BABEL 'compile translation catalogs into binary MO files'
|
||||
pybabel compile --statistics \
|
||||
-d "searx/translations"
|
||||
-d "searx/translations"
|
||||
|
||||
# update searx/data/translation_labels.json
|
||||
data.locales
|
||||
|
||||
# git add/commit (no push)
|
||||
commit_body=$(cd "${TRANSLATIONS_WORKTREE}"; git log --pretty=format:'%h - %as - %aN <%ae>' "${existing_commit_hash}..HEAD")
|
||||
commit_body=$(
|
||||
cd "${TRANSLATIONS_WORKTREE}"
|
||||
git log --pretty=format:'%h - %as - %aN <%ae>' "${existing_commit_hash}..HEAD"
|
||||
)
|
||||
commit_message=$(echo -e "[l10n] update translations from Weblate\n\n${commit_body}")
|
||||
git add searx/translations
|
||||
git add searx/data/locales.json
|
||||
git commit -m "${commit_message}"
|
||||
)
|
||||
exitcode=$?
|
||||
( # make sure to always unlock weblate
|
||||
( # make sure to always unlock weblate
|
||||
set -e
|
||||
pyenv.cmd wlc unlock
|
||||
)
|
||||
|
@ -133,9 +142,10 @@ weblate.push.translations() {
|
|||
# 5. Notify Weblate to pull updated 'master' & 'translations' branch.
|
||||
|
||||
local messages_pot diff_messages_pot last_commit_hash last_commit_detail \
|
||||
exitcode
|
||||
exitcode
|
||||
messages_pot="${TRANSLATIONS_WORKTREE}/searx/translations/messages.pot"
|
||||
( set -e
|
||||
(
|
||||
set -e
|
||||
pyenv.activate
|
||||
# get translations branch in git worktree (TRANSLATIONS_WORKTREE)
|
||||
weblate.translations.worktree
|
||||
|
@ -143,12 +153,14 @@ weblate.push.translations() {
|
|||
# update messages.pot in the master branch
|
||||
build_msg BABEL 'extract messages from source files and generate POT file'
|
||||
pybabel extract -F babel.cfg \
|
||||
-o "${messages_pot}" \
|
||||
"searx/"
|
||||
-o "${messages_pot}" \
|
||||
"searx/"
|
||||
|
||||
# stop if there is no meaningful change in the master branch
|
||||
diff_messages_pot=$(cd "${TRANSLATIONS_WORKTREE}";\
|
||||
git diff -- "searx/translations/messages.pot")
|
||||
diff_messages_pot=$(
|
||||
cd "${TRANSLATIONS_WORKTREE}"
|
||||
git diff -- "searx/translations/messages.pot"
|
||||
)
|
||||
if ! echo "$diff_messages_pot" | grep -qE "[\+\-](msgid|msgstr)"; then
|
||||
build_msg BABEL 'no changes detected, exiting'
|
||||
return 42
|
||||
|
@ -160,7 +172,7 @@ weblate.push.translations() {
|
|||
return 0
|
||||
fi
|
||||
if [ "$exitcode" -gt 0 ]; then
|
||||
return $exitcode
|
||||
return $exitcode
|
||||
fi
|
||||
(
|
||||
set -e
|
||||
|
@ -192,7 +204,7 @@ weblate.push.translations() {
|
|||
-d "${TRANSLATIONS_WORKTREE}/searx/translations"
|
||||
|
||||
# git add/commit/push
|
||||
last_commit_hash=$(git log -n1 --pretty=format:'%h')
|
||||
last_commit_hash=$(git log -n1 --pretty=format:'%h')
|
||||
last_commit_detail=$(git log -n1 --pretty=format:'%h - %as - %aN <%ae>' "${last_commit_hash}")
|
||||
|
||||
pushd "${TRANSLATIONS_WORKTREE}"
|
||||
|
@ -207,7 +219,7 @@ weblate.push.translations() {
|
|||
wlc pull
|
||||
)
|
||||
exitcode=$?
|
||||
( # make sure to always unlock weblate
|
||||
( # make sure to always unlock weblate
|
||||
set -e
|
||||
pyenv.activate
|
||||
wlc unlock
|
||||
|
|
|
@ -5,10 +5,10 @@ valkey.distro.setup() {
|
|||
# shellcheck disable=SC2034
|
||||
|
||||
case $DIST_ID in
|
||||
ubuntu|debian)
|
||||
ubuntu | debian)
|
||||
VALKEY_PACKAGES="valkey-server"
|
||||
;;
|
||||
arch|fedora|centos)
|
||||
arch | fedora | centos)
|
||||
VALKEY_PACKAGES="valkey"
|
||||
;;
|
||||
*)
|
||||
|
@ -36,13 +36,13 @@ valkey.backports() {
|
|||
esac
|
||||
}
|
||||
|
||||
valkey.install(){
|
||||
valkey.install() {
|
||||
info_msg "installing valkey ..."
|
||||
valkey.distro.setup
|
||||
|
||||
case $DIST_ID in
|
||||
debian|ubuntu)
|
||||
apt-cache show "${VALKEY_PACKAGES}" &> /dev/null || valkey.backports
|
||||
debian | ubuntu)
|
||||
apt-cache show "${VALKEY_PACKAGES}" &>/dev/null || valkey.backports
|
||||
pkg_install "${VALKEY_PACKAGES}"
|
||||
|
||||
# do some fix ...
|
||||
|
@ -54,7 +54,7 @@ valkey.install(){
|
|||
|
||||
systemd_activate_service valkey-server
|
||||
;;
|
||||
arch|fedora|centos)
|
||||
arch | fedora | centos)
|
||||
pkg_install "${VALKEY_PACKAGES}"
|
||||
systemd_activate_service valkey
|
||||
;;
|
||||
|
|
utils/lxc.sh: 152 changed lines
@@ -60,19 +60,17 @@ REMOTE_IMAGES=()
 CONTAINERS=()
 LOCAL_IMAGES=()

-for ((i=0; i<${#LXC_SUITE[@]}; i+=2)); do
+for ((i = 0; i < ${#LXC_SUITE[@]}; i += 2)); do
     REMOTE_IMAGES=("${REMOTE_IMAGES[@]}" "${LXC_SUITE[i]}")
-    CONTAINERS=("${CONTAINERS[@]}" "${LXC_HOST_PREFIX}-${LXC_SUITE[i+1]}")
-    LOCAL_IMAGES=("${LOCAL_IMAGES[@]}" "${LXC_SUITE[i+1]}")
+    CONTAINERS=("${CONTAINERS[@]}" "${LXC_HOST_PREFIX}-${LXC_SUITE[i + 1]}")
+    LOCAL_IMAGES=("${LOCAL_IMAGES[@]}" "${LXC_SUITE[i + 1]}")
 done

 HOST_USER="${SUDO_USER:-$USER}"
 HOST_USER_ID=$(id -u "${HOST_USER}")
 HOST_GROUP_ID=$(id -g "${HOST_USER}")
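Inside (( ... )) and array subscripts Bash is in arithmetic context, so whitespace around operators is free: i+=2 and i += 2, or LXC_SUITE[i+1] and LXC_SUITE[i + 1], are the same expression. A tiny sketch with a stand-in array:

    # whitespace inside arithmetic contexts is purely cosmetic
    suite=(img-a name-a img-b name-b)
    for ((i = 0; i < ${#suite[@]}; i += 2)); do
        echo "image=${suite[i]} name=${suite[i + 1]}"
    done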
||||
|
||||
# ----------------------------------------------------------------------------
|
||||
usage() {
|
||||
# ----------------------------------------------------------------------------
|
||||
_cmd="$(basename "$0")"
|
||||
cat <<EOF
|
||||
usage::
|
||||
|
@ -110,12 +108,12 @@ install
|
|||
|
||||
EOF
|
||||
usage_containers
|
||||
[ -n "${1+x}" ] && err_msg "$1"
|
||||
[ -n "${1+x}" ] && err_msg "$1"
|
||||
}
|
||||
|
||||
usage_containers() {
|
||||
lxc_suite_install_info
|
||||
[ -n "${1+x}" ] && err_msg "$1"
|
||||
[ -n "${1+x}" ] && err_msg "$1"
|
||||
}
|
||||
|
||||
lxd_info() {
|
||||
|
@ -138,7 +136,7 @@ main() {
|
|||
lxc_distro_setup
|
||||
|
||||
# don't check prerequisite when in recursion
|
||||
if [[ ! $1 == __* ]] && [[ ! $1 == --help ]]; then
|
||||
if [[ $1 != __* ]] && [[ $1 != --help ]]; then
|
||||
if ! in_container; then
|
||||
! required_commands lxc && lxd_info && exit 42
|
||||
fi
|
||||
|
@ -146,27 +144,40 @@ main() {
|
|||
fi
|
||||
|
||||
case $1 in
|
||||
--getenv) var="$2"; echo "${!var}"; exit 0;;
|
||||
-h|--help) usage; exit 0;;
|
||||
--getenv)
|
||||
var="$2"
|
||||
echo "${!var}"
|
||||
exit 0
|
||||
;;
|
||||
-h | --help)
|
||||
usage
|
||||
exit 0
|
||||
;;
|
||||
|
||||
build)
|
||||
sudo_or_exit
|
||||
case $2 in
|
||||
${LXC_HOST_PREFIX}-*) build_container "$2" ;;
|
||||
''|--|containers) build_all_containers ;;
|
||||
*) usage "$_usage"; exit 42;;
|
||||
'' | -- | containers) build_all_containers ;;
|
||||
*)
|
||||
usage "$_usage"
|
||||
exit 42
|
||||
;;
|
||||
esac
|
||||
;;
|
||||
copy)
|
||||
case $2 in
|
||||
''|images) lxc_copy_images_locally;;
|
||||
*) usage "$_usage"; exit 42;;
|
||||
'' | images) lxc_copy_images_locally ;;
|
||||
*)
|
||||
usage "$_usage"
|
||||
exit 42
|
||||
;;
|
||||
esac
|
||||
;;
|
||||
remove)
|
||||
sudo_or_exit
|
||||
case $2 in
|
||||
''|--|containers) remove_containers ;;
|
||||
'' | -- | containers) remove_containers ;;
|
||||
images) lxc_delete_images_locally ;;
|
||||
${LXC_HOST_PREFIX}-*)
|
||||
! lxc_exists "$2" && warn_msg "container not yet exists: $2" && exit 0
|
||||
|
@ -174,19 +185,25 @@ main() {
|
|||
lxc_delete_container "$2"
|
||||
fi
|
||||
;;
|
||||
*) usage "unknown or missing container <name> $2"; exit 42;;
|
||||
*)
|
||||
usage "unknown or missing container <name> $2"
|
||||
exit 42
|
||||
;;
|
||||
esac
|
||||
;;
|
||||
start|stop)
|
||||
start | stop)
|
||||
sudo_or_exit
|
||||
case $2 in
|
||||
''|--|containers) lxc_cmd "$1" ;;
|
||||
'' | -- | containers) lxc_cmd "$1" ;;
|
||||
${LXC_HOST_PREFIX}-*)
|
||||
! lxc_exists "$2" && usage_containers "unknown container: $2" && exit 42
|
||||
info_msg "lxc $1 $2"
|
||||
lxc "$1" "$2" | prefix_stdout "[${_BBlue}${i}${_creset}] "
|
||||
;;
|
||||
*) usage "unknown or missing container <name> $2"; exit 42;;
|
||||
*)
|
||||
usage "unknown or missing container <name> $2"
|
||||
exit 42
|
||||
;;
|
||||
esac
|
||||
;;
|
||||
show)
|
||||
|
@ -195,10 +212,10 @@ main() {
|
|||
suite)
|
||||
case $3 in
|
||||
${LXC_HOST_PREFIX}-*)
|
||||
lxc exec -t "$3" -- "${LXC_REPO_ROOT}/utils/lxc.sh" __show suite \
|
||||
| prefix_stdout "[${_BBlue}$3${_creset}] "
|
||||
;;
|
||||
*) show_suite;;
|
||||
lxc exec -t "$3" -- "${LXC_REPO_ROOT}/utils/lxc.sh" __show suite |
|
||||
prefix_stdout "[${_BBlue}$3${_creset}] "
|
||||
;;
|
||||
*) show_suite ;;
|
||||
esac
|
||||
;;
|
||||
images) show_images ;;
|
||||
|
@ -207,7 +224,7 @@ main() {
|
|||
${LXC_HOST_PREFIX}-*)
|
||||
! lxc_exists "$3" && usage_containers "unknown container: $3" && exit 42
|
||||
lxc config show "$3" | prefix_stdout "[${_BBlue}${3}${_creset}] "
|
||||
;;
|
||||
;;
|
||||
*)
|
||||
rst_title "container configurations"
|
||||
echo
|
||||
|
@ -230,7 +247,10 @@ main() {
|
|||
;;
|
||||
esac
|
||||
;;
|
||||
*) usage "$_usage"; exit 42;;
|
||||
*)
|
||||
usage "$_usage"
|
||||
exit 42
|
||||
;;
|
||||
esac
|
||||
;;
|
||||
__show)
|
||||
|
@ -243,30 +263,36 @@ main() {
|
|||
sudo_or_exit
|
||||
shift
|
||||
case $1 in
|
||||
--) shift; lxc_exec "$@" ;;
|
||||
--)
|
||||
shift
|
||||
lxc_exec "$@"
|
||||
;;
|
||||
${LXC_HOST_PREFIX}-*)
|
||||
! lxc_exists "$1" && usage_containers "unknown container: $1" && exit 42
|
||||
local name=$1
|
||||
shift
|
||||
lxc_exec_cmd "${name}" "$@"
|
||||
;;
|
||||
*) usage_containers "unknown container: $1" && exit 42
|
||||
esac
|
||||
*) usage_containers "unknown container: $1" && exit 42 ;;
|
||||
esac
|
||||
;;
|
||||
install)
|
||||
sudo_or_exit
|
||||
case $2 in
|
||||
suite|base)
|
||||
suite | base)
|
||||
case $3 in
|
||||
${LXC_HOST_PREFIX}-*)
|
||||
! lxc_exists "$3" && usage_containers "unknown container: $3" && exit 42
|
||||
lxc_exec_cmd "$3" "${LXC_REPO_ROOT}/utils/lxc.sh" __install "$2"
|
||||
;;
|
||||
''|--) lxc_exec "${LXC_REPO_ROOT}/utils/lxc.sh" __install "$2" ;;
|
||||
*) usage_containers "unknown container: $3" && exit 42
|
||||
'' | --) lxc_exec "${LXC_REPO_ROOT}/utils/lxc.sh" __install "$2" ;;
|
||||
*) usage_containers "unknown container: $3" && exit 42 ;;
|
||||
esac
|
||||
;;
|
||||
*) usage "$_usage"; exit 42 ;;
|
||||
*)
|
||||
usage "$_usage"
|
||||
exit 42
|
||||
;;
|
||||
esac
|
||||
;;
|
||||
__install)
|
||||
|
@ -281,12 +307,17 @@ main() {
|
|||
echo
|
||||
echo ".. generic utils/lxc.sh documentation"
|
||||
;;
|
||||
-*) usage "unknown option $1"; exit 42;;
|
||||
*) usage "unknown or missing command $1"; exit 42;;
|
||||
-*)
|
||||
usage "unknown option $1"
|
||||
exit 42
|
||||
;;
|
||||
*)
|
||||
usage "unknown or missing command $1"
|
||||
exit 42
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
|
||||
build_all_containers() {
|
||||
rst_title "Build all LXC containers of suite"
|
||||
echo
|
||||
|
@ -310,11 +341,11 @@ build_container() {
|
|||
local image
|
||||
local boilerplate_script
|
||||
|
||||
for ((i=0; i<${#LXC_SUITE[@]}; i+=2)); do
|
||||
if [ "${LXC_HOST_PREFIX}-${LXC_SUITE[i+1]}" = "$1" ]; then
|
||||
for ((i = 0; i < ${#LXC_SUITE[@]}; i += 2)); do
|
||||
if [ "${LXC_HOST_PREFIX}-${LXC_SUITE[i + 1]}" = "$1" ]; then
|
||||
remote_image="${LXC_SUITE[i]}"
|
||||
container="${LXC_HOST_PREFIX}-${LXC_SUITE[i+1]}"
|
||||
image="${LXC_SUITE[i+1]}"
|
||||
container="${LXC_HOST_PREFIX}-${LXC_SUITE[i + 1]}"
|
||||
image="${LXC_SUITE[i + 1]}"
|
||||
boilerplate_script="${image}_boilerplate"
|
||||
boilerplate_script="${!boilerplate_script}"
|
||||
break
|
||||
|
@ -335,8 +366,8 @@ build_container() {
|
|||
lxc_install_boilerplate "${container}" "$boilerplate_script"
|
||||
echo
|
||||
rst_title "install LXC base packages" section
|
||||
lxc_exec_cmd "${container}" "${LXC_REPO_ROOT}/utils/lxc.sh" __install base \
|
||||
| prefix_stdout "[${_BBlue}${container}${_creset}] "
|
||||
lxc_exec_cmd "${container}" "${LXC_REPO_ROOT}/utils/lxc.sh" __install base |
|
||||
prefix_stdout "[${_BBlue}${container}${_creset}] "
|
||||
echo
|
||||
lxc list "$container"
|
||||
}
|
||||
|
@ -348,7 +379,7 @@ remove_containers() {
|
|||
lxc list "$LXC_HOST_PREFIX-"
|
||||
echo -en "\\n${_BRed}LXC containers to delete::${_creset}\\n\\n ${CONTAINERS[*]}\\n" | $FMT
|
||||
local default=Ny
|
||||
[[ $FORCE_TIMEOUT = 0 ]] && default=Yn
|
||||
[[ $FORCE_TIMEOUT == 0 ]] && default=Yn
|
||||
if ask_yn "Do you really want to delete these containers" $default; then
|
||||
for i in "${CONTAINERS[@]}"; do
|
||||
lxc_delete_container "$i"
|
||||
|
@ -363,8 +394,8 @@ remove_containers() {
|
|||
|
||||
lxc_copy_images_locally() {
|
||||
rst_title "copy images" section
|
||||
for ((i=0; i<${#LXC_SUITE[@]}; i+=2)); do
|
||||
lxc_image_copy "${LXC_SUITE[i]}" "${LXC_SUITE[i+1]}"
|
||||
for ((i = 0; i < ${#LXC_SUITE[@]}; i += 2)); do
|
||||
lxc_image_copy "${LXC_SUITE[i]}" "${LXC_SUITE[i + 1]}"
|
||||
done
|
||||
# lxc image list local: && wait_key
|
||||
}
|
||||
|
@ -391,7 +422,7 @@ lxc_delete_images_locally() {
|
|||
lxc image list local:
|
||||
}
|
||||
|
||||
show_images(){
|
||||
show_images() {
|
||||
rst_title "local images"
|
||||
echo
|
||||
lxc image list local:
|
||||
|
@ -408,11 +439,10 @@ show_images(){
|
|||
|
||||
}
|
||||
|
||||
|
||||
# container
|
||||
# ---------
|
||||
|
||||
show_suite(){
|
||||
show_suite() {
|
||||
rst_title "LXC suite ($LXC_HOST_PREFIX-*)"
|
||||
echo
|
||||
lxc list "$LXC_HOST_PREFIX-"
|
||||
|
@ -421,8 +451,8 @@ show_suite(){
|
|||
if ! lxc_exists "$i"; then
|
||||
warn_msg "container ${_BBlue}$i${_creset} does not yet exists"
|
||||
else
|
||||
lxc exec -t "${i}" -- "${LXC_REPO_ROOT}/utils/lxc.sh" __show suite \
|
||||
| prefix_stdout "[${_BBlue}${i}${_creset}] "
|
||||
lxc exec -t "${i}" -- "${LXC_REPO_ROOT}/utils/lxc.sh" __show suite |
|
||||
prefix_stdout "[${_BBlue}${i}${_creset}] "
|
||||
echo
|
||||
fi
|
||||
done
|
||||
|
@ -469,8 +499,8 @@ lxc_init_all_containers() {
|
|||
local image_name
|
||||
local container_name
|
||||
|
||||
for ((i=0; i<${#LXC_SUITE[@]}; i+=2)); do
|
||||
lxc_init_container "${LXC_SUITE[i+1]}" "${LXC_HOST_PREFIX}-${LXC_SUITE[i+1]}"
|
||||
for ((i = 0; i < ${#LXC_SUITE[@]}; i += 2)); do
|
||||
lxc_init_container "${LXC_SUITE[i + 1]}" "${LXC_HOST_PREFIX}-${LXC_SUITE[i + 1]}"
|
||||
done
|
||||
}
|
||||
|
||||
|
@ -487,8 +517,8 @@ lxc_config_container() {
|
|||
|
||||
info_msg "[${_BBlue}$1${_creset}] map uid/gid from host to container"
|
||||
# https://lxd.readthedocs.io/en/latest/userns-idmap/#custom-idmaps
|
||||
echo -e -n "uid $HOST_USER_ID 0\\ngid $HOST_GROUP_ID 0"\
|
||||
| lxc config set "$1" raw.idmap -
|
||||
echo -e -n "uid $HOST_USER_ID 0\\ngid $HOST_GROUP_ID 0" |
|
||||
lxc config set "$1" raw.idmap -
|
||||
|
||||
info_msg "[${_BBlue}$1${_creset}] share ${REPO_ROOT} (repo_share) from HOST into container"
|
||||
# https://lxd.readthedocs.io/en/latest/instances/#type-disk
|
||||
|
@ -504,15 +534,15 @@ lxc_boilerplate_all_containers() {
|
|||
local boilerplate_script
|
||||
local image_name
|
||||
|
||||
for ((i=0; i<${#LXC_SUITE[@]}; i+=2)); do
|
||||
for ((i = 0; i < ${#LXC_SUITE[@]}; i += 2)); do
|
||||
|
||||
image_name="${LXC_SUITE[i+1]}"
|
||||
image_name="${LXC_SUITE[i + 1]}"
|
||||
boilerplate_script="${image_name}_boilerplate"
|
||||
boilerplate_script="${!boilerplate_script}"
|
||||
|
||||
lxc_install_boilerplate "${LXC_HOST_PREFIX}-${image_name}" "$boilerplate_script"
|
||||
|
||||
if [[ -z "${boilerplate_script}" ]]; then
|
||||
if [[ -z ${boilerplate_script} ]]; then
|
||||
err_msg "[${_BBlue}${container_name}${_creset}] no boilerplate for image '${image_name}'"
|
||||
fi
|
||||
done
|
||||
|
@ -546,10 +576,10 @@ EOF
|
|||
if lxc start -q "${container_name}" &>/dev/null; then
|
||||
sleep 5 # guest needs some time to come up and get an IP
|
||||
fi
|
||||
if [[ -n "${boilerplate_script}" ]]; then
|
||||
echo "${boilerplate_script}" \
|
||||
| lxc exec "${container_name}" -- bash \
|
||||
| prefix_stdout "[${_BBlue}${container_name}${_creset}] "
|
||||
if [[ -n ${boilerplate_script} ]]; then
|
||||
echo "${boilerplate_script}" |
|
||||
lxc exec "${container_name}" -- bash |
|
||||
prefix_stdout "[${_BBlue}${container_name}${_creset}] "
|
||||
fi
|
||||
}
|
||||
|
||||
|
@ -568,6 +598,4 @@ check_connectivity() {
|
|||
return $ret_val
|
||||
}
|
||||
|
||||
# ----------------------------------------------------------------------------
|
||||
main "$@"
|
||||
# ----------------------------------------------------------------------------
|
||||
|
|
utils/searxng.sh: 211 changed lines
|
@ -46,7 +46,7 @@ if in_container; then
|
|||
SEARXNG_URL="http://$(primary_ip)/searxng"
|
||||
fi
|
||||
SEARXNG_URL_PATH="$(echo "${SEARXNG_URL}" | sed -e 's,^.*://[^/]*\(/.*\),\1,g')"
|
||||
[[ "${SEARXNG_URL_PATH}" == "${SEARXNG_URL}" ]] && SEARXNG_URL_PATH=/
|
||||
[[ ${SEARXNG_URL_PATH} == "${SEARXNG_URL}" ]] && SEARXNG_URL_PATH=/
|
||||
|
||||
# Apache settings
|
||||
|
||||
|
@ -98,7 +98,7 @@ case $DIST_ID-$DIST_VERS in
|
|||
SEARXNG_BUILD_PACKAGES="${SEARXNG_BUILD_PACKAGES_debian}"
|
||||
APACHE_PACKAGES="$APACHE_PACKAGES libapache2-mod-proxy-uwsgi"
|
||||
;;
|
||||
ubuntu-*|debian-*)
|
||||
ubuntu-* | debian-*)
|
||||
SEARXNG_PACKAGES="${SEARXNG_PACKAGES_debian} python-is-python3"
|
||||
SEARXNG_BUILD_PACKAGES="${SEARXNG_BUILD_PACKAGES_debian}"
|
||||
;;
|
||||
|
@ -114,9 +114,7 @@ esac
|
|||
|
||||
_service_prefix=" ${_Yellow}|${SERVICE_USER}|${_creset} "
|
||||
|
||||
# ----------------------------------------------------------------------------
|
||||
usage() {
|
||||
# ----------------------------------------------------------------------------
|
||||
|
||||
# shellcheck disable=SC1117
|
||||
cat <<EOF
|
||||
|
@ -148,7 +146,7 @@ instance:
|
|||
cmd : run command in SearXNG instance's environment (e.g. bash)
|
||||
EOF
|
||||
searxng.instance.env
|
||||
[[ -n ${1} ]] && err_msg "$1"
|
||||
[[ -n ${1} ]] && err_msg "$1"
|
||||
}
|
||||
|
||||
searxng.instance.env() {
|
||||
|
@ -170,52 +168,68 @@ EOF
|
|||
|
||||
main() {
|
||||
case $1 in
|
||||
install|remove|instance)
|
||||
install | remove | instance)
|
||||
nginx_distro_setup
|
||||
apache_distro_setup
|
||||
uWSGI_distro_setup
|
||||
required_commands \
|
||||
sudo systemctl install git wget curl \
|
||||
|| exit
|
||||
sudo systemctl install git wget curl ||
|
||||
exit
|
||||
;;
|
||||
esac
|
||||
|
||||
local _usage="unknown or missing $1 command $2"
|
||||
|
||||
case $1 in
|
||||
--getenv) var="$2"; echo "${!var}"; exit 0;;
|
||||
--cmd) shift; "$@";;
|
||||
-h|--help) usage; exit 0;;
|
||||
--getenv)
|
||||
var="$2"
|
||||
echo "${!var}"
|
||||
exit 0
|
||||
;;
|
||||
--cmd)
|
||||
shift
|
||||
"$@"
|
||||
;;
|
||||
-h | --help)
|
||||
usage
|
||||
exit 0
|
||||
;;
|
||||
install)
|
||||
sudo_or_exit
|
||||
case $2 in
|
||||
all) searxng.install.all;;
|
||||
user) searxng.install.user;;
|
||||
pyenv) searxng.install.pyenv;;
|
||||
searxng-src) searxng.install.clone;;
|
||||
settings) searxng.install.settings;;
|
||||
uwsgi) searxng.install.uwsgi;;
|
||||
packages) searxng.install.packages;;
|
||||
buildhost) searxng.install.buildhost;;
|
||||
nginx) searxng.nginx.install;;
|
||||
apache) searxng.apache.install;;
|
||||
valkey) searxng.install.valkey;;
|
||||
*) usage "$_usage"; exit 42;;
|
||||
all) searxng.install.all ;;
|
||||
user) searxng.install.user ;;
|
||||
pyenv) searxng.install.pyenv ;;
|
||||
searxng-src) searxng.install.clone ;;
|
||||
settings) searxng.install.settings ;;
|
||||
uwsgi) searxng.install.uwsgi ;;
|
||||
packages) searxng.install.packages ;;
|
||||
buildhost) searxng.install.buildhost ;;
|
||||
nginx) searxng.nginx.install ;;
|
||||
apache) searxng.apache.install ;;
|
||||
valkey) searxng.install.valkey ;;
|
||||
*)
|
||||
usage "$_usage"
|
||||
exit 42
|
||||
;;
|
||||
esac
|
||||
;;
|
||||
remove)
|
||||
sudo_or_exit
|
||||
case $2 in
|
||||
all) searxng.remove.all;;
|
||||
user) drop_service_account "${SERVICE_USER}";;
|
||||
pyenv) searxng.remove.pyenv;;
|
||||
settings) searxng.remove.settings;;
|
||||
uwsgi) searxng.remove.uwsgi;;
|
||||
apache) searxng.apache.remove;;
|
||||
remove) searxng.nginx.remove;;
|
||||
valkey) searxng.remove.valkey;;
|
||||
redis) searxng.remove.redis;;
|
||||
*) usage "$_usage"; exit 42;;
|
||||
all) searxng.remove.all ;;
|
||||
user) drop_service_account "${SERVICE_USER}" ;;
|
||||
pyenv) searxng.remove.pyenv ;;
|
||||
settings) searxng.remove.settings ;;
|
||||
uwsgi) searxng.remove.uwsgi ;;
|
||||
apache) searxng.apache.remove ;;
|
||||
remove) searxng.nginx.remove ;;
|
||||
valkey) searxng.remove.valkey ;;
|
||||
redis) searxng.remove.redis ;;
|
||||
*)
|
||||
usage "$_usage"
|
||||
exit 42
|
||||
;;
|
||||
esac
|
||||
;;
|
||||
instance)
|
||||
|
@ -234,19 +248,30 @@ main() {
|
|||
;;
|
||||
cmd)
|
||||
sudo_or_exit
|
||||
shift; shift; searxng.instance.exec "$@"
|
||||
shift
|
||||
shift
|
||||
searxng.instance.exec "$@"
|
||||
;;
|
||||
get_setting)
|
||||
shift; shift; searxng.instance.get_setting "$@"
|
||||
shift
|
||||
shift
|
||||
searxng.instance.get_setting "$@"
|
||||
;;
|
||||
call)
|
||||
# call a function in instance's environment
|
||||
shift; shift; searxng.instance.self.call "$@"
|
||||
shift
|
||||
shift
|
||||
searxng.instance.self.call "$@"
|
||||
;;
|
||||
_call)
|
||||
shift; shift; "$@"
|
||||
shift
|
||||
shift
|
||||
"$@"
|
||||
;;
|
||||
*)
|
||||
usage "$_usage"
|
||||
exit 42
|
||||
;;
|
||||
*) usage "$_usage"; exit 42;;
|
||||
esac
|
||||
;;
|
||||
*)
|
||||
|
@ -314,7 +339,7 @@ In your instance, valkey DB connector is configured at:
|
|||
fi
|
||||
fi
|
||||
|
||||
if ! [[ ${valkey_url} = valkey://localhost:6379/* ]]; then
|
||||
if ! [[ ${valkey_url} == valkey://localhost:6379/* ]]; then
|
||||
err_msg "SearXNG instance can't connect valkey DB / check valkey & your settings"
|
||||
return
|
||||
fi
|
||||
|
@ -352,7 +377,7 @@ searxng.remove.all() {
|
|||
fi
|
||||
|
||||
valkey_url=$(searxng.instance.get_setting valkey.url)
|
||||
if ! [[ ${valkey_url} = unix://${VALKEY_HOME}/run/valkey.sock* ]]; then
|
||||
if ! [[ ${valkey_url} == unix://${VALKEY_HOME}/run/valkey.sock* ]]; then
|
||||
searxng.remove.valkey
|
||||
fi
|
||||
|
||||
|
@ -369,9 +394,9 @@ searxng.remove.all() {
|
|||
searxng.install.user() {
|
||||
rst_title "SearXNG -- install user" section
|
||||
echo
|
||||
if getent passwd "${SERVICE_USER}" > /dev/null; then
|
||||
echo "user already exists"
|
||||
return 0
|
||||
if getent passwd "${SERVICE_USER}" >/dev/null; then
|
||||
echo "user already exists"
|
||||
return 0
|
||||
fi
|
||||
|
||||
tee_stderr 1 <<EOF | bash | prefix_stdout
|
||||
|
@ -390,7 +415,7 @@ searxng.install.packages() {
|
|||
|
||||
searxng.install.buildhost() {
|
||||
TITLE="SearXNG -- install buildhost packages" pkg_install \
|
||||
"${SEARXNG_PACKAGES} ${SEARXNG_BUILD_PACKAGES}"
|
||||
"${SEARXNG_PACKAGES} ${SEARXNG_BUILD_PACKAGES}"
|
||||
}
|
||||
|
||||
searxng.install.clone() {
|
||||
|
@ -399,11 +424,11 @@ searxng.install.clone() {
|
|||
die 42 "To clone SearXNG, first install user ${SERVICE_USER}."
|
||||
fi
|
||||
echo
|
||||
if ! sudo -i -u "${SERVICE_USER}" ls -d "$REPO_ROOT" > /dev/null; then
|
||||
if ! sudo -i -u "${SERVICE_USER}" ls -d "$REPO_ROOT" >/dev/null; then
|
||||
die 42 "user '${SERVICE_USER}' missed read permission: $REPO_ROOT"
|
||||
fi
|
||||
# SERVICE_HOME="$(sudo -i -u "${SERVICE_USER}" echo \$HOME 2>/dev/null)"
|
||||
if [[ ! "${SERVICE_HOME}" ]]; then
|
||||
if [[ ! ${SERVICE_HOME} ]]; then
|
||||
err_msg "to clone SearXNG sources, user ${SERVICE_USER} hast to be created first"
|
||||
return 42
|
||||
fi
|
||||
|
@ -412,7 +437,7 @@ searxng.install.clone() {
|
|||
info_msg "create local branch ${GIT_BRANCH} from start point: origin/${GIT_BRANCH}"
|
||||
git branch "${GIT_BRANCH}" "origin/${GIT_BRANCH}"
|
||||
fi
|
||||
if [[ ! $(git rev-parse --abbrev-ref HEAD) == "${GIT_BRANCH}" ]]; then
|
||||
if [[ $(git rev-parse --abbrev-ref HEAD) != "${GIT_BRANCH}" ]]; then
|
||||
warn_msg "take into account, installing branch $GIT_BRANCH while current branch is $(git rev-parse --abbrev-ref HEAD)"
|
||||
fi
|
||||
# export SERVICE_HOME
|
||||
|
@ -421,10 +446,10 @@ searxng.install.clone() {
|
|||
# https://github.com/searxng/searxng/issues/1251
|
||||
git config --system --add safe.directory "${REPO_ROOT}/.git"
|
||||
git_clone "$REPO_ROOT" "${SEARXNG_SRC}" \
|
||||
"$GIT_BRANCH" "${SERVICE_USER}"
|
||||
"$GIT_BRANCH" "${SERVICE_USER}"
|
||||
git config --system --add safe.directory "${SEARXNG_SRC}"
|
||||
|
||||
pushd "${SEARXNG_SRC}" > /dev/null
|
||||
pushd "${SEARXNG_SRC}" >/dev/null
|
||||
tee_stderr 0.1 <<EOF | sudo -H -u "${SERVICE_USER}" -i 2>&1 | prefix_stdout "$_service_prefix"
|
||||
cd "${SEARXNG_SRC}"
|
||||
git remote set-url origin ${GIT_URL}
|
||||
|
@ -432,7 +457,7 @@ git config user.email "${ADMIN_EMAIL}"
|
|||
git config user.name "${ADMIN_NAME}"
|
||||
git config --list
|
||||
EOF
|
||||
popd > /dev/null
|
||||
popd >/dev/null
|
||||
}
|
||||
|
||||
searxng.install.link_src() {
|
||||
|
@ -482,7 +507,7 @@ searxng.remove.pyenv() {
|
|||
return
|
||||
fi
|
||||
info_msg "remove pyenv activation from ~/.profile"
|
||||
tee_stderr 0.1 <<EOF | sudo -H -u "${SERVICE_USER}" -i 2>&1 | prefix_stdout "$_service_prefix"
|
||||
tee_stderr 0.1 <<EOF | sudo -H -u "${SERVICE_USER}" -i 2>&1 | prefix_stdout "$_service_prefix"
|
||||
grep -v 'source ${SEARXNG_PYENV}/bin/activate' ~/.profile > ~/.profile.##
|
||||
mv ~/.profile.## ~/.profile
|
||||
EOF
|
||||
|
@ -499,9 +524,9 @@ searxng.install.settings() {
|
|||
mkdir -p "$(dirname "${SEARXNG_SETTINGS_PATH}")"
|
||||
|
||||
DEFAULT_SELECT=1 \
|
||||
install_template --no-eval \
|
||||
"${SEARXNG_SETTINGS_PATH}" \
|
||||
"${SERVICE_USER}" "${SERVICE_GROUP}"
|
||||
install_template --no-eval \
|
||||
"${SEARXNG_SETTINGS_PATH}" \
|
||||
"${SERVICE_USER}" "${SERVICE_GROUP}"
|
||||
|
||||
tee_stderr 0.1 <<EOF | sudo -H -i 2>&1 | prefix_stdout "root"
|
||||
sed -i -e "s/ultrasecretkey/$(openssl rand -hex 16)/g" "${SEARXNG_SETTINGS_PATH}"
|
||||
|
@ -535,9 +560,9 @@ pip install -U --use-pep517 --no-build-isolation -e .
|
|||
EOF
|
||||
rst_para "update instance's settings.yml from ${SEARXNG_SETTINGS_PATH}"
|
||||
DEFAULT_SELECT=2 \
|
||||
install_template --no-eval \
|
||||
"${SEARXNG_SETTINGS_PATH}" \
|
||||
"${SERVICE_USER}" "${SERVICE_GROUP}"
|
||||
install_template --no-eval \
|
||||
"${SEARXNG_SETTINGS_PATH}" \
|
||||
"${SERVICE_USER}" "${SERVICE_GROUP}"
|
||||
|
||||
sudo -H -i <<EOF
|
||||
sed -i -e "s/ultrasecretkey/$(openssl rand -hex 16)/g" "${SEARXNG_SETTINGS_PATH}"
|
||||
|
@ -574,10 +599,10 @@ searxng.install.uwsgi.socket() {
|
|||
# Emperor will run the vassal using the UID/GID of the vassal
|
||||
# configuration file [1] (user and group of the app .ini file).
|
||||
# [1] https://uwsgi-docs.readthedocs.io/en/latest/Emperor.html#tyrant-mode-secure-multi-user-hosting
|
||||
uWSGI_install_app --variant=socket "${SEARXNG_UWSGI_APP}" "${SERVICE_USER}" "${SERVICE_GROUP}"
|
||||
uWSGI_install_app --variant=socket "${SEARXNG_UWSGI_APP}" "${SERVICE_USER}" "${SERVICE_GROUP}"
|
||||
;;
|
||||
*)
|
||||
uWSGI_install_app --variant=socket "${SEARXNG_UWSGI_APP}"
|
||||
uWSGI_install_app --variant=socket "${SEARXNG_UWSGI_APP}"
|
||||
;;
|
||||
esac
|
||||
sleep 5
|
||||
|
@@ -588,9 +613,9 @@ searxng.install.uwsgi.socket() {

searxng.uwsgi.available() {
if [[ ${SEARXNG_UWSGI_USE_SOCKET} == true ]]; then
[[ -S "${SEARXNG_UWSGI_SOCKET}" ]]
[[ -S ${SEARXNG_UWSGI_SOCKET} ]]
exit_val=$?
if [[ $exit_val = 0 ]]; then
if [[ $exit_val == 0 ]]; then
info_msg "uWSGI socket is located at: ${SEARXNG_UWSGI_SOCKET}"
fi
else
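The availability test boils down to bash's -S file test, which is true only for an existing UNIX domain socket; in isolation, with an assumed socket path:

    if [[ -S /usr/local/searxng/run/socket ]]; then
        echo "uWSGI socket is up"
    else
        echo "uWSGI socket is missing" >&2
    fi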
@@ -617,11 +642,10 @@ searxng.install.valkey() {
valkey.install
}

searxng.instance.localtest() {
rst_title "Test SearXNG instance locally" section
rst_para "Activate debug mode, start a minimal SearXNG "\
"service and debug a HTTP request/response cycle."
rst_para "Activate debug mode, start a minimal SearXNG " \
"service and debug a HTTP request/response cycle."

if service_is_available "http://${SEARXNG_INTERNAL_HTTP}" &>/dev/null; then
err_msg "URL/port http://${SEARXNG_INTERNAL_HTTP} is already in use, you"
@@ -632,7 +656,7 @@ searxng.instance.localtest() {
fi
echo
searxng.instance.debug.on
tee_stderr 0.1 <<EOF | sudo -H -u "${SERVICE_USER}" -i 2>&1 | prefix_stdout "$_service_prefix"
tee_stderr 0.1 <<EOF | sudo -H -u "${SERVICE_USER}" -i 2>&1 | prefix_stdout "$_service_prefix"
export SEARXNG_SETTINGS_PATH="${SEARXNG_SETTINGS_PATH}"
cd ${SEARXNG_SRC}
timeout 10 python searx/webapp.py &
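Stripped of the helper functions, the local test starts the web app directly for a few seconds and probes it once; a minimal sketch with assumed default paths and the usual 127.0.0.1:8888 bind address:

    export SEARXNG_SETTINGS_PATH="/etc/searxng/settings.yml"
    cd /usr/local/searxng/searxng-src
    timeout 10 python searx/webapp.py &   # serve for at most 10 seconds
    sleep 3                               # give the app time to bind
    curl -sS --head http://127.0.0.1:8888/
    wait                                  # let the timeout expire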
@@ -714,7 +738,7 @@ This installs SearXNG's uWSGI app as Nginx site. The Nginx site is located at:
${NGINX_APPS_AVAILABLE}/${NGINX_SEARXNG_SITE} and requires a uWSGI."
searxng.install.http.pre

if ! nginx_is_installed ; then
if ! nginx_is_installed; then
err_msg "Nginx packages are not installed"
if ! ask_yn "Do you really want to continue and install Nginx packages?" Yn; then
return
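On a Debian-style layout, enabling the generated site once this install step has finished would typically look like the lines below; the file name and directories are assumptions, nothing in this diff defines them:

    sudo ln -s /etc/nginx/sites-available/searxng.conf /etc/nginx/sites-enabled/searxng.conf
    sudo nginx -t && sudo systemctl reload nginx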
@@ -755,8 +779,8 @@ searxng.instance.exec() {
die 42 "can't execute: instance does not exist (missed account ${SERVICE_USER})"
fi
sudo -H -i -u "${SERVICE_USER}" \
SEARXNG_UWSGI_USE_SOCKET="${SEARXNG_UWSGI_USE_SOCKET}" \
"$@"
SEARXNG_UWSGI_USE_SOCKET="${SEARXNG_UWSGI_USE_SOCKET}" \
"$@"
}

searxng.instance.self.call() {
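The exec helper above is essentially "run an arbitrary command as the service account, forwarding one environment variable"; invoked by hand as root it would look roughly like this (account name and command are placeholders):

    sudo -H -i -u searxng SEARXNG_UWSGI_USE_SOCKET=true uwsgi --version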
@@ -775,7 +799,7 @@ EOF
searxng.instance.debug.on() {
warn_msg "Do not enable debug in a production environment!"
info_msg "try to enable debug mode ..."
tee_stderr 0.1 <<EOF | sudo -H -i 2>&1 | prefix_stdout "$_service_prefix"
tee_stderr 0.1 <<EOF | sudo -H -i 2>&1 | prefix_stdout "$_service_prefix"
cd ${SEARXNG_SRC}
sed -i -e "s/debug: false/debug: true/g" "$SEARXNG_SETTINGS_PATH"
EOF
@@ -784,7 +808,7 @@ EOF

searxng.instance.debug.off() {
info_msg "try to disable debug mode ..."
tee_stderr 0.1 <<EOF | sudo -H -i 2>&1 | prefix_stdout "$_service_prefix"
tee_stderr 0.1 <<EOF | sudo -H -i 2>&1 | prefix_stdout "$_service_prefix"
cd ${SEARXNG_SRC}
sed -i -e "s/debug: true/debug: false/g" "$SEARXNG_SETTINGS_PATH"
EOF
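Both helpers reduce to a single in-place sed over settings.yml; run manually (settings path assumed) the toggle is simply:

    sudo sed -i -e "s/debug: false/debug: true/g" /etc/searxng/settings.yml   # enable
    sudo sed -i -e "s/debug: true/debug: false/g" /etc/searxng/settings.yml   # disable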
@@ -805,7 +829,7 @@ searxng.instance.inspect() {
echo

case $DIST_ID-$DIST_VERS in
ubuntu-*|debian-*)
ubuntu-* | debian-*)
# For uWSGI debian uses the LSB init process; for each configuration
# file new uWSGI daemon instance is started with additional option.
service uwsgi status "${SERVICE_NAME}"
@@ -818,16 +842,16 @@ searxng.instance.inspect() {
;;
esac

echo -e "// use ${_BCyan}CTRL-C${_creset} to stop monitoring the log"
echo -e "// use ${_BCyan}CTRL-C${_creset} to stop monitoring the log"
read -r -s -n1 -t 5
echo

while true; do
while true; do
trap break 2
case $DIST_ID-$DIST_VERS in
ubuntu-*|debian-*) tail -f "/var/log/uwsgi/app/${SERVICE_NAME%.*}.log" ;;
arch-*) journalctl -f -u "uwsgi@${SERVICE_NAME%.*}" ;;
fedora-*) journalctl -f -u uwsgi ;;
ubuntu-* | debian-*) tail -f "/var/log/uwsgi/app/${SERVICE_NAME%.*}.log" ;;
arch-*) journalctl -f -u "uwsgi@${SERVICE_NAME%.*}" ;;
fedora-*) journalctl -f -u uwsgi ;;
esac
done

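The monitoring loop relies on a small trick: trapping SIGINT (signal 2) inside the loop turns CTRL-C into a break, so the log follower stops while the surrounding script keeps running. The pattern in isolation, with journalctl as an example log follower:

    while true; do
        trap break 2             # CTRL-C leaves the loop instead of killing the script
        journalctl -f -u uwsgi   # blocks until interrupted
    done
    trap - 2                     # restore default SIGINT handling afterwards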
@@ -870,10 +894,10 @@ searxng.doc.rst() {
local arch_build="${SEARXNG_BUILD_PACKAGES_arch}"
local fedora_build="${SEARXNG_BUILD_PACKAGES_fedora}"
debian="$(echo "${debian}" | sed 's/.*/ & \\/' | sed '$ s/.$//')"
arch="$(echo "${arch}" | sed 's/.*/ & \\/' | sed '$ s/.$//')"
arch="$(echo "${arch}" | sed 's/.*/ & \\/' | sed '$ s/.$//')"
fedora="$(echo "${fedora}" | sed 's/.*/ & \\/' | sed '$ s/.$//')"
debian_build="$(echo "${debian_build}" | sed 's/.*/ & \\/' | sed '$ s/.$//')"
arch_build="$(echo "${arch_build}" | sed 's/.*/ & \\/' | sed '$ s/.$//')"
arch_build="$(echo "${arch_build}" | sed 's/.*/ & \\/' | sed '$ s/.$//')"
fedora_build="$(echo "${fedora_build}" | sed 's/.*/ & \\/' | sed '$ s/.$//')"

if [[ ${SEARXNG_UWSGI_USE_SOCKET} == true ]]; then
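The sed pipeline repeated above turns a newline-separated package list into a line-continued block for the generated reST: the first sed appends " \" to every line, the second strips the trailing backslash from the last one. Worked on a toy list:

    printf 'git\npython3\nuwsgi\n' | sed 's/.*/ & \\/' | sed '$ s/.$//'
    # output:
    #  git \
    #  python3 \
    #  uwsgi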
@@ -882,7 +906,7 @@ searxng.doc.rst() {
uwsgi_variant=':socket'
fi

eval "echo \"$(< "${REPO_ROOT}/docs/build-templates/searxng.rst")\""
eval "echo \"$(<"${REPO_ROOT}/docs/build-templates/searxng.rst")\""

# I use ubuntu-20.04 here to demonstrate that versions are also supported,
# normally debian-* and ubuntu-* are most the same.
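The eval "echo \"$(<file)\"" construct used here (and for the uWSGI, nginx and apache templates further down) reads a file and lets the current shell expand every ${...} placeholder in it. A self-contained demo with a made-up template file (fragile if the template itself contains double quotes or backticks):

    NAME="SearXNG"
    printf 'hello from ${NAME}\n' >/tmp/demo.tmpl
    eval "echo \"$(</tmp/demo.tmpl)\""
    # -> hello from SearXNG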
@@ -897,7 +921,8 @@ searxng.doc.rst() {
echo -e "\n.. START searxng uwsgi-description $DIST_NAME"

case $DIST_ID-$DIST_VERS in
ubuntu-*|debian-*) cat <<EOF
ubuntu-* | debian-*)
cat <<EOF

.. code:: bash

@@ -913,8 +938,9 @@ searxng.doc.rst() {
disable: sudo -H rm ${uWSGI_APPS_ENABLED}/${SEARXNG_UWSGI_APP}

EOF
;;
arch-*) cat <<EOF
;;
arch-*)
cat <<EOF

.. code:: bash

@@ -931,8 +957,9 @@ EOF
disable: sudo -H systemctl disable uwsgi@${SEARXNG_UWSGI_APP%.*}

EOF
;;
fedora-*|centos-7) cat <<EOF
;;
fedora-* | centos-7)
cat <<EOF

.. code:: bash

@@ -945,46 +972,44 @@ EOF
disable: sudo -H rm ${uWSGI_APPS_ENABLED}/${SEARXNG_UWSGI_APP}

EOF
;;
;;
esac
echo -e ".. END searxng uwsgi-description $DIST_NAME"

local _show_cursor="" # prevent from prefix_stdout's trailing show-cursor
local _show_cursor="" # prevent from prefix_stdout's trailing show-cursor

echo -e "\n.. START searxng uwsgi-appini $DIST_NAME"
echo ".. code:: bash"
echo
eval "echo \"$(< "${TEMPLATES}/${uWSGI_APPS_AVAILABLE}/${SEARXNG_UWSGI_APP}${uwsgi_variant}")\"" | prefix_stdout " "
eval "echo \"$(<"${TEMPLATES}/${uWSGI_APPS_AVAILABLE}/${SEARXNG_UWSGI_APP}${uwsgi_variant}")\"" | prefix_stdout " "
echo -e "\n.. END searxng uwsgi-appini $DIST_NAME"

echo -e "\n.. START nginx socket"
echo ".. code:: nginx"
echo
eval "echo \"$(< "${TEMPLATES}/${NGINX_APPS_AVAILABLE}/${NGINX_SEARXNG_SITE}:socket")\"" | prefix_stdout " "
eval "echo \"$(<"${TEMPLATES}/${NGINX_APPS_AVAILABLE}/${NGINX_SEARXNG_SITE}:socket")\"" | prefix_stdout " "
echo -e "\n.. END nginx socket"

echo -e "\n.. START nginx http"
echo ".. code:: nginx"
echo
eval "echo \"$(< "${TEMPLATES}/${NGINX_APPS_AVAILABLE}/${NGINX_SEARXNG_SITE}")\"" | prefix_stdout " "
eval "echo \"$(<"${TEMPLATES}/${NGINX_APPS_AVAILABLE}/${NGINX_SEARXNG_SITE}")\"" | prefix_stdout " "
echo -e "\n.. END nginx http"

echo -e "\n.. START apache socket"
echo ".. code:: apache"
echo
eval "echo \"$(< "${TEMPLATES}/${APACHE_SITES_AVAILABLE}/${APACHE_SEARXNG_SITE}:socket")\"" | prefix_stdout " "
eval "echo \"$(<"${TEMPLATES}/${APACHE_SITES_AVAILABLE}/${APACHE_SEARXNG_SITE}:socket")\"" | prefix_stdout " "
echo -e "\n.. END apache socket"

echo -e "\n.. START apache http"
echo ".. code:: apache"
echo
eval "echo \"$(< "${TEMPLATES}/${APACHE_SITES_AVAILABLE}/${APACHE_SEARXNG_SITE}")\"" | prefix_stdout " "
eval "echo \"$(<"${TEMPLATES}/${APACHE_SITES_AVAILABLE}/${APACHE_SEARXNG_SITE}")\"" | prefix_stdout " "
echo -e "\n.. END apache http"
)
done

}

# ----------------------------------------------------------------------------
main "$@"
# ----------------------------------------------------------------------------