mirror of https://github.com/searxng/searxng.git
synced 2025-08-05 03:16:46 +02:00
[fix] sh: apply format
Related: https://github.com/searxng/searxng/issues/4803
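The changes below look like the result of running shfmt over the repository's shell sources. The exact invocation is not recorded in this commit, so the following is only a sketch under assumptions; the flag set and the file list are guesses derived from the style visible in the diff (4-space indent, no space after redirection operators, trailing pipes, "a | b" case patterns):

    # sketch only: verify / re-apply the formatting locally with shfmt
    shfmt -ln bash -i 4 -d manage utils/*.sh   # -d: print a diff, exit non-zero on differences
    shfmt -ln bash -i 4 -w manage utils/*.sh   # -w: rewrite the files in place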
This commit is contained in:
parent 2311d16497
commit e9ecdcc350

18 changed files with 644 additions and 548 deletions
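To keep the formatting from drifting again, a check along these lines can be added to CI or a pre-commit hook. This is a sketch only and is not part of this commit; the file selection via git ls-files is an assumption:

    # sketch only: fail when a tracked shell script is not shfmt-clean
    git ls-files 'manage' 'utils/*.sh' | xargs shfmt -ln bash -i 4 -d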
manage | 52
@@ -65,7 +65,7 @@ while IFS= read -r line; do
if [ "$line" != "tests/unit/settings/syntaxerror_settings.yml" ]; then
YAMLLINT_FILES+=("$line")
fi
-done <<< "$(git ls-files './tests/*.yml' './searx/*.yml' './utils/templates/etc/searxng/*.yml' '.github/*.yml' '.github/*/*.yml')"
+done <<<"$(git ls-files './tests/*.yml' './searx/*.yml' './utils/templates/etc/searxng/*.yml' '.github/*.yml' '.github/*/*.yml')"

RST_FILES=(
'README.rst'
@@ -113,7 +113,6 @@ environment ...
EOF
}

-
if [ "$VERBOSE" = "1" ]; then
SPHINX_VERBOSE="-v"
PYLINT_VERBOSE="-v"
@@ -126,14 +125,14 @@ webapp.run() {
local parent_proc="$$"
(
if [ "${LIVE_THEME}" ]; then
-( themes.live "${LIVE_THEME}" )
+(themes.live "${LIVE_THEME}")
kill $parent_proc
fi
-)&
+) &
(
sleep 3
xdg-open http://127.0.0.1:8888/
-)&
+) &
SEARXNG_DEBUG=1 pyenv.cmd python -m searx.webapp
}
@@ -143,10 +142,11 @@ gecko.driver() {

build_msg INSTALL "gecko.driver"
# run installation in a subprocess and activate pyenv
-( set -e
+(
+set -e
pyenv.activate

-INSTALLED_VERSION=$(geckodriver -V 2> /dev/null | head -1 | awk '{ print "v" $2}') || INSTALLED_VERSION=""
+INSTALLED_VERSION=$(geckodriver -V 2>/dev/null | head -1 | awk '{ print "v" $2}') || INSTALLED_VERSION=""
set +e
if [ "${INSTALLED_VERSION}" = "${GECKODRIVER_VERSION}" ]; then
build_msg INSTALL "geckodriver already installed"
@@ -154,13 +154,13 @@ gecko.driver() {
fi
PLATFORM="$(python -c 'import platform; print(platform.system().lower(), platform.architecture()[0])')"
case "$PLATFORM" in
-"linux 32bit" | "linux2 32bit") ARCH="linux32";;
-"linux 64bit" | "linux2 64bit") ARCH="linux64";;
-"windows 32 bit") ARCH="win32";;
-"windows 64 bit") ARCH="win64";;
-"mac 64bit") ARCH="macos";;
+"linux 32bit" | "linux2 32bit") ARCH="linux32" ;;
+"linux 64bit" | "linux2 64bit") ARCH="linux64" ;;
+"windows 32 bit") ARCH="win32" ;;
+"windows 64 bit") ARCH="win64" ;;
+"mac 64bit") ARCH="macos" ;;
esac
-GECKODRIVER_URL="https://github.com/mozilla/geckodriver/releases/download/$GECKODRIVER_VERSION/geckodriver-$GECKODRIVER_VERSION-$ARCH.tar.gz";
+GECKODRIVER_URL="https://github.com/mozilla/geckodriver/releases/download/$GECKODRIVER_VERSION/geckodriver-$GECKODRIVER_VERSION-$ARCH.tar.gz"

build_msg GECKO "Installing ${PY_ENV_BIN}/geckodriver from $GECKODRIVER_URL"
@@ -181,7 +181,8 @@ py.build() {

py.clean() {
build_msg CLEAN pyenv
-( set -e
+(
+set -e
pyenv.drop
[ "$VERBOSE" = "1" ] && set -x
rm -rf "${PYDIST}" "${PYBUILD}" "${PY_ENV}" ./.tox ./*.egg-info
@@ -201,13 +202,14 @@ EOF
pyenv.install() {

if ! pyenv.OK; then
-py.clean > /dev/null
+py.clean >/dev/null
fi
-if pyenv.install.OK > /dev/null; then
+if pyenv.install.OK >/dev/null; then
return 0
fi

-( set -e
+(
+set -e
pyenv
build_msg PYENV "[install] pip install --use-pep517 --no-build-isolation -e 'searx${PY_SETUP_EXTRAS}'"
"${PY_ENV_BIN}/python" -m pip install --use-pep517 --no-build-isolation -e ".${PY_SETUP_EXTRAS}"
@@ -220,8 +222,8 @@ pyenv.install()

pyenv.uninstall() {
build_msg PYENV "[pyenv.uninstall] uninstall packages: ${PYOBJECTS}"
-pyenv.cmd python setup.py develop --uninstall 2>&1 \
-| prefix_stdout "${_Blue}PYENV ${_creset}[pyenv.uninstall] "
+pyenv.cmd python setup.py develop --uninstall 2>&1 |
+prefix_stdout "${_Blue}PYENV ${_creset}[pyenv.uninstall] "

}
@@ -243,7 +245,7 @@ docs.prebuild() {
set -e
[ "$VERBOSE" = "1" ] && set -x
mkdir -p "${DOCS_BUILD}/includes"
-./utils/searxng.sh searxng.doc.rst > "${DOCS_BUILD}/includes/searxng.rst"
+./utils/searxng.sh searxng.doc.rst >"${DOCS_BUILD}/includes/searxng.rst"
pyenv.cmd searxng_extra/docs_prebuild
)
dump_return $?
@@ -253,7 +255,8 @@ docs.prebuild() {
main() {

local _type
-local cmd="$1"; shift
+local cmd="$1"
+shift

if [ "$cmd" == "" ]; then
help
@@ -262,8 +265,11 @@ main() {
fi

case "$cmd" in
---getenv) var="$1"; echo "${!var}";;
---help) help;;
+--getenv)
+var="$1"
+echo "${!var}"
+;;
+--help) help ;;
--*)
help
err_msg "unknown option $cmd"
@@ -8,11 +8,11 @@ build.env.export() {
GIT_BRANCH="$(git branch | grep '\*' | cut -d' ' -f2-)"
GIT_REMOTE="$(git config "branch.${GIT_BRANCH}.remote")"
GIT_URL="$(git config --get "remote.${GIT_REMOTE}.url")"
-if [[ "${GIT_URL}" == git@* ]]; then
+if [[ ${GIT_URL} == git@* ]]; then
GIT_URL="${GIT_URL/://}"
GIT_URL="${GIT_URL/git@/https://}"
fi
-if [[ "${GIT_URL}" == *.git ]]; then
+if [[ ${GIT_URL} == *.git ]]; then
GIT_URL="${GIT_URL%.git}"
fi
@@ -27,6 +27,6 @@ build.env.export() {

}

-pushd "${REPO_ROOT}" &> /dev/null
+pushd "${REPO_ROOT}" &>/dev/null
build.env.export
-popd &> /dev/null
+popd &>/dev/null
utils/lib.sh | 293
@@ -4,11 +4,20 @@
# shellcheck disable=SC2059,SC1117

# ubuntu, debian, arch, fedora, centos ...
-DIST_ID=$(source /etc/os-release; echo "$ID");
+DIST_ID=$(
+source /etc/os-release
+echo "$ID"
+)
# shellcheck disable=SC2034
-DIST_VERS=$(source /etc/os-release; echo "$VERSION_ID");
+DIST_VERS=$(
+source /etc/os-release
+echo "$VERSION_ID"
+)
# shellcheck disable=SC2034
-DIST_VERSION_CODENAME=$(source /etc/os-release; echo "$VERSION_CODENAME");
+DIST_VERSION_CODENAME=$(
+source /etc/os-release
+echo "$VERSION_CODENAME"
+)

ADMIN_NAME="${ADMIN_NAME:-$(git config user.name)}"
ADMIN_NAME="${ADMIN_NAME:-$USER}"
@@ -16,19 +25,19 @@ ADMIN_NAME="${ADMIN_NAME:-$USER}"
ADMIN_EMAIL="${ADMIN_EMAIL:-$(git config user.email)}"
ADMIN_EMAIL="${ADMIN_EMAIL:-$USER@$(hostname)}"

-if [[ -z "${REPO_ROOT}" ]]; then
+if [[ -z ${REPO_ROOT} ]]; then
REPO_ROOT=$(dirname "${BASH_SOURCE[0]}")
-while [ -h "${REPO_ROOT}" ] ; do
+while [ -h "${REPO_ROOT}" ]; do
REPO_ROOT=$(readlink "${REPO_ROOT}")
done
-REPO_ROOT=$(cd "${REPO_ROOT}/.." && pwd -P )
+REPO_ROOT=$(cd "${REPO_ROOT}/.." && pwd -P)
fi

if [[ -z ${TEMPLATES} ]]; then
TEMPLATES="${REPO_ROOT}/utils/templates"
fi

-if [[ -z "$CACHE" ]]; then
+if [[ -z $CACHE ]]; then
CACHE="${REPO_ROOT}/cache"
fi
@ -42,7 +51,7 @@ fi
|
|||
DOT_CONFIG="${DOT_CONFIG:-${REPO_ROOT}/.config.sh}"
|
||||
|
||||
source_dot_config() {
|
||||
if [[ ! -e "${DOT_CONFIG}" ]]; then
|
||||
if [[ ! -e ${DOT_CONFIG} ]]; then
|
||||
err_msg "configuration does not exists at: ${DOT_CONFIG}"
|
||||
return 42
|
||||
fi
|
||||
|
@ -128,9 +137,9 @@ rst_title() {
|
|||
# usage: rst_title <header-text> [part|chapter|section]
|
||||
|
||||
case ${2-chapter} in
|
||||
part) printf "\n${_BGreen}${1//?/=}${_creset}\n${_BCyan}${1}${_creset}\n${_BGreen}${1//?/=}${_creset}\n";;
|
||||
chapter) printf "\n${_BCyan}${1}${_creset}\n${_BGreen}${1//?/=}${_creset}\n";;
|
||||
section) printf "\n${_BCyan}${1}${_creset}\n${_BGreen}${1//?/-}${_creset}\n";;
|
||||
part) printf "\n${_BGreen}${1//?/=}${_creset}\n${_BCyan}${1}${_creset}\n${_BGreen}${1//?/=}${_creset}\n" ;;
|
||||
chapter) printf "\n${_BCyan}${1}${_creset}\n${_BGreen}${1//?/=}${_creset}\n" ;;
|
||||
section) printf "\n${_BCyan}${1}${_creset}\n${_BGreen}${1//?/-}${_creset}\n" ;;
|
||||
*)
|
||||
err_msg "invalid argument '${2}' in line $(caller)"
|
||||
return 42
|
||||
|
@ -150,12 +159,12 @@ rst_para() {
|
|||
}
|
||||
|
||||
die() {
|
||||
echo -e "${_BRed}ERROR:${_creset} ${BASH_SOURCE[1]}: line ${BASH_LINENO[0]}: ${2-died ${1-1}}" >&2;
|
||||
echo -e "${_BRed}ERROR:${_creset} ${BASH_SOURCE[1]}: line ${BASH_LINENO[0]}: ${2-died ${1-1}}" >&2
|
||||
exit "${1-1}"
|
||||
}
|
||||
|
||||
die_caller() {
|
||||
echo -e "${_BRed}ERROR:${_creset} ${BASH_SOURCE[2]}: line ${BASH_LINENO[1]}: ${FUNCNAME[1]}(): ${2-died ${1-1}}" >&2;
|
||||
echo -e "${_BRed}ERROR:${_creset} ${BASH_SOURCE[2]}: line ${BASH_LINENO[1]}: ${FUNCNAME[1]}(): ${2-died ${1-1}}" >&2
|
||||
exit "${1-1}"
|
||||
}
|
||||
|
||||
|
@ -181,17 +190,17 @@ dump_return() {
|
|||
|
||||
clean_stdin() {
|
||||
if [[ $(uname -s) != 'Darwin' ]]; then
|
||||
while read -r -n1 -t 0.1; do : ; done
|
||||
while read -r -n1 -t 0.1; do :; done
|
||||
fi
|
||||
}
|
||||
|
||||
wait_key(){
|
||||
wait_key() {
|
||||
# usage: wait_key [<timeout in sec>]
|
||||
|
||||
clean_stdin
|
||||
local _t=$1
|
||||
local msg="${MSG}"
|
||||
[[ -z "$msg" ]] && msg="${_Green}** press any [${_BCyan}KEY${_Green}] to continue **${_creset}"
|
||||
[[ -z $msg ]] && msg="${_Green}** press any [${_BCyan}KEY${_Green}] to continue **${_creset}"
|
||||
|
||||
[[ -n $FORCE_TIMEOUT ]] && _t=$FORCE_TIMEOUT
|
||||
[[ -n $_t ]] && _t="-t $_t"
|
||||
|
@ -233,7 +242,8 @@ ask_yn() {
|
|||
# shellcheck disable=SC2086,SC2229
|
||||
read -r -n1 $_t
|
||||
if [[ -z $REPLY ]]; then
|
||||
printf "$default\n"; break
|
||||
printf "$default\n"
|
||||
break
|
||||
elif [[ $REPLY =~ ^[Yy]$ ]]; then
|
||||
exit_val=${EXIT_YES}
|
||||
printf "\n"
|
||||
|
@ -250,7 +260,7 @@ ask_yn() {
|
|||
return $exit_val
|
||||
}
|
||||
|
||||
tee_stderr () {
|
||||
tee_stderr() {
|
||||
|
||||
# usage::
|
||||
# tee_stderr 1 <<EOF | python -i
|
||||
|
@ -260,8 +270,8 @@ tee_stderr () {
|
|||
# >>> print("hello")
|
||||
# hello
|
||||
|
||||
local _t="0";
|
||||
if [[ -n $1 ]] ; then _t="$1"; fi
|
||||
local _t="0"
|
||||
if [[ -n $1 ]]; then _t="$1"; fi
|
||||
|
||||
(while read -r line; do
|
||||
# shellcheck disable=SC2086,SC2229
|
||||
|
@ -271,12 +281,12 @@ tee_stderr () {
|
|||
done)
|
||||
}
|
||||
|
||||
prefix_stdout () {
|
||||
prefix_stdout() {
|
||||
# usage: <cmd> | prefix_stdout [prefix]
|
||||
|
||||
local prefix="${_BYellow}-->|${_creset}"
|
||||
|
||||
if [[ -n $1 ]] ; then prefix="$1"; fi
|
||||
if [[ -n $1 ]]; then prefix="$1"; fi
|
||||
|
||||
# shellcheck disable=SC2162
|
||||
(while IFS= read line; do
|
||||
|
@ -296,7 +306,7 @@ append_line() {
|
|||
|
||||
local LINE=$1
|
||||
local FILE=$2
|
||||
grep -qFs -- "$LINE" "$FILE" || echo "$LINE" >> "$FILE"
|
||||
grep -qFs -- "$LINE" "$FILE" || echo "$LINE" >>"$FILE"
|
||||
}
|
||||
|
||||
cache_download() {
|
||||
|
@ -311,7 +321,7 @@ cache_download() {
|
|||
mkdir -p "${CACHE}"
|
||||
fi
|
||||
|
||||
if [[ -f "${CACHE}/$2" ]] ; then
|
||||
if [[ -f "${CACHE}/$2" ]]; then
|
||||
info_msg "already cached: $1"
|
||||
info_msg " --> ${CACHE}/$2"
|
||||
fi
|
||||
|
@ -320,11 +330,13 @@ cache_download() {
|
|||
info_msg "caching: $1"
|
||||
info_msg " --> ${CACHE}/$2"
|
||||
if [[ -n ${SUDO_USER} ]]; then
|
||||
sudo -u "${SUDO_USER}" wget --progress=bar -O "${CACHE}/$2" "$1" ; exit_value=$?
|
||||
sudo -u "${SUDO_USER}" wget --progress=bar -O "${CACHE}/$2" "$1"
|
||||
exit_value=$?
|
||||
else
|
||||
wget --progress=bar -O "${CACHE}/$2" "$1" ; exit_value=$?
|
||||
wget --progress=bar -O "${CACHE}/$2" "$1"
|
||||
exit_value=$?
|
||||
fi
|
||||
if [[ ! $exit_value = 0 ]]; then
|
||||
if [[ $exit_value != 0 ]]; then
|
||||
err_msg "failed to download: $1"
|
||||
fi
|
||||
fi
|
||||
|
@ -350,7 +362,7 @@ choose_one() {
|
|||
local default=${DEFAULT_SELECT-1}
|
||||
local REPLY
|
||||
local env_name=$1 && shift
|
||||
local choice=$1;
|
||||
local choice=$1
|
||||
local max="${#@}"
|
||||
local _t
|
||||
[[ -n $FORCE_TIMEOUT ]] && _t=$FORCE_TIMEOUT
|
||||
|
@ -358,8 +370,8 @@ choose_one() {
|
|||
|
||||
list=("$@")
|
||||
echo -e "${_BGreen}Menu::${_creset}"
|
||||
for ((i=1; i<= $((max -1)); i++)); do
|
||||
if [[ "$i" == "$default" ]]; then
|
||||
for ((i = 1; i <= $((max - 1)); i++)); do
|
||||
if [[ $i == "$default" ]]; then
|
||||
echo -e " ${_BGreen}$i.${_creset}) ${list[$i]} [default]"
|
||||
else
|
||||
echo -e " $i.) ${list[$i]}"
|
||||
|
@ -369,7 +381,7 @@ choose_one() {
|
|||
clean_stdin
|
||||
printf "$1 [${_BGreen}$default${_creset}] "
|
||||
|
||||
if (( 10 > max )); then
|
||||
if ((10 > max)); then
|
||||
# shellcheck disable=SC2086,SC2229
|
||||
read -r -n1 $_t
|
||||
else
|
||||
|
@ -377,7 +389,7 @@ choose_one() {
|
|||
read -r $_t
|
||||
fi
|
||||
# selection fits
|
||||
[[ $REPLY =~ ^-?[0-9]+$ ]] && (( REPLY > 0 )) && (( REPLY < max )) && break
|
||||
[[ $REPLY =~ ^-?[0-9]+$ ]] && ((REPLY > 0)) && ((REPLY < max)) && break
|
||||
|
||||
# take default
|
||||
[[ -z $REPLY ]] && REPLY=$default && break
|
||||
|
@ -414,8 +426,14 @@ install_template() {
|
|||
|
||||
for i in "$@"; do
|
||||
case $i in
|
||||
--no-eval) do_eval=0; shift ;;
|
||||
--variant=*) variant=":${i#*=}"; shift ;;
|
||||
--no-eval)
|
||||
do_eval=0
|
||||
shift
|
||||
;;
|
||||
--variant=*)
|
||||
variant=":${i#*=}"
|
||||
shift
|
||||
;;
|
||||
*) pos_args+=("$i") ;;
|
||||
esac
|
||||
done
|
||||
|
@ -431,14 +449,14 @@ install_template() {
|
|||
info_msg "install (eval=$do_eval): ${dst}"
|
||||
[[ -n $variant ]] && info_msg "variant --> ${variant}"
|
||||
|
||||
if [[ ! -f "${template_origin}" ]] ; then
|
||||
if [[ ! -f ${template_origin} ]]; then
|
||||
err_msg "${template_origin} does not exists"
|
||||
err_msg "... can't install $dst"
|
||||
wait_key 30
|
||||
return 42
|
||||
fi
|
||||
|
||||
if [[ "$do_eval" == "1" ]]; then
|
||||
if [[ $do_eval == "1" ]]; then
|
||||
template_file="${CACHE}${dst}${variant}"
|
||||
info_msg "BUILD ${template_file}"
|
||||
info_msg "BUILD using template ${template_origin}"
|
||||
|
@ -448,7 +466,7 @@ install_template() {
|
|||
mkdir -p "$(dirname "${template_file}")"
|
||||
fi
|
||||
# shellcheck disable=SC2086
|
||||
eval "echo \"$(cat ${template_origin})\"" > "${template_file}"
|
||||
eval "echo \"$(cat ${template_origin})\"" >"${template_file}"
|
||||
if [[ -n ${SUDO_USER} ]]; then
|
||||
chown "${SUDO_USER}:${SUDO_USER}" "${template_file}"
|
||||
fi
|
||||
|
@ -458,14 +476,14 @@ install_template() {
|
|||
|
||||
mkdir -p "$(dirname "${dst}")"
|
||||
|
||||
if [[ ! -f "${dst}" ]]; then
|
||||
if [[ ! -f ${dst} ]]; then
|
||||
info_msg "install: ${template_file}"
|
||||
sudo -H install -v -o "${owner}" -g "${group}" -m "${chmod}" \
|
||||
"${template_file}" "${dst}" | prefix_stdout
|
||||
return $?
|
||||
fi
|
||||
|
||||
if [[ -f "${dst}" ]] && cmp --silent "${template_file}" "${dst}" ; then
|
||||
if [[ -f ${dst} ]] && cmp --silent "${template_file}" "${dst}"; then
|
||||
info_msg "file ${dst} already installed"
|
||||
return 0
|
||||
fi
|
||||
|
@ -503,6 +521,7 @@ install_template() {
|
|||
;;
|
||||
"diff files")
|
||||
$DIFF_CMD "${dst}" "${template_file}" | prefix_stdout
|
||||
;;
|
||||
esac
|
||||
done
|
||||
}
|
||||
|
@ -517,11 +536,11 @@ service_is_available() {
|
|||
--silent -o /dev/null --head --write-out '%{http_code}' --insecure \
|
||||
"${URL}")
|
||||
exit_val=$?
|
||||
if [[ $exit_val = 0 ]]; then
|
||||
if [[ $exit_val == 0 ]]; then
|
||||
info_msg "got $http_code from ${URL}"
|
||||
fi
|
||||
case "$http_code" in
|
||||
404|410|423) exit_val=$http_code;;
|
||||
404 | 410 | 423) exit_val=$http_code ;;
|
||||
esac
|
||||
return "$exit_val"
|
||||
}
|
||||
|
@ -549,7 +568,7 @@ PYBUILD="${PYBUILD:=build/py${PY}}"
|
|||
#PY_SETUP_EXTRAS='[develop,test]'
|
||||
PY_SETUP_EXTRAS="${PY_SETUP_EXTRAS:=[develop,test]}"
|
||||
|
||||
PIP_BOILERPLATE=( pip wheel setuptools )
|
||||
PIP_BOILERPLATE=(pip wheel setuptools)
|
||||
|
||||
# shellcheck disable=SC2120
|
||||
pyenv() {
|
||||
|
@ -563,28 +582,28 @@ pyenv() {
|
|||
# files.
|
||||
|
||||
required_commands \
|
||||
sha256sum "${PYTHON}" \
|
||||
|| exit
|
||||
sha256sum "${PYTHON}" ||
|
||||
exit
|
||||
|
||||
local pip_req=()
|
||||
|
||||
if ! pyenv.OK > /dev/null; then
|
||||
if ! pyenv.OK >/dev/null; then
|
||||
rm -f "${PY_ENV}/${PY_ENV_REQ}.sha256"
|
||||
pyenv.drop > /dev/null
|
||||
pyenv.drop >/dev/null
|
||||
build_msg PYENV "[virtualenv] installing ${PY_ENV_REQ} into ${PY_ENV}"
|
||||
|
||||
"${PYTHON}" -m venv "$@" "${PY_ENV}"
|
||||
"${PY_ENV_BIN}/python" -m pip install -U "${PIP_BOILERPLATE[@]}"
|
||||
|
||||
for i in ${PY_ENV_REQ}; do
|
||||
pip_req=( "${pip_req[@]}" "-r" "$i" )
|
||||
pip_req=("${pip_req[@]}" "-r" "$i")
|
||||
done
|
||||
|
||||
(
|
||||
[ "$VERBOSE" = "1" ] && set -x
|
||||
# shellcheck disable=SC2086
|
||||
"${PY_ENV_BIN}/python" -m pip install "${pip_req[@]}" \
|
||||
&& sha256sum ${PY_ENV_REQ} > "${PY_ENV}/requirements.sha256"
|
||||
"${PY_ENV_BIN}/python" -m pip install "${pip_req[@]}" &&
|
||||
sha256sum ${PY_ENV_REQ} >"${PY_ENV}/requirements.sha256"
|
||||
)
|
||||
fi
|
||||
pyenv.OK
|
||||
|
@ -602,17 +621,17 @@ pyenv.OK() {
|
|||
return 1
|
||||
fi
|
||||
|
||||
if [ ! -f "${PY_ENV}/requirements.sha256" ] \
|
||||
|| ! sha256sum -c "${PY_ENV}/requirements.sha256" > /dev/null 2>&1; then
|
||||
if [ ! -f "${PY_ENV}/requirements.sha256" ] ||
|
||||
! sha256sum -c "${PY_ENV}/requirements.sha256" >/dev/null 2>&1; then
|
||||
build_msg PYENV "[virtualenv] requirements.sha256 failed"
|
||||
sed 's/^/ [virtualenv] - /' <"${PY_ENV}/requirements.sha256"
|
||||
return 1
|
||||
fi
|
||||
|
||||
if [ "$VERBOSE" = "1" ]; then
|
||||
pyenv.check \
|
||||
| "${PY_ENV_BIN}/python" 2>&1 \
|
||||
| prefix_stdout "${_Blue}PYENV ${_creset}[check] "
|
||||
pyenv.check |
|
||||
"${PY_ENV_BIN}/python" 2>&1 |
|
||||
prefix_stdout "${_Blue}PYENV ${_creset}[check] "
|
||||
else
|
||||
pyenv.check | "${PY_ENV_BIN}/python" 1>/dev/null
|
||||
fi
|
||||
|
@ -657,9 +676,9 @@ EOF
|
|||
pyenv.install() {
|
||||
|
||||
if ! pyenv.OK; then
|
||||
py.clean > /dev/null
|
||||
py.clean >/dev/null
|
||||
fi
|
||||
if ! pyenv.install.OK > /dev/null; then
|
||||
if ! pyenv.install.OK >/dev/null; then
|
||||
build_msg PYENV "[install] ${PYOBJECTS}"
|
||||
if ! pyenv.OK >/dev/null; then
|
||||
pyenv
|
||||
|
@ -708,19 +727,19 @@ pyenv.uninstall() {
|
|||
build_msg PYENV "[uninstall] ${PYOBJECTS}"
|
||||
|
||||
if [ "." = "${PYOBJECTS}" ]; then
|
||||
pyenv.cmd python setup.py develop --uninstall 2>&1 \
|
||||
| prefix_stdout "${_Blue}PYENV ${_creset}[pyenv.uninstall] "
|
||||
pyenv.cmd python setup.py develop --uninstall 2>&1 |
|
||||
prefix_stdout "${_Blue}PYENV ${_creset}[pyenv.uninstall] "
|
||||
else
|
||||
# shellcheck disable=SC2086
|
||||
pyenv.cmd python -m pip uninstall --yes ${PYOBJECTS} 2>&1 \
|
||||
| prefix_stdout "${_Blue}PYENV ${_creset}[pyenv.uninstall] "
|
||||
pyenv.cmd python -m pip uninstall --yes ${PYOBJECTS} 2>&1 |
|
||||
prefix_stdout "${_Blue}PYENV ${_creset}[pyenv.uninstall] "
|
||||
fi
|
||||
}
|
||||
|
||||
|
||||
pyenv.cmd() {
|
||||
pyenv.install
|
||||
( set -e
|
||||
(
|
||||
set -e
|
||||
# shellcheck source=/dev/null
|
||||
source "${PY_ENV_BIN}/activate"
|
||||
[ "$VERBOSE" = "1" ] && set -x
|
||||
|
@ -728,14 +747,12 @@ pyenv.cmd() {
|
|||
)
|
||||
}
|
||||
|
||||
|
||||
pyenv.activate() {
|
||||
pyenv.install
|
||||
# shellcheck source=/dev/null
|
||||
source "${PY_ENV_BIN}/activate"
|
||||
}
|
||||
|
||||
|
||||
# Sphinx doc
|
||||
# ----------
|
||||
|
||||
|
@ -804,25 +821,25 @@ docs.gh-pages() {
|
|||
(
|
||||
git worktree remove -f "${GH_PAGES}"
|
||||
git branch -D gh-pages
|
||||
) &> /dev/null || true
|
||||
) &>/dev/null || true
|
||||
git worktree add --no-checkout "${GH_PAGES}" "${remote}/master"
|
||||
|
||||
pushd "${GH_PAGES}" &> /dev/null
|
||||
pushd "${GH_PAGES}" &>/dev/null
|
||||
git checkout --orphan gh-pages
|
||||
git rm -rfq .
|
||||
popd &> /dev/null
|
||||
popd &>/dev/null
|
||||
|
||||
cp -r "${DOCS_DIST}"/* "${GH_PAGES}"/
|
||||
touch "${GH_PAGES}/.nojekyll"
|
||||
cat > "${GH_PAGES}/404.html" <<EOF
|
||||
cat >"${GH_PAGES}/404.html" <<EOF
|
||||
<html><head><META http-equiv='refresh' content='0;URL=index.html'></head></html>
|
||||
EOF
|
||||
|
||||
pushd "${GH_PAGES}" &> /dev/null
|
||||
pushd "${GH_PAGES}" &>/dev/null
|
||||
git add --all .
|
||||
git commit -q -m "gh-pages build from: ${branch}@${head} (${remote_url})"
|
||||
git push -f "${remote}" gh-pages
|
||||
popd &> /dev/null
|
||||
popd &>/dev/null
|
||||
|
||||
set +x
|
||||
build_msg GH-PAGES "deployed"
|
||||
|
@ -850,7 +867,7 @@ drop_service_account() {
|
|||
fi
|
||||
}
|
||||
|
||||
interactive_shell(){
|
||||
interactive_shell() {
|
||||
|
||||
# usage: interactive_shell "${SERVICE_USER}"
|
||||
|
||||
|
@ -858,7 +875,6 @@ interactive_shell(){
|
|||
sudo -H -u "${1}" -i
|
||||
}
|
||||
|
||||
|
||||
# systemd
|
||||
# -------
|
||||
|
||||
|
@ -927,7 +943,6 @@ systemctl status --no-pager ${1}.service
|
|||
EOF
|
||||
}
|
||||
|
||||
|
||||
# nginx
|
||||
# -----
|
||||
|
||||
|
@ -945,14 +960,14 @@ nginx_distro_setup() {
|
|||
NGINX_APPS_AVAILABLE="/etc/nginx/default.apps-available"
|
||||
|
||||
case $DIST_ID-$DIST_VERS in
|
||||
ubuntu-*|debian-*)
|
||||
ubuntu-* | debian-*)
|
||||
NGINX_PACKAGES="nginx"
|
||||
NGINX_DEFAULT_SERVER=/etc/nginx/sites-available/default
|
||||
;;
|
||||
arch-*)
|
||||
NGINX_PACKAGES="nginx-mainline"
|
||||
;;
|
||||
fedora-*|centos-7)
|
||||
fedora-* | centos-7)
|
||||
NGINX_PACKAGES="nginx"
|
||||
;;
|
||||
*)
|
||||
|
@ -961,11 +976,11 @@ nginx_distro_setup() {
|
|||
esac
|
||||
}
|
||||
|
||||
install_nginx(){
|
||||
install_nginx() {
|
||||
info_msg "installing nginx ..."
|
||||
pkg_install "${NGINX_PACKAGES}"
|
||||
case $DIST_ID-$DIST_VERS in
|
||||
arch-*|fedora-*|centos-7)
|
||||
arch-* | fedora-* | centos-7)
|
||||
systemctl enable nginx
|
||||
systemctl start nginx
|
||||
;;
|
||||
|
@ -998,8 +1013,8 @@ nginx_install_app() {
|
|||
|
||||
for i in "$@"; do
|
||||
case $i in
|
||||
-*) template_opts+=("$i");;
|
||||
*) pos_args+=("$i");;
|
||||
-*) template_opts+=("$i") ;;
|
||||
*) pos_args+=("$i") ;;
|
||||
esac
|
||||
done
|
||||
|
||||
|
@ -1042,23 +1057,22 @@ nginx_include_apps_enabled() {
|
|||
(
|
||||
local line
|
||||
local stage=0
|
||||
while IFS= read -r line
|
||||
do
|
||||
while IFS= read -r line; do
|
||||
echo "$line"
|
||||
if [[ $stage = 0 ]]; then
|
||||
if [[ $stage == 0 ]]; then
|
||||
if [[ $line =~ ^[[:space:]]*server*[[:space:]]*\{ ]]; then
|
||||
stage=1
|
||||
fi
|
||||
fi
|
||||
|
||||
if [[ $stage = 1 ]]; then
|
||||
if [[ $stage == 1 ]]; then
|
||||
echo " # Load configuration files for the default server block."
|
||||
echo " $include_directive"
|
||||
echo ""
|
||||
stage=2
|
||||
fi
|
||||
done < "${server_conf}.bak"
|
||||
) > "${server_conf}"
|
||||
done <"${server_conf}.bak"
|
||||
) >"${server_conf}"
|
||||
|
||||
}
|
||||
|
||||
|
@ -1095,14 +1109,13 @@ nginx_disable_app() {
|
|||
nginx_reload
|
||||
}
|
||||
|
||||
|
||||
# Apache
|
||||
# ------
|
||||
|
||||
apache_distro_setup() {
|
||||
# shellcheck disable=SC2034
|
||||
case $DIST_ID-$DIST_VERS in
|
||||
ubuntu-*|debian-*)
|
||||
ubuntu-* | debian-*)
|
||||
# debian uses the /etc/apache2 path, while other distros use
|
||||
# the apache default at /etc/httpd
|
||||
APACHE_SITES_AVAILABLE="/etc/apache2/sites-available"
|
||||
|
@ -1116,7 +1129,7 @@ apache_distro_setup() {
|
|||
APACHE_MODULES="modules"
|
||||
APACHE_PACKAGES="apache"
|
||||
;;
|
||||
fedora-*|centos-7)
|
||||
fedora-* | centos-7)
|
||||
APACHE_SITES_AVAILABLE="/etc/httpd/sites-available"
|
||||
APACHE_SITES_ENABLED="/etc/httpd/sites-enabled"
|
||||
APACHE_MODULES="modules"
|
||||
|
@ -1128,13 +1141,13 @@ apache_distro_setup() {
|
|||
esac
|
||||
}
|
||||
|
||||
install_apache(){
|
||||
install_apache() {
|
||||
info_msg "installing apache ..."
|
||||
pkg_install "$APACHE_PACKAGES"
|
||||
case $DIST_ID-$DIST_VERS in
|
||||
arch-*|fedora-*|centos-7)
|
||||
arch-* | fedora-* | centos-7)
|
||||
if ! grep "IncludeOptional sites-enabled" "/etc/httpd/conf/httpd.conf"; then
|
||||
echo "IncludeOptional sites-enabled/*.conf" >> "/etc/httpd/conf/httpd.conf"
|
||||
echo "IncludeOptional sites-enabled/*.conf" >>"/etc/httpd/conf/httpd.conf"
|
||||
fi
|
||||
systemctl enable httpd
|
||||
systemctl start httpd
|
||||
|
@ -1144,9 +1157,9 @@ install_apache(){
|
|||
|
||||
apache_is_installed() {
|
||||
case $DIST_ID-$DIST_VERS in
|
||||
ubuntu-*|debian-*) (command -v apachectl) &>/dev/null;;
|
||||
arch-*) (command -v httpd) &>/dev/null;;
|
||||
fedora-*|centos-7) (command -v httpd) &>/dev/null;;
|
||||
ubuntu-* | debian-*) (command -v apachectl) &>/dev/null ;;
|
||||
arch-*) (command -v httpd) &>/dev/null ;;
|
||||
fedora-* | centos-7) (command -v httpd) &>/dev/null ;;
|
||||
esac
|
||||
}
|
||||
|
||||
|
@ -1155,11 +1168,11 @@ apache_reload() {
|
|||
info_msg "reload apache .."
|
||||
echo
|
||||
case $DIST_ID-$DIST_VERS in
|
||||
ubuntu-*|debian-*)
|
||||
ubuntu-* | debian-*)
|
||||
sudo -H apachectl configtest
|
||||
sudo -H systemctl force-reload apache2
|
||||
;;
|
||||
arch-*|fedora-*|centos-7)
|
||||
arch-* | fedora-* | centos-7)
|
||||
sudo -H httpd -t
|
||||
sudo -H systemctl force-reload httpd
|
||||
;;
|
||||
|
@ -1177,8 +1190,8 @@ apache_install_site() {
|
|||
|
||||
for i in "$@"; do
|
||||
case $i in
|
||||
-*) template_opts+=("$i");;
|
||||
*) pos_args+=("$i");;
|
||||
-*) template_opts+=("$i") ;;
|
||||
*) pos_args+=("$i") ;;
|
||||
esac
|
||||
done
|
||||
|
||||
|
@ -1207,7 +1220,7 @@ apache_enable_site() {
|
|||
info_msg "enable apache site: ${CONF}"
|
||||
|
||||
case $DIST_ID-$DIST_VERS in
|
||||
ubuntu-*|debian-*)
|
||||
ubuntu-* | debian-*)
|
||||
sudo -H a2ensite -q "${CONF}"
|
||||
;;
|
||||
arch-*)
|
||||
|
@ -1215,7 +1228,7 @@ apache_enable_site() {
|
|||
rm -f "${APACHE_SITES_ENABLED}/${CONF}"
|
||||
ln -s "${APACHE_SITES_AVAILABLE}/${CONF}" "${APACHE_SITES_ENABLED}/${CONF}"
|
||||
;;
|
||||
fedora-*|centos-7)
|
||||
fedora-* | centos-7)
|
||||
mkdir -p "${APACHE_SITES_ENABLED}"
|
||||
rm -f "${APACHE_SITES_ENABLED}/${CONF}"
|
||||
ln -s "${APACHE_SITES_AVAILABLE}/${CONF}" "${APACHE_SITES_ENABLED}/${CONF}"
|
||||
|
@ -1233,13 +1246,13 @@ apache_disable_site() {
|
|||
info_msg "disable apache site: ${CONF}"
|
||||
|
||||
case $DIST_ID-$DIST_VERS in
|
||||
ubuntu-*|debian-*)
|
||||
ubuntu-* | debian-*)
|
||||
sudo -H a2dissite -q "${CONF}"
|
||||
;;
|
||||
arch-*)
|
||||
rm -f "${APACHE_SITES_ENABLED}/${CONF}"
|
||||
;;
|
||||
fedora-*|centos-7)
|
||||
fedora-* | centos-7)
|
||||
rm -f "${APACHE_SITES_ENABLED}/${CONF}"
|
||||
;;
|
||||
esac
|
||||
|
@ -1256,7 +1269,7 @@ uWSGI_SETUP="${uWSGI_SETUP:=/etc/uwsgi}"
|
|||
|
||||
uWSGI_distro_setup() {
|
||||
case $DIST_ID-$DIST_VERS in
|
||||
ubuntu-*|debian-*)
|
||||
ubuntu-* | debian-*)
|
||||
# init.d --> /usr/share/doc/uwsgi/README.Debian.gz
|
||||
# For uWSGI debian uses the LSB init process, this might be changed
|
||||
# one day, see https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=833067
|
||||
|
@ -1273,7 +1286,7 @@ uWSGI_distro_setup() {
|
|||
uWSGI_APPS_ENABLED="${uWSGI_SETUP}"
|
||||
uWSGI_PACKAGES="uwsgi"
|
||||
;;
|
||||
fedora-*|centos-7)
|
||||
fedora-* | centos-7)
|
||||
# systemd --> /usr/lib/systemd/system/uwsgi.service
|
||||
# Fedora runs uWSGI in emperor-tyrant mode: in Tyrant mode the
|
||||
# Emperor will run the vassal using the UID/GID of the vassal
|
||||
|
@ -1287,14 +1300,14 @@ uWSGI_distro_setup() {
|
|||
*)
|
||||
err_msg "$DIST_ID-$DIST_VERS: uWSGI not yet implemented"
|
||||
;;
|
||||
esac
|
||||
esac
|
||||
}
|
||||
|
||||
install_uwsgi(){
|
||||
install_uwsgi() {
|
||||
info_msg "installing uwsgi ..."
|
||||
pkg_install "$uWSGI_PACKAGES"
|
||||
case $DIST_ID-$DIST_VERS in
|
||||
fedora-*|centos-7)
|
||||
fedora-* | centos-7)
|
||||
# enable & start should be called once at uWSGI installation time
|
||||
systemctl enable uwsgi
|
||||
systemctl restart uwsgi
|
||||
|
@ -1311,7 +1324,7 @@ uWSGI_restart() {
|
|||
[[ -z $CONF ]] && die_caller 42 "missing argument <myapp.ini>"
|
||||
info_msg "restart uWSGI service"
|
||||
case $DIST_ID-$DIST_VERS in
|
||||
ubuntu-*|debian-*)
|
||||
ubuntu-* | debian-*)
|
||||
# the 'service' method seems broken in that way, that it (re-)starts
|
||||
# the whole uwsgi process.
|
||||
service uwsgi restart "${CONF%.*}"
|
||||
|
@ -1324,7 +1337,7 @@ uWSGI_restart() {
|
|||
info_msg "[uWSGI:systemd-template] ${CONF} not installed (no need to restart)"
|
||||
fi
|
||||
;;
|
||||
fedora-*|centos-7)
|
||||
fedora-* | centos-7)
|
||||
# in emperor mode, just touch the file to restart
|
||||
if uWSGI_app_enabled "${CONF}"; then
|
||||
touch "${uWSGI_APPS_ENABLED}/${CONF}"
|
||||
|
@ -1360,8 +1373,8 @@ uWSGI_install_app() {
|
|||
|
||||
for i in "$@"; do
|
||||
case $i in
|
||||
-*) template_opts+=("$i");;
|
||||
*) pos_args+=("$i");;
|
||||
-*) template_opts+=("$i") ;;
|
||||
*) pos_args+=("$i") ;;
|
||||
esac
|
||||
done
|
||||
mkdir -p "${uWSGI_APPS_AVAILABLE}"
|
||||
|
@ -1394,7 +1407,7 @@ uWSGI_app_enabled() {
|
|||
|
||||
[[ -z $CONF ]] && die_caller 42 "missing argument <myapp.ini>"
|
||||
case $DIST_ID-$DIST_VERS in
|
||||
ubuntu-*|debian-*)
|
||||
ubuntu-* | debian-*)
|
||||
[[ -f "${uWSGI_APPS_ENABLED}/${CONF}" ]]
|
||||
exit_val=$?
|
||||
;;
|
||||
|
@ -1402,7 +1415,7 @@ uWSGI_app_enabled() {
|
|||
systemctl -q is-enabled "uwsgi@${CONF%.*}"
|
||||
exit_val=$?
|
||||
;;
|
||||
fedora-*|centos-7)
|
||||
fedora-* | centos-7)
|
||||
[[ -f "${uWSGI_APPS_ENABLED}/${CONF}" ]]
|
||||
exit_val=$?
|
||||
;;
|
||||
|
@ -1424,7 +1437,7 @@ uWSGI_enable_app() {
|
|||
|
||||
[[ -z $CONF ]] && die_caller 42 "missing argument <myapp.ini>"
|
||||
case $DIST_ID-$DIST_VERS in
|
||||
ubuntu-*|debian-*)
|
||||
ubuntu-* | debian-*)
|
||||
mkdir -p "${uWSGI_APPS_ENABLED}"
|
||||
rm -f "${uWSGI_APPS_ENABLED}/${CONF}"
|
||||
ln -s "${uWSGI_APPS_AVAILABLE}/${CONF}" "${uWSGI_APPS_ENABLED}/${CONF}"
|
||||
|
@ -1437,7 +1450,7 @@ uWSGI_enable_app() {
|
|||
systemctl enable "uwsgi@${CONF%.*}"
|
||||
info_msg "enabled uWSGI app: ${CONF} (restart required)"
|
||||
;;
|
||||
fedora-*|centos-7)
|
||||
fedora-* | centos-7)
|
||||
mkdir -p "${uWSGI_APPS_ENABLED}"
|
||||
rm -f "${uWSGI_APPS_ENABLED}/${CONF}"
|
||||
ln -s "${uWSGI_APPS_AVAILABLE}/${CONF}" "${uWSGI_APPS_ENABLED}/${CONF}"
|
||||
|
@ -1458,7 +1471,7 @@ uWSGI_disable_app() {
|
|||
|
||||
[[ -z $CONF ]] && die_caller 42 "missing argument <myapp.ini>"
|
||||
case $DIST_ID-$DIST_VERS in
|
||||
ubuntu-*|debian-*)
|
||||
ubuntu-* | debian-*)
|
||||
service uwsgi stop "${CONF%.*}"
|
||||
rm -f "${uWSGI_APPS_ENABLED}/${CONF}"
|
||||
info_msg "disabled uWSGI app: ${CONF} (restart uWSGI required)"
|
||||
|
@ -1468,7 +1481,7 @@ uWSGI_disable_app() {
|
|||
systemctl disable "uwsgi@${CONF%.*}"
|
||||
rm -f "${uWSGI_APPS_ENABLED}/${CONF}"
|
||||
;;
|
||||
fedora-*|centos-7)
|
||||
fedora-* | centos-7)
|
||||
# in emperor mode, just remove the app.ini file
|
||||
rm -f "${uWSGI_APPS_ENABLED}/${CONF}"
|
||||
;;
|
||||
|
@ -1497,7 +1510,7 @@ pkg_install() {
|
|||
return 42
|
||||
fi
|
||||
case $DIST_ID in
|
||||
ubuntu|debian)
|
||||
ubuntu | debian)
|
||||
if [[ $_apt_pkg_info_is_updated == 0 ]]; then
|
||||
export _apt_pkg_info_is_updated=1
|
||||
apt update
|
||||
|
@ -1533,7 +1546,7 @@ pkg_remove() {
|
|||
return 42
|
||||
fi
|
||||
case $DIST_ID in
|
||||
ubuntu|debian)
|
||||
ubuntu | debian)
|
||||
# shellcheck disable=SC2068
|
||||
apt-get purge --autoremove --ignore-missing -y $@
|
||||
;;
|
||||
|
@ -1557,20 +1570,20 @@ pkg_is_installed() {
|
|||
# usage: pkg_is_install foopkg || pkg_install foopkg
|
||||
|
||||
case $DIST_ID in
|
||||
ubuntu|debian)
|
||||
dpkg -l "$1" &> /dev/null
|
||||
ubuntu | debian)
|
||||
dpkg -l "$1" &>/dev/null
|
||||
return $?
|
||||
;;
|
||||
arch)
|
||||
pacman -Qsq "$1" &> /dev/null
|
||||
pacman -Qsq "$1" &>/dev/null
|
||||
return $?
|
||||
;;
|
||||
fedora)
|
||||
dnf list -q --installed "$1" &> /dev/null
|
||||
dnf list -q --installed "$1" &>/dev/null
|
||||
return $?
|
||||
;;
|
||||
centos)
|
||||
yum list -q --installed "$1" &> /dev/null
|
||||
yum list -q --installed "$1" &>/dev/null
|
||||
return $?
|
||||
;;
|
||||
esac
|
||||
|
@ -1601,15 +1614,15 @@ git_clone() {
|
|||
local bash_cmd="bash"
|
||||
local remote="origin"
|
||||
|
||||
if [[ ! "${dest:0:1}" = "/" ]]; then
|
||||
if [[ ${dest:0:1} != "/" ]]; then
|
||||
dest="$CACHE/$dest"
|
||||
fi
|
||||
|
||||
[[ -z $branch ]] && branch=master
|
||||
[[ -z $user ]] && [[ -n "${SUDO_USER}" ]] && user="${SUDO_USER}"
|
||||
[[ -z $user ]] && [[ -n ${SUDO_USER} ]] && user="${SUDO_USER}"
|
||||
[[ -n $user ]] && bash_cmd="sudo -H -u $user -i"
|
||||
|
||||
if [[ -d "${dest}" ]] ; then
|
||||
if [[ -d ${dest} ]]; then
|
||||
info_msg "already cloned: $dest"
|
||||
tee_stderr 0.1 <<EOF | $bash_cmd 2>&1 | prefix_stdout " ${_Yellow}|$user|${_creset} "
|
||||
cd "${dest}"
|
||||
|
@ -1684,7 +1697,7 @@ LXC_BASE_PACKAGES_centos="bash git python3"
|
|||
|
||||
lxc_distro_setup() {
|
||||
case $DIST_ID in
|
||||
ubuntu|debian) LXC_BASE_PACKAGES="${LXC_BASE_PACKAGES_debian}" ;;
|
||||
ubuntu | debian) LXC_BASE_PACKAGES="${LXC_BASE_PACKAGES_debian}" ;;
|
||||
arch) LXC_BASE_PACKAGES="${LXC_BASE_PACKAGES_arch}" ;;
|
||||
fedora) LXC_BASE_PACKAGES="${LXC_BASE_PACKAGES_fedora}" ;;
|
||||
centos) LXC_BASE_PACKAGES="${LXC_BASE_PACKAGES_centos}" ;;
|
||||
|
@ -1700,19 +1713,18 @@ lxc_install_base_packages() {
|
|||
pkg_install "${LXC_BASE_PACKAGES}"
|
||||
}
|
||||
|
||||
|
||||
lxc_image_copy() {
|
||||
|
||||
# usage: lxc_image_copy <remote image> <local image>
|
||||
#
|
||||
# lxc_image_copy "images:ubuntu/20.04" "ubu2004"
|
||||
|
||||
if lxc_image_exists "local:${LXC_SUITE[i+1]}"; then
|
||||
info_msg "image ${LXC_SUITE[i]} already copied --> ${LXC_SUITE[i+1]}"
|
||||
if lxc_image_exists "local:${LXC_SUITE[i + 1]}"; then
|
||||
info_msg "image ${LXC_SUITE[i]} already copied --> ${LXC_SUITE[i + 1]}"
|
||||
else
|
||||
info_msg "copy image locally ${LXC_SUITE[i]} --> ${LXC_SUITE[i+1]}"
|
||||
info_msg "copy image locally ${LXC_SUITE[i]} --> ${LXC_SUITE[i + 1]}"
|
||||
lxc image copy "${LXC_SUITE[i]}" local: \
|
||||
--alias "${LXC_SUITE[i+1]}" | prefix_stdout
|
||||
--alias "${LXC_SUITE[i + 1]}" | prefix_stdout
|
||||
fi
|
||||
}
|
||||
|
||||
|
@ -1731,14 +1743,14 @@ lxc_init_container() {
|
|||
fi
|
||||
}
|
||||
|
||||
lxc_exists(){
|
||||
lxc_exists() {
|
||||
|
||||
# usage: lxc_exists <name> || echo "container <name> does not exists"
|
||||
|
||||
lxc info "$1" &>/dev/null
|
||||
}
|
||||
|
||||
lxc_image_exists(){
|
||||
lxc_image_exists() {
|
||||
# usage: lxc_image_exists <alias> || echo "image <alias> does locally not exists"
|
||||
|
||||
lxc image info "local:$1" &>/dev/null
|
||||
|
@ -1766,11 +1778,10 @@ lxc_delete_local_image() {
|
|||
lxc image delete "local:$i"
|
||||
}
|
||||
|
||||
|
||||
# IP
|
||||
# --
|
||||
|
||||
global_IPs(){
|
||||
global_IPs() {
|
||||
# usage: global_IPS
|
||||
#
|
||||
# print list of host's SCOPE global addresses and adapters e.g::
|
||||
|
@ -1787,9 +1798,9 @@ primary_ip() {
|
|||
|
||||
case $DIST_ID in
|
||||
arch)
|
||||
ip -o addr show \
|
||||
| sed -nr 's/[0-9]*:\s*([a-z0-9]*).*inet[6]?\s*([a-z0-9.:]*).*scope global.*/\2/p' \
|
||||
| head -n 1
|
||||
ip -o addr show |
|
||||
sed -nr 's/[0-9]*:\s*([a-z0-9]*).*inet[6]?\s*([a-z0-9.:]*).*scope global.*/\2/p' |
|
||||
head -n 1
|
||||
;;
|
||||
*) hostname -I | cut -d' ' -f1 ;;
|
||||
esac
|
||||
|
@ -1798,7 +1809,7 @@ primary_ip() {
|
|||
# URL
|
||||
# ---
|
||||
|
||||
url_replace_hostname(){
|
||||
url_replace_hostname() {
|
||||
|
||||
# usage: url_replace_hostname <url> <new hostname>
|
||||
|
||||
|
|
|
@@ -23,14 +23,14 @@
# configure golang environment
# ----------------------------

-[[ -z "${GO_VERSION}" ]] && GO_VERSION="go1.17.3"
+[[ -z ${GO_VERSION} ]] && GO_VERSION="go1.17.3"

GO_DL_URL="https://golang.org/dl"

# implement go functions
# -----------------------

-go.help(){
+go.help() {
cat <<EOF
go.:
ls : list golang binary archives (stable)
@ -40,7 +40,7 @@ go.:
|
|||
EOF
|
||||
}
|
||||
|
||||
go.ls(){
|
||||
go.ls() {
|
||||
python <<EOF
|
||||
import sys, json, requests
|
||||
resp = requests.get("${GO_DL_URL}/?mode=json&include=all")
|
||||
|
@ -54,7 +54,7 @@ for ver in json.loads(resp.text):
|
|||
EOF
|
||||
}
|
||||
|
||||
go.ver_info(){
|
||||
go.ver_info() {
|
||||
|
||||
# print information about a golang distribution. To print filename
|
||||
# sha256 and size of the archive that fits to your OS and host:
|
||||
|
@ -140,14 +140,14 @@ go.golang() {
|
|||
info_msg "Download go binary ${fname} (${size}B)"
|
||||
cache_download "${GO_DL_URL}/${fname}" "${fname}"
|
||||
|
||||
pushd "${CACHE}" &> /dev/null
|
||||
echo "${sha} ${fname}" > "${fname}.sha256"
|
||||
pushd "${CACHE}" &>/dev/null
|
||||
echo "${sha} ${fname}" >"${fname}.sha256"
|
||||
if ! sha256sum -c "${fname}.sha256" >/dev/null; then
|
||||
die 42 "downloaded file ${fname} checksum does not match"
|
||||
else
|
||||
info_msg "${fname} checksum OK"
|
||||
fi
|
||||
popd &> /dev/null
|
||||
popd &>/dev/null
|
||||
|
||||
info_msg "install golang"
|
||||
tee_stderr 0.1 <<EOF | sudo -i -u "${user}" | prefix_stdout "${userpr}"
|
||||
|
@ -201,7 +201,7 @@ go.bash() {
|
|||
sudo -i -u "${user}" bash --init-file "~${user}/.go_env"
|
||||
}
|
||||
|
||||
go.version(){
|
||||
go.version() {
|
||||
local user
|
||||
user="${1:-${USERNAME}}"
|
||||
sudo -i -u "${user}" <<EOF
|
||||
|
|
|
@ -17,8 +17,8 @@ declare main_cmd
|
|||
|
||||
NVM_LOCAL_FOLDER=.nvm
|
||||
|
||||
[[ -z "${NVM_GIT_URL}" ]] && NVM_GIT_URL="https://github.com/nvm-sh/nvm.git"
|
||||
[[ -z "${NVM_MIN_NODE_VER}" ]] && NVM_MIN_NODE_VER="16.13.0"
|
||||
[[ -z ${NVM_GIT_URL} ]] && NVM_GIT_URL="https://github.com/nvm-sh/nvm.git"
|
||||
[[ -z ${NVM_MIN_NODE_VER} ]] && NVM_MIN_NODE_VER="16.13.0"
|
||||
|
||||
# initialize nvm environment
|
||||
# -------------------------
|
||||
|
@ -35,7 +35,7 @@ nvm.is_installed() {
|
|||
[[ -f "${NVM_DIR}/nvm.sh" ]]
|
||||
}
|
||||
|
||||
if [[ -z "${NVM_DIR}" ]]; then
|
||||
if [[ -z ${NVM_DIR} ]]; then
|
||||
# nvm is not pre-installed in $HOME. Prepare for using nvm from <repo-root>
|
||||
NVM_DIR="$(git rev-parse --show-toplevel)/${NVM_LOCAL_FOLDER}"
|
||||
fi
|
||||
|
@ -105,7 +105,7 @@ nvm.install() {
|
|||
info_msg "install (update) NVM at ${NVM_DIR}"
|
||||
if nvm.is_installed; then
|
||||
info_msg "already cloned at: ${NVM_DIR}"
|
||||
pushd "${NVM_DIR}" &> /dev/null
|
||||
pushd "${NVM_DIR}" &>/dev/null
|
||||
git fetch --all | prefix_stdout " ${_Yellow}||${_creset} "
|
||||
else
|
||||
# delete any leftovers from previous installations
|
||||
|
@ -114,14 +114,14 @@ nvm.install() {
|
|||
fi
|
||||
info_msg "clone: ${NVM_GIT_URL}"
|
||||
git clone "${NVM_GIT_URL}" "${NVM_DIR}" 2>&1 | prefix_stdout " ${_Yellow}||${_creset} "
|
||||
pushd "${NVM_DIR}" &> /dev/null
|
||||
pushd "${NVM_DIR}" &>/dev/null
|
||||
git config --local advice.detachedHead false
|
||||
fi
|
||||
NVM_VERSION_TAG="$(git rev-list --tags --max-count=1)"
|
||||
NVM_VERSION_TAG="$(git describe --abbrev=0 --tags --match "v[0-9]*" "${NVM_VERSION_TAG}")"
|
||||
info_msg "checkout ${NVM_VERSION_TAG}"
|
||||
git checkout "${NVM_VERSION_TAG}" 2>&1 | prefix_stdout " ${_Yellow}||${_creset} "
|
||||
popd &> /dev/null
|
||||
popd &>/dev/null
|
||||
if [ -f "${REPO_ROOT}/.nvm_packages" ]; then
|
||||
cp "${REPO_ROOT}/.nvm_packages" "${NVM_DIR}/default-packages"
|
||||
fi
|
||||
|
|
|
@ -12,7 +12,7 @@ REDIS_GROUP="searxng-redis"
|
|||
REDIS_SERVICE_NAME="searxng-redis"
|
||||
REDIS_SYSTEMD_UNIT="${SYSTEMD_UNITS}/${REDIS_SERVICE_NAME}.service"
|
||||
|
||||
redis.help(){
|
||||
redis.help() {
|
||||
cat <<EOF
|
||||
redis.:
|
||||
remove : delete user (${REDIS_USER}) and remove service (${REDIS_SERVICE_NAME})
|
||||
|
@ -21,7 +21,6 @@ redis.:
|
|||
EOF
|
||||
}
|
||||
|
||||
|
||||
redis.remove() {
|
||||
sudo_or_exit
|
||||
(
|
||||
|
@ -36,7 +35,6 @@ redis.shell() {
|
|||
interactive_shell "${REDIS_USER}"
|
||||
}
|
||||
|
||||
|
||||
redis.userdel() {
|
||||
sudo_or_exit
|
||||
drop_service_account "${REDIS_USER}"
|
||||
|
|
|
@ -117,7 +117,7 @@ container.build() {
|
|||
--tag="localhost/$CONTAINER_IMAGE_ORGANIZATION/$CONTAINER_IMAGE_NAME:builder" \
|
||||
--file="./container/$dockerfile" \
|
||||
.
|
||||
build_msg CONTAINER "Image \"builder\" built"
|
||||
build_msg CONTAINER 'Image "builder" built'
|
||||
|
||||
# shellcheck disable=SC2086
|
||||
"$container_engine" $params_build \
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
#!/usr/bin/env bash
|
||||
# SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
|
||||
data.help(){
|
||||
data.help() {
|
||||
cat <<EOF
|
||||
data.:
|
||||
all : update searx/sxng_locales.py and searx/data/*
|
||||
|
@ -13,7 +13,8 @@ EOF
|
|||
}
|
||||
|
||||
data.all() {
|
||||
( set -e
|
||||
(
|
||||
set -e
|
||||
|
||||
pyenv.activate
|
||||
data.traits
|
||||
|
@ -35,9 +36,9 @@ data.all() {
|
|||
)
|
||||
}
|
||||
|
||||
|
||||
data.traits() {
|
||||
( set -e
|
||||
(
|
||||
set -e
|
||||
pyenv.activate
|
||||
build_msg DATA "update searx/data/engine_traits.json"
|
||||
python searxng_extra/update/update_engine_traits.py
|
||||
|
@ -53,7 +54,8 @@ data.useragents() {
|
|||
}
|
||||
|
||||
data.locales() {
|
||||
( set -e
|
||||
(
|
||||
set -e
|
||||
pyenv.activate
|
||||
build_msg DATA "update searx/data/locales.json"
|
||||
python searxng_extra/update/update_locales.py
|
||||
|
@ -61,8 +63,9 @@ data.locales() {
|
|||
dump_return $?
|
||||
}
|
||||
|
||||
data.currencies(){
|
||||
( set -e
|
||||
data.currencies() {
|
||||
(
|
||||
set -e
|
||||
pyenv.activate
|
||||
build_msg DATA "update searx/data/currencies.json"
|
||||
python searxng_extra/update/update_currencies.py
|
||||
|
|
|
@ -6,7 +6,7 @@ declare _creset
|
|||
|
||||
export NODE_MINIMUM_VERSION="18.17.0"
|
||||
|
||||
node.help(){
|
||||
node.help() {
|
||||
cat <<EOF
|
||||
node.:
|
||||
env : download & install SearXNG's npm dependencies locally
|
||||
|
@ -24,7 +24,8 @@ nodejs.ensure() {
|
|||
|
||||
node.env() {
|
||||
nodejs.ensure
|
||||
( set -e
|
||||
(
|
||||
set -e
|
||||
build_msg INSTALL "[npm] ./client/simple/package.json"
|
||||
npm --prefix client/simple install
|
||||
)
|
||||
|
@ -43,17 +44,19 @@ node.clean() {
|
|||
return 0
|
||||
fi
|
||||
build_msg CLEAN "themes -- locally installed npm dependencies"
|
||||
( set -e
|
||||
npm --prefix client/simple run clean \
|
||||
| prefix_stdout "${_Blue}CLEAN ${_creset} "
|
||||
(
|
||||
set -e
|
||||
npm --prefix client/simple run clean |
|
||||
prefix_stdout "${_Blue}CLEAN ${_creset} "
|
||||
if [ "${PIPESTATUS[0]}" -ne "0" ]; then
|
||||
return 1
|
||||
fi
|
||||
)
|
||||
build_msg CLEAN "locally installed developer and CI tools"
|
||||
( set -e
|
||||
npm --prefix . run clean \
|
||||
| prefix_stdout "${_Blue}CLEAN ${_creset} "
|
||||
(
|
||||
set -e
|
||||
npm --prefix . run clean |
|
||||
prefix_stdout "${_Blue}CLEAN ${_creset} "
|
||||
if [ "${PIPESTATUS[0]}" -ne "0" ]; then
|
||||
return 1
|
||||
fi
|
||||
|
|
|
@ -1,7 +1,6 @@
|
|||
#!/usr/bin/env bash
|
||||
# SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
|
||||
|
||||
STATIC_BUILD_COMMIT="[build] /static"
|
||||
STATIC_BUILT_PATHS=(
|
||||
'searx/templates/simple/icons.html'
|
||||
|
@ -9,7 +8,7 @@ STATIC_BUILT_PATHS=(
|
|||
'client/simple/package-lock.json'
|
||||
)
|
||||
|
||||
static.help(){
|
||||
static.help() {
|
||||
cat <<EOF
|
||||
static.build.: ${STATIC_BUILD_COMMIT}
|
||||
commit : build & commit /static folder
|
||||
|
@ -57,7 +56,7 @@ static.build.drop() {
|
|||
|
||||
# get only last (option -n1) local commit not in remotes
|
||||
branch="$(git branch --show-current)"
|
||||
last_commit_id="$(git log -n1 "${branch}" --pretty=format:'%h'\
|
||||
last_commit_id="$(git log -n1 "${branch}" --pretty=format:'%h' \
|
||||
--not --exclude="${branch}" --branches --remotes)"
|
||||
|
||||
if [ -z "${last_commit_id}" ]; then
|
||||
|
@ -96,7 +95,8 @@ static.build.commit() {
|
|||
# drop existing commit from previous build
|
||||
static.build.drop &>/dev/null
|
||||
|
||||
( set -e
|
||||
(
|
||||
set -e
|
||||
# fix & build the themes
|
||||
themes.fix
|
||||
themes.lint
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
#!/usr/bin/env bash
|
||||
# SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
|
||||
test.help(){
|
||||
test.help() {
|
||||
cat <<EOF
|
||||
test.:
|
||||
yamllint : lint YAML files (YAMLLINT_FILES)
|
||||
|
@@ -22,13 +22,14 @@ if [ "$VERBOSE" = "1" ]; then
fi

test.yamllint() {
-build_msg TEST "[yamllint] \$YAMLLINT_FILES"
+build_msg TEST "[yamllint] ${YAMLLINT_FILES[*]}"
pyenv.cmd yamllint --strict --format parsable "${YAMLLINT_FILES[@]}"
dump_return $?
}

test.pylint() {
-( set -e
+(
+set -e
pyenv.activate
PYLINT_OPTIONS="--rcfile .pylintrc"
@ -63,13 +64,13 @@ test.types.dev() {
|
|||
build_msg TEST "[pyright/types] suppress warnings related to intentional monkey patching"
|
||||
# We run Pyright in the virtual environment because pyright executes
|
||||
# "python" to determine the Python version.
|
||||
pyenv.cmd npx --no-install pyright -p pyrightconfig.json \
|
||||
| grep -E '\.py:[0-9]+:[0-9]+'\
|
||||
| grep -v '/engines/.*.py.* - warning: "logger" is not defined'\
|
||||
| grep -v '/plugins/.*.py.* - error: "logger" is not defined'\
|
||||
| grep -v '/engines/.*.py.* - warning: "supported_languages" is not defined' \
|
||||
| grep -v '/engines/.*.py.* - warning: "language_aliases" is not defined' \
|
||||
| grep -v '/engines/.*.py.* - warning: "categories" is not defined'
|
||||
pyenv.cmd npx --no-install pyright -p pyrightconfig.json |
|
||||
grep -E '\.py:[0-9]+:[0-9]+' |
|
||||
grep -v '/engines/.*.py.* - warning: "logger" is not defined' |
|
||||
grep -v '/plugins/.*.py.* - error: "logger" is not defined' |
|
||||
grep -v '/engines/.*.py.* - warning: "supported_languages" is not defined' |
|
||||
grep -v '/engines/.*.py.* - warning: "language_aliases" is not defined' |
|
||||
grep -v '/engines/.*.py.* - warning: "categories" is not defined'
|
||||
# ignore exit value from pyright
|
||||
# dump_return ${PIPESTATUS[0]}
|
||||
return 0
|
||||
|
@ -88,13 +89,13 @@ test.types.ci() {
|
|||
build_msg TEST "[pyright] suppress warnings related to intentional monkey patching"
|
||||
# We run Pyright in the virtual environment because pyright executes
|
||||
# "python" to determine the Python version.
|
||||
pyenv.cmd npx --no-install pyright -p pyrightconfig-ci.json \
|
||||
| grep -E '\.py:[0-9]+:[0-9]+'\
|
||||
| grep -v '/engines/.*.py.* - warning: "logger" is not defined'\
|
||||
| grep -v '/plugins/.*.py.* - error: "logger" is not defined'\
|
||||
| grep -v '/engines/.*.py.* - warning: "supported_languages" is not defined' \
|
||||
| grep -v '/engines/.*.py.* - warning: "language_aliases" is not defined' \
|
||||
| grep -v '/engines/.*.py.* - warning: "categories" is not defined'
|
||||
pyenv.cmd npx --no-install pyright -p pyrightconfig-ci.json |
|
||||
grep -E '\.py:[0-9]+:[0-9]+' |
|
||||
grep -v '/engines/.*.py.* - warning: "logger" is not defined' |
|
||||
grep -v '/plugins/.*.py.* - error: "logger" is not defined' |
|
||||
grep -v '/engines/.*.py.* - warning: "supported_languages" is not defined' |
|
||||
grep -v '/engines/.*.py.* - warning: "language_aliases" is not defined' |
|
||||
grep -v '/engines/.*.py.* - warning: "categories" is not defined'
|
||||
# ignore exit value from pyright
|
||||
# dump_return ${PIPESTATUS[0]}
|
||||
return 0
|
||||
|
@ -121,7 +122,8 @@ test.unit() {
|
|||
|
||||
test.coverage() {
|
||||
build_msg TEST 'unit test coverage'
|
||||
( set -e
|
||||
(
|
||||
set -e
|
||||
pyenv.activate
|
||||
# shellcheck disable=SC2086
|
||||
python -m nose2 ${TEST_NOSE2_VERBOSE} -C --log-capture --with-coverage --coverage searx -s tests/unit
|
||||
|
@ -142,7 +144,7 @@ test.rst() {
|
|||
build_msg TEST "[reST markup] ${RST_FILES[*]}"
|
||||
|
||||
for rst in "${RST_FILES[@]}"; do
|
||||
pyenv.cmd rst2html --halt error "$rst" > /dev/null || die 42 "fix issue in $rst"
|
||||
pyenv.cmd rst2html --halt error "$rst" >/dev/null || die 42 "fix issue in $rst"
|
||||
done
|
||||
}
|
||||
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
#!/usr/bin/env bash
|
||||
# SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
|
||||
themes.help(){
|
||||
themes.help() {
|
||||
cat <<EOF
|
||||
themes.:
|
||||
all : test & build all themes
|
||||
|
@ -13,14 +13,16 @@ EOF
|
|||
}
|
||||
|
||||
themes.all() {
|
||||
( set -e
|
||||
(
|
||||
set -e
|
||||
vite.simple.build
|
||||
)
|
||||
dump_return $?
|
||||
}
|
||||
|
||||
themes.simple() {
|
||||
( set -e
|
||||
(
|
||||
set -e
|
||||
build_msg SIMPLE "theme: run build (simple)"
|
||||
vite.simple.build
|
||||
)
|
||||
|
@ -28,7 +30,8 @@ themes.simple() {
|
|||
}
|
||||
|
||||
themes.fix() {
|
||||
( set -e
|
||||
(
|
||||
set -e
|
||||
build_msg SIMPLE "theme: fix (all themes)"
|
||||
vite.simple.fix
|
||||
)
|
||||
|
@ -36,7 +39,8 @@ themes.fix() {
|
|||
}
|
||||
|
||||
themes.lint() {
|
||||
( set -e
|
||||
(
|
||||
set -e
|
||||
build_msg SIMPLE "theme: lint (all themes)"
|
||||
vite.simple.lint
|
||||
)
|
||||
|
@ -44,7 +48,8 @@ themes.lint() {
|
|||
}
|
||||
|
||||
themes.test() {
|
||||
( set -e
|
||||
(
|
||||
set -e
|
||||
# we run a build to test (in CI)
|
||||
build_msg SIMPLE "theme: run build (to test)"
|
||||
vite.simple.build
|
||||
|
|
|
@ -4,7 +4,7 @@
|
|||
declare _Blue
|
||||
declare _creset
|
||||
|
||||
vite.help(){
|
||||
vite.help() {
|
||||
cat <<EOF
|
||||
vite.: .. to be done ..
|
||||
simple.:
|
||||
|
@ -30,7 +30,8 @@ VITE_SIMPLE_THEME="${REPO_ROOT}/client/simple"
|
|||
# }
|
||||
|
||||
vite.simple.build() {
|
||||
( set -e
|
||||
(
|
||||
set -e
|
||||
templates.simple.pygments
|
||||
|
||||
node.env
|
||||
|
@ -39,19 +40,21 @@ vite.simple.build() {
|
|||
pushd "${VITE_SIMPLE_THEME}"
|
||||
npm install
|
||||
npm run build
|
||||
popd &> /dev/null
|
||||
popd &>/dev/null
|
||||
)
|
||||
}
|
||||
|
||||
vite.simple.fix() {
|
||||
( set -e
|
||||
(
|
||||
set -e
|
||||
node.env
|
||||
npm --prefix client/simple run fix
|
||||
)
|
||||
}
|
||||
|
||||
vite.simple.lint() {
|
||||
( set -e
|
||||
(
|
||||
set -e
|
||||
node.env
|
||||
npm --prefix client/simple run lint
|
||||
)
|
||||
|
@ -59,8 +62,8 @@ vite.simple.lint() {
|
|||
|
||||
templates.simple.pygments() {
|
||||
build_msg PYGMENTS "searxng_extra/update/update_pygments.py"
|
||||
pyenv.cmd python searxng_extra/update/update_pygments.py \
|
||||
| prefix_stdout "${_Blue}PYGMENTS ${_creset} "
|
||||
pyenv.cmd python searxng_extra/update/update_pygments.py |
|
||||
prefix_stdout "${_Blue}PYGMENTS ${_creset} "
|
||||
if [ "${PIPESTATUS[0]}" -ne "0" ]; then
|
||||
build_msg PYGMENTS "building LESS files for pygments failed"
|
||||
return 1
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
#!/usr/bin/env bash
|
||||
# SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
|
||||
weblate.help(){
|
||||
weblate.help() {
|
||||
cat <<EOF
|
||||
weblate.:
|
||||
push.translations: push translation changes from SearXNG to Weblate's counterpart
|
||||
|
@ -19,8 +19,9 @@ weblate.translations.worktree() {
|
|||
#
|
||||
# remote weblate https://translate.codeberg.org/git/searxng/searxng/
|
||||
|
||||
( set -e
|
||||
if ! git remote get-url weblate 2> /dev/null; then
|
||||
(
|
||||
set -e
|
||||
if ! git remote get-url weblate 2>/dev/null; then
|
||||
git remote add weblate https://translate.codeberg.org/git/searxng/searxng/
|
||||
fi
|
||||
if [ -d "${TRANSLATIONS_WORKTREE}" ]; then
|
||||
|
@ -49,7 +50,8 @@ weblate.to.translations() {
|
|||
# 4. In translations worktree, merge changes of branch 'translations' from
|
||||
# remote 'weblate' and push it on branch 'translations' of 'origin'
|
||||
|
||||
( set -e
|
||||
(
|
||||
set -e
|
||||
pyenv.activate
|
||||
if [ "$(wlc lock-status)" != "locked: True" ]; then
|
||||
die 1 "weblate must be locked, currently: $(wlc lock-status)"
|
||||
|
@ -77,14 +79,18 @@ weblate.translations.commit() {
|
|||
# create a commit in the local branch (master)
|
||||
|
||||
local existing_commit_hash commit_body commit_message exitcode
|
||||
( set -e
|
||||
(
|
||||
set -e
|
||||
pyenv.activate
|
||||
# lock change on weblate
|
||||
wlc lock
|
||||
|
||||
# get translations branch in git worktree (TRANSLATIONS_WORKTREE)
|
||||
weblate.translations.worktree
|
||||
existing_commit_hash=$(cd "${TRANSLATIONS_WORKTREE}"; git log -n1 --pretty=format:'%h')
|
||||
existing_commit_hash=$(
|
||||
cd "${TRANSLATIONS_WORKTREE}"
|
||||
git log -n1 --pretty=format:'%h'
|
||||
)
|
||||
|
||||
# pull weblate commits
|
||||
weblate.to.translations
|
||||
|
@ -101,7 +107,10 @@ weblate.translations.commit() {
|
|||
data.locales
|
||||
|
||||
# git add/commit (no push)
|
||||
commit_body=$(cd "${TRANSLATIONS_WORKTREE}"; git log --pretty=format:'%h - %as - %aN <%ae>' "${existing_commit_hash}..HEAD")
|
||||
commit_body=$(
|
||||
cd "${TRANSLATIONS_WORKTREE}"
|
||||
git log --pretty=format:'%h - %as - %aN <%ae>' "${existing_commit_hash}..HEAD"
|
||||
)
|
||||
commit_message=$(echo -e "[l10n] update translations from Weblate\n\n${commit_body}")
|
||||
git add searx/translations
|
||||
git add searx/data/locales.json
|
||||
|
@ -135,7 +144,8 @@ weblate.push.translations() {
|
|||
local messages_pot diff_messages_pot last_commit_hash last_commit_detail \
|
||||
exitcode
|
||||
messages_pot="${TRANSLATIONS_WORKTREE}/searx/translations/messages.pot"
|
||||
( set -e
|
||||
(
|
||||
set -e
|
||||
pyenv.activate
|
||||
# get translations branch in git worktree (TRANSLATIONS_WORKTREE)
|
||||
weblate.translations.worktree
|
||||
|
@ -147,8 +157,10 @@ weblate.push.translations() {
|
|||
"searx/"
|
||||
|
||||
# stop if there is no meaningful change in the master branch
|
||||
diff_messages_pot=$(cd "${TRANSLATIONS_WORKTREE}";\
|
||||
git diff -- "searx/translations/messages.pot")
|
||||
diff_messages_pot=$(
|
||||
cd "${TRANSLATIONS_WORKTREE}"
|
||||
git diff -- "searx/translations/messages.pot"
|
||||
)
|
||||
if ! echo "$diff_messages_pot" | grep -qE "[\+\-](msgid|msgstr)"; then
|
||||
build_msg BABEL 'no changes detected, exiting'
|
||||
return 42
|
||||
|
|
|
@@ -5,10 +5,10 @@ valkey.distro.setup() {
# shellcheck disable=SC2034

case $DIST_ID in
-ubuntu|debian)
+ubuntu | debian)
VALKEY_PACKAGES="valkey-server"
;;
-arch|fedora|centos)
+arch | fedora | centos)
VALKEY_PACKAGES="valkey"
;;
*)
@ -36,13 +36,13 @@ valkey.backports() {
|
|||
esac
|
||||
}
|
||||
|
||||
valkey.install(){
|
||||
valkey.install() {
|
||||
info_msg "installing valkey ..."
|
||||
valkey.distro.setup
|
||||
|
||||
case $DIST_ID in
|
||||
debian|ubuntu)
|
||||
apt-cache show "${VALKEY_PACKAGES}" &> /dev/null || valkey.backports
|
||||
debian | ubuntu)
|
||||
apt-cache show "${VALKEY_PACKAGES}" &>/dev/null || valkey.backports
|
||||
pkg_install "${VALKEY_PACKAGES}"
|
||||
|
||||
# do some fix ...
|
||||
|
@ -54,7 +54,7 @@ valkey.install(){
|
|||
|
||||
systemd_activate_service valkey-server
|
||||
;;
|
||||
arch|fedora|centos)
|
||||
arch | fedora | centos)
|
||||
pkg_install "${VALKEY_PACKAGES}"
|
||||
systemd_activate_service valkey
|
||||
;;
|
||||
utils/lxc.sh | 142
@ -60,19 +60,17 @@ REMOTE_IMAGES=()
|
|||
CONTAINERS=()
|
||||
LOCAL_IMAGES=()
|
||||
|
||||
for ((i=0; i<${#LXC_SUITE[@]}; i+=2)); do
|
||||
for ((i = 0; i < ${#LXC_SUITE[@]}; i += 2)); do
|
||||
REMOTE_IMAGES=("${REMOTE_IMAGES[@]}" "${LXC_SUITE[i]}")
|
||||
CONTAINERS=("${CONTAINERS[@]}" "${LXC_HOST_PREFIX}-${LXC_SUITE[i+1]}")
|
||||
LOCAL_IMAGES=("${LOCAL_IMAGES[@]}" "${LXC_SUITE[i+1]}")
|
||||
CONTAINERS=("${CONTAINERS[@]}" "${LXC_HOST_PREFIX}-${LXC_SUITE[i + 1]}")
|
||||
LOCAL_IMAGES=("${LOCAL_IMAGES[@]}" "${LXC_SUITE[i + 1]}")
|
||||
done
|
||||
|
||||
HOST_USER="${SUDO_USER:-$USER}"
|
||||
HOST_USER_ID=$(id -u "${HOST_USER}")
|
||||
HOST_GROUP_ID=$(id -g "${HOST_USER}")
|
||||
|
||||
# ----------------------------------------------------------------------------
|
||||
usage() {
|
||||
# ----------------------------------------------------------------------------
|
||||
_cmd="$(basename "$0")"
|
||||
cat <<EOF
|
||||
usage::
|
||||
|
@ -138,7 +136,7 @@ main() {
|
|||
lxc_distro_setup
|
||||
|
||||
# don't check prerequisite when in recursion
|
||||
if [[ ! $1 == __* ]] && [[ ! $1 == --help ]]; then
|
||||
if [[ $1 != __* ]] && [[ $1 != --help ]]; then
|
||||
if ! in_container; then
|
||||
! required_commands lxc && lxd_info && exit 42
|
||||
fi
|
||||
|
@ -146,27 +144,40 @@ main() {
|
|||
fi
|
||||
|
||||
case $1 in
|
||||
--getenv) var="$2"; echo "${!var}"; exit 0;;
|
||||
-h|--help) usage; exit 0;;
|
||||
--getenv)
|
||||
var="$2"
|
||||
echo "${!var}"
|
||||
exit 0
|
||||
;;
|
||||
-h | --help)
|
||||
usage
|
||||
exit 0
|
||||
;;
|
||||
|
||||
build)
|
||||
sudo_or_exit
|
||||
case $2 in
|
||||
${LXC_HOST_PREFIX}-*) build_container "$2" ;;
|
||||
''|--|containers) build_all_containers ;;
|
||||
*) usage "$_usage"; exit 42;;
|
||||
'' | -- | containers) build_all_containers ;;
|
||||
*)
|
||||
usage "$_usage"
|
||||
exit 42
|
||||
;;
|
||||
esac
|
||||
;;
|
||||
copy)
|
||||
case $2 in
|
||||
''|images) lxc_copy_images_locally;;
|
||||
*) usage "$_usage"; exit 42;;
|
||||
'' | images) lxc_copy_images_locally ;;
|
||||
*)
|
||||
usage "$_usage"
|
||||
exit 42
|
||||
;;
|
||||
esac
|
||||
;;
|
||||
remove)
|
||||
sudo_or_exit
|
||||
case $2 in
|
||||
''|--|containers) remove_containers ;;
|
||||
'' | -- | containers) remove_containers ;;
|
||||
images) lxc_delete_images_locally ;;
|
||||
${LXC_HOST_PREFIX}-*)
|
||||
! lxc_exists "$2" && warn_msg "container not yet exists: $2" && exit 0
|
||||
|
@ -174,19 +185,25 @@ main() {
lxc_delete_container "$2"
fi
;;
*) usage "unknown or missing container <name> $2"; exit 42;;
*)
usage "unknown or missing container <name> $2"
exit 42
;;
esac
;;
start|stop)
start | stop)
sudo_or_exit
case $2 in
''|--|containers) lxc_cmd "$1" ;;
'' | -- | containers) lxc_cmd "$1" ;;
${LXC_HOST_PREFIX}-*)
! lxc_exists "$2" && usage_containers "unknown container: $2" && exit 42
info_msg "lxc $1 $2"
lxc "$1" "$2" | prefix_stdout "[${_BBlue}${i}${_creset}] "
;;
*) usage "unknown or missing container <name> $2"; exit 42;;
*)
usage "unknown or missing container <name> $2"
exit 42
;;
esac
;;
show)

@ -195,10 +212,10 @@ main() {
suite)
case $3 in
${LXC_HOST_PREFIX}-*)
lxc exec -t "$3" -- "${LXC_REPO_ROOT}/utils/lxc.sh" __show suite \
| prefix_stdout "[${_BBlue}$3${_creset}] "
lxc exec -t "$3" -- "${LXC_REPO_ROOT}/utils/lxc.sh" __show suite |
prefix_stdout "[${_BBlue}$3${_creset}] "
;;
*) show_suite;;
*) show_suite ;;
esac
;;
images) show_images ;;

@ -230,7 +247,10 @@ main() {
;;
esac
;;
*) usage "$_usage"; exit 42;;
*)
usage "$_usage"
exit 42
;;
esac
;;
__show)

@ -243,30 +263,36 @@ main() {
sudo_or_exit
shift
case $1 in
--) shift; lxc_exec "$@" ;;
--)
shift
lxc_exec "$@"
;;
${LXC_HOST_PREFIX}-*)
! lxc_exists "$1" && usage_containers "unknown container: $1" && exit 42
local name=$1
shift
lxc_exec_cmd "${name}" "$@"
;;
*) usage_containers "unknown container: $1" && exit 42
*) usage_containers "unknown container: $1" && exit 42 ;;
esac
;;
install)
sudo_or_exit
case $2 in
suite|base)
suite | base)
case $3 in
${LXC_HOST_PREFIX}-*)
! lxc_exists "$3" && usage_containers "unknown container: $3" && exit 42
lxc_exec_cmd "$3" "${LXC_REPO_ROOT}/utils/lxc.sh" __install "$2"
;;
''|--) lxc_exec "${LXC_REPO_ROOT}/utils/lxc.sh" __install "$2" ;;
*) usage_containers "unknown container: $3" && exit 42
'' | --) lxc_exec "${LXC_REPO_ROOT}/utils/lxc.sh" __install "$2" ;;
*) usage_containers "unknown container: $3" && exit 42 ;;
esac
;;
*) usage "$_usage"; exit 42 ;;
*)
usage "$_usage"
exit 42
;;
esac
;;
__install)

@ -281,12 +307,17 @@ main() {
echo
echo ".. generic utils/lxc.sh documentation"
;;
-*) usage "unknown option $1"; exit 42;;
*) usage "unknown or missing command $1"; exit 42;;
-*)
usage "unknown option $1"
exit 42
;;
*)
usage "unknown or missing command $1"
exit 42
;;
esac
}


build_all_containers() {
rst_title "Build all LXC containers of suite"
echo
@ -310,11 +341,11 @@ build_container() {
local image
local boilerplate_script

for ((i=0; i<${#LXC_SUITE[@]}; i+=2)); do
if [ "${LXC_HOST_PREFIX}-${LXC_SUITE[i+1]}" = "$1" ]; then
for ((i = 0; i < ${#LXC_SUITE[@]}; i += 2)); do
if [ "${LXC_HOST_PREFIX}-${LXC_SUITE[i + 1]}" = "$1" ]; then
remote_image="${LXC_SUITE[i]}"
container="${LXC_HOST_PREFIX}-${LXC_SUITE[i+1]}"
image="${LXC_SUITE[i+1]}"
container="${LXC_HOST_PREFIX}-${LXC_SUITE[i + 1]}"
image="${LXC_SUITE[i + 1]}"
boilerplate_script="${image}_boilerplate"
boilerplate_script="${!boilerplate_script}"
break

@ -335,8 +366,8 @@ build_container() {
lxc_install_boilerplate "${container}" "$boilerplate_script"
echo
rst_title "install LXC base packages" section
lxc_exec_cmd "${container}" "${LXC_REPO_ROOT}/utils/lxc.sh" __install base \
| prefix_stdout "[${_BBlue}${container}${_creset}] "
lxc_exec_cmd "${container}" "${LXC_REPO_ROOT}/utils/lxc.sh" __install base |
prefix_stdout "[${_BBlue}${container}${_creset}] "
echo
lxc list "$container"
}

@ -348,7 +379,7 @@ remove_containers() {
lxc list "$LXC_HOST_PREFIX-"
echo -en "\\n${_BRed}LXC containers to delete::${_creset}\\n\\n ${CONTAINERS[*]}\\n" | $FMT
local default=Ny
[[ $FORCE_TIMEOUT = 0 ]] && default=Yn
[[ $FORCE_TIMEOUT == 0 ]] && default=Yn
if ask_yn "Do you really want to delete these containers" $default; then
for i in "${CONTAINERS[@]}"; do
lxc_delete_container "$i"

@ -363,8 +394,8 @@ remove_containers() {

lxc_copy_images_locally() {
rst_title "copy images" section
for ((i=0; i<${#LXC_SUITE[@]}; i+=2)); do
lxc_image_copy "${LXC_SUITE[i]}" "${LXC_SUITE[i+1]}"
for ((i = 0; i < ${#LXC_SUITE[@]}; i += 2)); do
lxc_image_copy "${LXC_SUITE[i]}" "${LXC_SUITE[i + 1]}"
done
# lxc image list local: && wait_key
}

@ -391,7 +422,7 @@ lxc_delete_images_locally() {
lxc image list local:
}

show_images(){
show_images() {
rst_title "local images"
echo
lxc image list local:

@ -408,11 +439,10 @@ show_images(){

}


# container
# ---------

show_suite(){
show_suite() {
rst_title "LXC suite ($LXC_HOST_PREFIX-*)"
echo
lxc list "$LXC_HOST_PREFIX-"

@ -421,8 +451,8 @@ show_suite(){
if ! lxc_exists "$i"; then
warn_msg "container ${_BBlue}$i${_creset} does not yet exists"
else
lxc exec -t "${i}" -- "${LXC_REPO_ROOT}/utils/lxc.sh" __show suite \
| prefix_stdout "[${_BBlue}${i}${_creset}] "
lxc exec -t "${i}" -- "${LXC_REPO_ROOT}/utils/lxc.sh" __show suite |
prefix_stdout "[${_BBlue}${i}${_creset}] "
echo
fi
done

@ -469,8 +499,8 @@ lxc_init_all_containers() {
local image_name
local container_name

for ((i=0; i<${#LXC_SUITE[@]}; i+=2)); do
lxc_init_container "${LXC_SUITE[i+1]}" "${LXC_HOST_PREFIX}-${LXC_SUITE[i+1]}"
for ((i = 0; i < ${#LXC_SUITE[@]}; i += 2)); do
lxc_init_container "${LXC_SUITE[i + 1]}" "${LXC_HOST_PREFIX}-${LXC_SUITE[i + 1]}"
done
}

@ -487,8 +517,8 @@ lxc_config_container() {

info_msg "[${_BBlue}$1${_creset}] map uid/gid from host to container"
# https://lxd.readthedocs.io/en/latest/userns-idmap/#custom-idmaps
echo -e -n "uid $HOST_USER_ID 0\\ngid $HOST_GROUP_ID 0"\
| lxc config set "$1" raw.idmap -
echo -e -n "uid $HOST_USER_ID 0\\ngid $HOST_GROUP_ID 0" |
lxc config set "$1" raw.idmap -

info_msg "[${_BBlue}$1${_creset}] share ${REPO_ROOT} (repo_share) from HOST into container"
# https://lxd.readthedocs.io/en/latest/instances/#type-disk

@ -504,15 +534,15 @@ lxc_boilerplate_all_containers() {
local boilerplate_script
local image_name

for ((i=0; i<${#LXC_SUITE[@]}; i+=2)); do
for ((i = 0; i < ${#LXC_SUITE[@]}; i += 2)); do

image_name="${LXC_SUITE[i+1]}"
image_name="${LXC_SUITE[i + 1]}"
boilerplate_script="${image_name}_boilerplate"
boilerplate_script="${!boilerplate_script}"

lxc_install_boilerplate "${LXC_HOST_PREFIX}-${image_name}" "$boilerplate_script"

if [[ -z "${boilerplate_script}" ]]; then
if [[ -z ${boilerplate_script} ]]; then
err_msg "[${_BBlue}${container_name}${_creset}] no boilerplate for image '${image_name}'"
fi
done

@ -546,10 +576,10 @@ EOF
if lxc start -q "${container_name}" &>/dev/null; then
sleep 5 # guest needs some time to come up and get an IP
fi
if [[ -n "${boilerplate_script}" ]]; then
echo "${boilerplate_script}" \
| lxc exec "${container_name}" -- bash \
| prefix_stdout "[${_BBlue}${container_name}${_creset}] "
if [[ -n ${boilerplate_script} ]]; then
echo "${boilerplate_script}" |
lxc exec "${container_name}" -- bash |
prefix_stdout "[${_BBlue}${container_name}${_creset}] "
fi
}

@ -568,6 +598,4 @@ check_connectivity() {
return $ret_val
}

# ----------------------------------------------------------------------------
main "$@"
# ----------------------------------------------------------------------------

151
utils/searxng.sh
151
utils/searxng.sh
@ -46,7 +46,7 @@ if in_container; then
SEARXNG_URL="http://$(primary_ip)/searxng"
fi
SEARXNG_URL_PATH="$(echo "${SEARXNG_URL}" | sed -e 's,^.*://[^/]*\(/.*\),\1,g')"
[[ "${SEARXNG_URL_PATH}" == "${SEARXNG_URL}" ]] && SEARXNG_URL_PATH=/
[[ ${SEARXNG_URL_PATH} == "${SEARXNG_URL}" ]] && SEARXNG_URL_PATH=/

# Apache settings

@ -98,7 +98,7 @@ case $DIST_ID-$DIST_VERS in
SEARXNG_BUILD_PACKAGES="${SEARXNG_BUILD_PACKAGES_debian}"
APACHE_PACKAGES="$APACHE_PACKAGES libapache2-mod-proxy-uwsgi"
;;
ubuntu-*|debian-*)
ubuntu-* | debian-*)
SEARXNG_PACKAGES="${SEARXNG_PACKAGES_debian} python-is-python3"
SEARXNG_BUILD_PACKAGES="${SEARXNG_BUILD_PACKAGES_debian}"
;;

@ -114,9 +114,7 @@ esac

_service_prefix=" ${_Yellow}|${SERVICE_USER}|${_creset} "

# ----------------------------------------------------------------------------
usage() {
# ----------------------------------------------------------------------------

# shellcheck disable=SC1117
cat <<EOF

@ -170,52 +168,68 @@ EOF

main() {
case $1 in
install|remove|instance)
install | remove | instance)
nginx_distro_setup
apache_distro_setup
uWSGI_distro_setup
required_commands \
sudo systemctl install git wget curl \
|| exit
sudo systemctl install git wget curl ||
exit
;;
esac

local _usage="unknown or missing $1 command $2"

case $1 in
--getenv) var="$2"; echo "${!var}"; exit 0;;
--cmd) shift; "$@";;
-h|--help) usage; exit 0;;
--getenv)
var="$2"
echo "${!var}"
exit 0
;;
--cmd)
shift
"$@"
;;
-h | --help)
usage
exit 0
;;
install)
sudo_or_exit
case $2 in
all) searxng.install.all;;
user) searxng.install.user;;
pyenv) searxng.install.pyenv;;
searxng-src) searxng.install.clone;;
settings) searxng.install.settings;;
uwsgi) searxng.install.uwsgi;;
packages) searxng.install.packages;;
buildhost) searxng.install.buildhost;;
nginx) searxng.nginx.install;;
apache) searxng.apache.install;;
valkey) searxng.install.valkey;;
*) usage "$_usage"; exit 42;;
all) searxng.install.all ;;
user) searxng.install.user ;;
pyenv) searxng.install.pyenv ;;
searxng-src) searxng.install.clone ;;
settings) searxng.install.settings ;;
uwsgi) searxng.install.uwsgi ;;
packages) searxng.install.packages ;;
buildhost) searxng.install.buildhost ;;
nginx) searxng.nginx.install ;;
apache) searxng.apache.install ;;
valkey) searxng.install.valkey ;;
*)
usage "$_usage"
exit 42
;;
esac
;;
remove)
sudo_or_exit
case $2 in
all) searxng.remove.all;;
user) drop_service_account "${SERVICE_USER}";;
pyenv) searxng.remove.pyenv;;
settings) searxng.remove.settings;;
uwsgi) searxng.remove.uwsgi;;
apache) searxng.apache.remove;;
remove) searxng.nginx.remove;;
valkey) searxng.remove.valkey;;
redis) searxng.remove.redis;;
*) usage "$_usage"; exit 42;;
all) searxng.remove.all ;;
user) drop_service_account "${SERVICE_USER}" ;;
pyenv) searxng.remove.pyenv ;;
settings) searxng.remove.settings ;;
uwsgi) searxng.remove.uwsgi ;;
apache) searxng.apache.remove ;;
remove) searxng.nginx.remove ;;
valkey) searxng.remove.valkey ;;
redis) searxng.remove.redis ;;
*)
usage "$_usage"
exit 42
;;
esac
;;
instance)
@ -234,19 +248,30 @@ main() {
;;
cmd)
sudo_or_exit
shift; shift; searxng.instance.exec "$@"
shift
shift
searxng.instance.exec "$@"
;;
get_setting)
shift; shift; searxng.instance.get_setting "$@"
shift
shift
searxng.instance.get_setting "$@"
;;
call)
# call a function in instance's environment
shift; shift; searxng.instance.self.call "$@"
shift
shift
searxng.instance.self.call "$@"
;;
_call)
shift; shift; "$@"
shift
shift
"$@"
;;
*)
usage "$_usage"
exit 42
;;
*) usage "$_usage"; exit 42;;
esac
;;
*)

@ -314,7 +339,7 @@ In your instance, valkey DB connector is configured at:
fi
fi

if ! [[ ${valkey_url} = valkey://localhost:6379/* ]]; then
if ! [[ ${valkey_url} == valkey://localhost:6379/* ]]; then
err_msg "SearXNG instance can't connect valkey DB / check valkey & your settings"
return
fi

@ -352,7 +377,7 @@ searxng.remove.all() {
fi

valkey_url=$(searxng.instance.get_setting valkey.url)
if ! [[ ${valkey_url} = unix://${VALKEY_HOME}/run/valkey.sock* ]]; then
if ! [[ ${valkey_url} == unix://${VALKEY_HOME}/run/valkey.sock* ]]; then
searxng.remove.valkey
fi

@ -369,7 +394,7 @@ searxng.remove.all() {
searxng.install.user() {
rst_title "SearXNG -- install user" section
echo
if getent passwd "${SERVICE_USER}" > /dev/null; then
if getent passwd "${SERVICE_USER}" >/dev/null; then
echo "user already exists"
return 0
fi
@ -399,11 +424,11 @@ searxng.install.clone() {
die 42 "To clone SearXNG, first install user ${SERVICE_USER}."
fi
echo
if ! sudo -i -u "${SERVICE_USER}" ls -d "$REPO_ROOT" > /dev/null; then
if ! sudo -i -u "${SERVICE_USER}" ls -d "$REPO_ROOT" >/dev/null; then
die 42 "user '${SERVICE_USER}' missed read permission: $REPO_ROOT"
fi
# SERVICE_HOME="$(sudo -i -u "${SERVICE_USER}" echo \$HOME 2>/dev/null)"
if [[ ! "${SERVICE_HOME}" ]]; then
if [[ ! ${SERVICE_HOME} ]]; then
err_msg "to clone SearXNG sources, user ${SERVICE_USER} hast to be created first"
return 42
fi

@ -412,7 +437,7 @@ searxng.install.clone() {
info_msg "create local branch ${GIT_BRANCH} from start point: origin/${GIT_BRANCH}"
git branch "${GIT_BRANCH}" "origin/${GIT_BRANCH}"
fi
if [[ ! $(git rev-parse --abbrev-ref HEAD) == "${GIT_BRANCH}" ]]; then
if [[ $(git rev-parse --abbrev-ref HEAD) != "${GIT_BRANCH}" ]]; then
warn_msg "take into account, installing branch $GIT_BRANCH while current branch is $(git rev-parse --abbrev-ref HEAD)"
fi
# export SERVICE_HOME

@ -424,7 +449,7 @@ searxng.install.clone() {
"$GIT_BRANCH" "${SERVICE_USER}"
git config --system --add safe.directory "${SEARXNG_SRC}"

pushd "${SEARXNG_SRC}" > /dev/null
pushd "${SEARXNG_SRC}" >/dev/null
tee_stderr 0.1 <<EOF | sudo -H -u "${SERVICE_USER}" -i 2>&1 | prefix_stdout "$_service_prefix"
cd "${SEARXNG_SRC}"
git remote set-url origin ${GIT_URL}

@ -432,7 +457,7 @@ git config user.email "${ADMIN_EMAIL}"
git config user.name "${ADMIN_NAME}"
git config --list
EOF
popd > /dev/null
popd >/dev/null
}

searxng.install.link_src() {

@ -588,9 +613,9 @@ searxng.install.uwsgi.socket() {

searxng.uwsgi.available() {
if [[ ${SEARXNG_UWSGI_USE_SOCKET} == true ]]; then
[[ -S "${SEARXNG_UWSGI_SOCKET}" ]]
[[ -S ${SEARXNG_UWSGI_SOCKET} ]]
exit_val=$?
if [[ $exit_val = 0 ]]; then
if [[ $exit_val == 0 ]]; then
info_msg "uWSGI socket is located at: ${SEARXNG_UWSGI_SOCKET}"
fi
else

@ -617,10 +642,9 @@ searxng.install.valkey() {
valkey.install
}


searxng.instance.localtest() {
rst_title "Test SearXNG instance locally" section
rst_para "Activate debug mode, start a minimal SearXNG "\
rst_para "Activate debug mode, start a minimal SearXNG " \
"service and debug a HTTP request/response cycle."

if service_is_available "http://${SEARXNG_INTERNAL_HTTP}" &>/dev/null; then

@ -714,7 +738,7 @@ This installs SearXNG's uWSGI app as Nginx site. The Nginx site is located at:
${NGINX_APPS_AVAILABLE}/${NGINX_SEARXNG_SITE} and requires a uWSGI."
searxng.install.http.pre

if ! nginx_is_installed ; then
if ! nginx_is_installed; then
err_msg "Nginx packages are not installed"
if ! ask_yn "Do you really want to continue and install Nginx packages?" Yn; then
return
@ -805,7 +829,7 @@ searxng.instance.inspect() {
echo

case $DIST_ID-$DIST_VERS in
ubuntu-*|debian-*)
ubuntu-* | debian-*)
# For uWSGI debian uses the LSB init process; for each configuration
# file new uWSGI daemon instance is started with additional option.
service uwsgi status "${SERVICE_NAME}"

@ -825,7 +849,7 @@ searxng.instance.inspect() {
while true; do
trap break 2
case $DIST_ID-$DIST_VERS in
ubuntu-*|debian-*) tail -f "/var/log/uwsgi/app/${SERVICE_NAME%.*}.log" ;;
ubuntu-* | debian-*) tail -f "/var/log/uwsgi/app/${SERVICE_NAME%.*}.log" ;;
arch-*) journalctl -f -u "uwsgi@${SERVICE_NAME%.*}" ;;
fedora-*) journalctl -f -u uwsgi ;;
esac

@ -882,7 +906,7 @@ searxng.doc.rst() {
uwsgi_variant=':socket'
fi

eval "echo \"$(< "${REPO_ROOT}/docs/build-templates/searxng.rst")\""
eval "echo \"$(<"${REPO_ROOT}/docs/build-templates/searxng.rst")\""

# I use ubuntu-20.04 here to demonstrate that versions are also supported,
# normally debian-* and ubuntu-* are most the same.

@ -897,7 +921,8 @@ searxng.doc.rst() {
echo -e "\n.. START searxng uwsgi-description $DIST_NAME"

case $DIST_ID-$DIST_VERS in
ubuntu-*|debian-*) cat <<EOF
ubuntu-* | debian-*)
cat <<EOF

.. code:: bash

@ -914,7 +939,8 @@ searxng.doc.rst() {

EOF
;;
arch-*) cat <<EOF
arch-*)
cat <<EOF

.. code:: bash

@ -932,7 +958,8 @@ EOF

EOF
;;
fedora-*|centos-7) cat <<EOF
fedora-* | centos-7)
cat <<EOF

.. code:: bash

@ -954,37 +981,35 @@ EOF
echo -e "\n.. START searxng uwsgi-appini $DIST_NAME"
echo ".. code:: bash"
echo
eval "echo \"$(< "${TEMPLATES}/${uWSGI_APPS_AVAILABLE}/${SEARXNG_UWSGI_APP}${uwsgi_variant}")\"" | prefix_stdout " "
eval "echo \"$(<"${TEMPLATES}/${uWSGI_APPS_AVAILABLE}/${SEARXNG_UWSGI_APP}${uwsgi_variant}")\"" | prefix_stdout " "
echo -e "\n.. END searxng uwsgi-appini $DIST_NAME"

echo -e "\n.. START nginx socket"
echo ".. code:: nginx"
echo
eval "echo \"$(< "${TEMPLATES}/${NGINX_APPS_AVAILABLE}/${NGINX_SEARXNG_SITE}:socket")\"" | prefix_stdout " "
eval "echo \"$(<"${TEMPLATES}/${NGINX_APPS_AVAILABLE}/${NGINX_SEARXNG_SITE}:socket")\"" | prefix_stdout " "
echo -e "\n.. END nginx socket"

echo -e "\n.. START nginx http"
echo ".. code:: nginx"
echo
eval "echo \"$(< "${TEMPLATES}/${NGINX_APPS_AVAILABLE}/${NGINX_SEARXNG_SITE}")\"" | prefix_stdout " "
eval "echo \"$(<"${TEMPLATES}/${NGINX_APPS_AVAILABLE}/${NGINX_SEARXNG_SITE}")\"" | prefix_stdout " "
echo -e "\n.. END nginx http"

echo -e "\n.. START apache socket"
echo ".. code:: apache"
echo
eval "echo \"$(< "${TEMPLATES}/${APACHE_SITES_AVAILABLE}/${APACHE_SEARXNG_SITE}:socket")\"" | prefix_stdout " "
eval "echo \"$(<"${TEMPLATES}/${APACHE_SITES_AVAILABLE}/${APACHE_SEARXNG_SITE}:socket")\"" | prefix_stdout " "
echo -e "\n.. END apache socket"

echo -e "\n.. START apache http"
echo ".. code:: apache"
echo
eval "echo \"$(< "${TEMPLATES}/${APACHE_SITES_AVAILABLE}/${APACHE_SEARXNG_SITE}")\"" | prefix_stdout " "
eval "echo \"$(<"${TEMPLATES}/${APACHE_SITES_AVAILABLE}/${APACHE_SEARXNG_SITE}")\"" | prefix_stdout " "
echo -e "\n.. END apache http"
)
done

}

# ----------------------------------------------------------------------------
main "$@"
# ----------------------------------------------------------------------------