authentik/lifecycle/ak

#!/bin/bash -e
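# Entrypoint for the authentik container: dispatches on the first argument
# (server, worker, backup, restore, bash, test, healthcheck); any other
# argument is passed straight through to Django's manage.py.

# Block until the database is reachable before anything else runs.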
python -m lifecycle.wait_for_db
function log {
    printf '{"event": "%s", "level": "info", "logger": "bootstrap"}\n' "$@" > /dev/stderr
}
log "Bootstrap completed"
function check_if_root {
    if [[ $EUID -ne 0 ]]; then
        log "Not running as root, disabling permission fixes"
        # $1 is a full command string, intentionally unquoted so it word-splits
        $1
        return
    fi
    SOCKET="/var/run/docker.sock"
    GROUP="authentik"
    if [[ -e "$SOCKET" ]]; then
        # Get the group ID of the docker socket, so we can create a matching
        # group and add ourselves to it
        DOCKER_GID=$(stat -c '%g' "$SOCKET")
        # Ensure a group with that ID exists
        getent group "$DOCKER_GID" || groupadd -f -g "$DOCKER_GID" docker
        usermod -a -G "$DOCKER_GID" authentik
        # Since the group might not be named docker, look its name up by ID
        GROUP_NAME=$(getent group "$DOCKER_GID" | sed 's/:/\n/g' | head -1)
        GROUP="authentik:${GROUP_NAME}"
    fi
    # Fix permissions of backups and media
    chown -R authentik:authentik /media /backups
    chpst -u authentik:"$GROUP" env HOME=/authentik $1
}
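
# Run a command, prefixing every line of its output with a label. Process
# substitution keeps stdout and stderr separate, so errors still land on
# stderr.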
function prefixwith {
    local prefix="$1"
    shift
    "$@" > >(sed "s/^/$prefix: /") 2> >(sed "s/^/$prefix (err): /" >&2)
}
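
# Destructive restore of the authentik database: read connection settings
# from the authentik config, lock out and disconnect all clients, drop and
# re-create the database, apply initial migrations, then replay the backup
# with django-dbbackup's dbrestore command.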
function restore {
    PG_HOST=$(python -m authentik.lib.config postgresql.host 2> /dev/null)
    PG_NAME=$(python -m authentik.lib.config postgresql.name 2> /dev/null)
    PG_USER=$(python -m authentik.lib.config postgresql.user 2> /dev/null)
    PG_PORT=$(python -m authentik.lib.config postgresql.port 2> /dev/null)
    export PGPASSWORD=$(python -m authentik.lib.config postgresql.password 2> /dev/null)
    log "Ensuring no one can connect to the database"
    prefixwith "psql" psql -h"${PG_HOST}" -U"${PG_USER}" -c"UPDATE pg_database SET datallowconn = 'false' WHERE datname = '${PG_NAME}';" "postgres"
    prefixwith "psql" psql -h"${PG_HOST}" -U"${PG_USER}" -c"SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE datname = '${PG_NAME}';" "postgres"
    log "Deleting and re-creating database"
    prefixwith "psql" dropdb -h"${PG_HOST}" -U"${PG_USER}" "${PG_NAME}" || true
    prefixwith "psql" createdb -h"${PG_HOST}" -U"${PG_USER}" "${PG_NAME}"
    log "Running initial migrations"
    prefixwith "migrate" python -m lifecycle.migrate 2> /dev/null
    log "Restoring database"
    prefixwith "restore" python -m manage dbrestore -i "${@:2}"
}
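
# Record which mode this container was started in so the healthcheck
# subcommand knows which probe to run.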
MODE_FILE="/tmp/authentik-mode"
if [[ "$1" == "server" ]]; then
echo "server" > $MODE_FILE
# We only set prometheus_multiproc_dir for serer, as with the worker it just fills up the disk
export prometheus_multiproc_dir=/dev/shm/
python -m lifecycle.migrate
/authentik-proxy
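# The worker runs under the unprivileged user via check_if_root, which also
# grants it access to the docker socket when one is mounted (see above).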
elif [[ "$1" == "worker" ]]; then
echo "worker" > $MODE_FILE
check_if_root "celery -A authentik.root.celery worker --autoscale 3,1 -E -B -s /tmp/celerybeat-schedule -Q authentik,authentik_scheduled,authentik_events"
elif [[ "$1" == "backup" ]]; then
python -m manage dbbackup --clean
elif [[ "$1" == "restore" ]]; then
restore $@
elif [[ "$1" == "bash" ]]; then
/bin/bash
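# The test target pre-creates the report file and chowns it so the test run,
# which happens after privileges are dropped, can write to it.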
elif [[ "$1" == "test" ]]; then
pip install --no-cache -r requirements-dev.txt
touch /unittest.xml
chown authentik:authentik /unittest.xml
check_if_root "python -m manage test authentik"
elif [[ "$1" == "healthcheck" ]]; then
mode=$(cat $MODE_FILE)
if [[ $mode == "server" ]]; then
curl --user-agent "goauthentik.io lifecycle Healthcheck" -I http://localhost:9000/-/health/ready/
elif [[ $mode == "worker" ]]; then
celery -A authentik.root.celery inspect ping -d celery@$HOSTNAME --timeout 5 -j
fi
else
python -m manage "$@"
fi
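
# Example invocations, assuming this script is installed as the container
# entrypoint under the name "ak":
#   ak server        # run migrations, then hand off to /authentik-proxy
#   ak worker        # start the celery worker as the authentik user
#   ak healthcheck   # probe whichever mode was recorded at startup
#   ak shell         # any unknown argument goes to manage.py (here: manage shell)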