velero_backup() {
  #name=$(date +%y%m%d.%H%M) && velero create backup $name --exclude-namespaces velero --wait && velero backup logs $name
  name=$(date +%y%m%d.%H%M)
  velero create backup $name --exclude-namespaces velero --wait
  velero backup logs $name
}
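# A minimal usage sketch (assumes the velero CLI is installed and KUBECONFIG points at the target cluster):
#   velero_backup   # creates a backup named like 240131.1759 and tails its logs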
export PROJECTS="${PROJECTS:-$HOME/projects}"

## STACKSPIN
export STACKSPIN="${STACKSPIN:-$PROJECTS/stackspin}"
_stackspin_cluster_cache=/var/tmp/stackspin-cluster

# Stackspin CLI Wrapper:
# Initialize once with "stack select example.org";
# after that, the last selected cluster is loaded on startup.
# Presumes a mapping like the following in your ssh config:
# Host example.org
#   Hostname [IP]
# This is a function so it can change directory.
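# Example session (hypothetical cluster name, matching the ssh config sketch above):
#   stack select example.org   # resolve the IP via `ssh -G` and export KUBECONFIG
#   stack                      # no arguments: cd into $STACKSPIN and print usage
#   stack pod nextcloud        # list running pods whose name matches "nextcloud"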
stack() {
  cmdname=${FUNCNAME:-$0}
  local pod_suffix='-\(0\|[0-f]\+\)'
  if test $# -lt 1; then
    builtin cd "$STACKSPIN" || cd /mnt/b/media/backups/servers/stackspin/2310_stackspin
    echo "Usage: $cmdname <COMMAND> [args...]"
    echo "Stackspin commands: select, sso, user, push"
    echo "Kubepod commands: pod, exec, app, shell, ls, logs, upload"
    echo "App commands: occ, vikunja"
    return 1
  fi
  local command="$1"
  shift
  case "$command" in
  # stackspin administration
  (select)
    export _cluster_name="$1"
    export _cluster_ip="$(ssh -G "$_cluster_name" | grep --max-count 1 "^hostname " | cut -d " " -f2-)"
    export CLUSTER_DIR="$STACKSPIN/clusters/$_cluster_name"
    export KUBECONFIG="$CLUSTER_DIR/kube_config_cluster.yml"
    # Uncomment the line below to always use the main stackspin repo, even when running in a fork.
    #export GITLAB_CI="true"
    echo "Selected Stackspin cluster $_cluster_name with IP $_cluster_ip"
    echo "$_cluster_name" >"$_stackspin_cluster_cache" || true
    #test "$PWD" = "$HOME" && builtin cd "$STACKSPIN"
    ;;
  (activate)
    test -d "$STACKSPIN" && . "$STACKSPIN/env/bin/activate"
    test $# -gt 0 && "$cmdname" select "$@"
    ;;
  (setup) # https://docs.stackspin.net/en/latest/installation/install_cli.html
    cd "$STACKSPIN" &&
      python3 -m venv env &&
      "$cmdname" activate
    ;;
  (override)
    cd "$STACKSPIN/../stackspout/overrides"
    name=stackspin-$1-override
    echo "apiVersion: v1
kind: ConfigMap
metadata:
  namespace: stackspin-apps
  name: $name
data:
  values.yaml: |" >"$name.yaml"
    $EDITOR "$name.yaml"
    ;;
  (sso) "$cmdname" exec dashboard-backend -- flask "$@";;
  (users)
    if test "$1" = "delete"
    then shift
      for arg
      do "$cmdname" user delete "$arg"
      done
    elif test $# -gt 0
    then
      for arg
      do "$cmdname" user show "$arg"
      done
    else "$cmdname" users $("$cmdname" user list | sed 's|.*<\(.*\)>.*|\1|')
    fi;;
  (user|app)
    if test "$1" = "init"
    then test $# -gt 2 || { echo "Usage: $cmdname $command $1 MAIL NAME"; return 2; }
      mail="$2"
      shift 2
      "$cmdname" user create "$mail" &&
        "$cmdname" user update "$mail" name "$*" &&
        echo "Initialized user '$*' with email '$mail'"
    else "$cmdname" sso cli "$command" "$@"
    fi;;
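  # e.g. stack user init jane.doe@example.org Jane Doe   # hypothetical address; creates the user, then sets the name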
  (invite) (
    # Mail invitation to new users
    test $# -gt 0 || { printf "Usage: $cmdname $command MAIL [NAME] [TEMPLATE]\nName can be omitted if mail is firstname.lastname@domain\n"; return 2; }
    export mail=$1
    export name=${2:-$(echo $mail | sed -E 's/(.*)\.(.*)@.*/\u\1 \u\2/')}
    #echo "$mail,$name"
    stack user init "$mail" "$name"
    stack-invite "$3"
    );;
  (push)
    test -f "$1" && $EDITOR "$1"
    # Allow force: https://open.greenhost.net/xeruf/stackspout/-/settings/repository#js-protected-branches-settings
    git commit "$@"
    git push &&
      git push greenhost && # FIXME remove
      flux reconcile source git -n flux-system "$(basename $(git rev-parse --show-toplevel))"
    flux reconcile kustomization -n flux-system "$(basename $(git rev-parse --show-toplevel))"
    ;;
  # FLUX
  (flux)
    case "$1" in
    (env) # Apply changes to .flux.env
      kubectl apply -k "$CLUSTER_DIR"
      flux reconcile -n flux-system kustomization velero
      flux get -A kustomizations --no-header | awk -F' ' '{system("flux reconcile -n " $1 " kustomization " $2)}'
      ;;
    esac
    ;;
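  # e.g. stack flux env   # after editing $CLUSTER_DIR/.flux.env: apply it and reconcile every kustomization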
  (reconcile)
    local app=$1
    local namespace=${2:-stackspout}
    shift $(expr $# \& $# \< 2 \| 2) # shift at most two arguments
    if flux suspend helmrelease -n $namespace $app "$@"
    then flux resume helmrelease -n $namespace $app "$@"
    else flux suspend helmrelease -n stackspin-apps $app "$@"
      flux resume helmrelease -n stackspin-apps $app "$@"
    fi
    flux suspend kustomization $app "$@"
    flux resume kustomization $app "$@"
    ;;
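  # e.g. stack reconcile nextcloud stackspin-apps   # suspend and resume to force a fresh reconciliation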
  (edit)
    # Edit the URL for an application
    local app=$1
    kubectl edit configmap -n flux-system stackspin-$app-kustomization-variables
    "$cmdname" reconcile $app
    ;;
  # Velero
  (restic)
    (
    namespace=stackspin
    case $1 in (-n|--namespace) namespace=$2; shift 2;; esac
    source $CLUSTER_DIR/.flux.env || return $?
    export RESTIC_REPOSITORY="s3:${backup_s3_url}/${backup_s3_bucket}/${backup_s3_prefix}/restic/$namespace"
    export AWS_ACCESS_KEY_ID="${backup_s3_aws_access_key_id}"
    export AWS_SECRET_ACCESS_KEY="${backup_s3_aws_secret_access_key}"
    export RESTIC_PASSWORD="$(kubectl get secret -n velero velero-repo-credentials -o jsonpath='{.data.repository-password}' | base64 -d)"
    restic "$@"
    )
    ;;
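  # e.g. stack restic snapshots                  # restic CLI against this cluster's backup repository
  #      stack restic -n stackspin-apps stats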
  (backup)
    if test $# -gt 0
    then velero "$@"
    else
      local backupname=$(date +%y%m%d.%H%M)
      velero create backup $backupname --exclude-namespaces velero --wait
      velero backup logs $backupname
    fi;;
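  # e.g. stack backup              # timestamped backup of everything except the velero namespace
  #      stack backup backup get   # any arguments are passed through to velero verbatim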
  (restore)
    if test $# -lt 2
    then echo "Usage: $cmdname $command <backup> <app> [namespace]"
      echo "Recent Completed Backups:"
      velero backup get | grep Completed | awk '{print $5 "\t" $1}' | sort -r | head -9
      return 1
    fi
    local backup=$1 app=$2
    local namespace=${3:-stackspin-apps} # TODO automatically handle stackspout apps
    local restore="${backup}-$app-$(date +%s)"
    if test "$app" = dashboard
    then kust=single-sign-on
      hr="$kust-database"
      namespace=stackspin
    else hr="$app"
      kust="$app"
    fi
    flux suspend kustomization $kust
    flux suspend helmrelease -n $namespace $hr
    (
    test $# -gt 3 && shift 3 || shift $#
    set -e
    kubectl delete all -n $namespace -l stackspin.net/backupSet=$app
    kubectl delete secret -n $namespace -l stackspin.net/backupSet=$app
    kubectl delete configmap -n $namespace -l stackspin.net/backupSet=$app
    kubectl delete pvc -n $namespace -l stackspin.net/backupSet=$app
    echo "Creating $app velero restore..."
    velero restore create "$restore" --from-backup=$backup --selector stackspin.net/backupSet=$app --existing-resource-policy update "$@"
    velero restore create "$restore-secrets" --from-backup=$backup --include-resources Secret --selector kustomize.toolkit.fluxcd.io/name=$app-secrets --existing-resource-policy update
    echo "Waiting a few seconds for $app backup restore to start..."
    sleep 10
    local readresult
    while test -z "$readresult"
    do velero restore describe $restore
      echo "Press enter to check again, any text if backup is ready to resume flux resources:"
      read readresult
    done
    test $app = dashboard &&
      "$cmdname" mariar stackspin-database hydra -e 'DELETE FROM hydra_client;' &&
      kubectl delete secret -n stackspin hydra &&
      flux reconcile helmrelease -n stackspin hydra
    )
    flux resume helmrelease -n $namespace $hr # TODO timeout
    flux resume kustomization $kust
    ;;
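  # e.g. stack restore 240131.1759 nextcloud   # hypothetical backup name as produced by "stack backup"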
  (restore-pvc)
    # Restore PVC for app from local directory
    test $# -lt 1 && echo "Usage: $cmdname $command <app> [dir]" >&2 && return 1
    local app=$1
    if test -d "$2"
    then dir="$2"
      target=$(ssh "$_cluster_name" find /var/lib/Stackspin/local-storage/ -maxdepth 1 -name "*$app")
      test -z "$target" && echo "No target found for ${app}" && return 1
      ssh "$_cluster_name" mv -v "$target" "$target.$(date +%s)"
      rsync --links --hard-links --times --recursive --info=progress2,remove,symsafe,flist,del --human-readable "$dir/" "$_cluster_name:$target/"
    else
      for vol in $(ls -d pvc*$app* | cut -d_ -f3 | sort)
      do "$cmdname" restore-pvc $vol $(find -maxdepth 1 -name "*$vol")
      done
    fi
    ;;
  # KUBE
  # app clis
  (nc-apps)
    "$cmdname" reconcile nextcloud stackspin-apps --timeout 2m
    kubectl get -n stackspin-apps configmap nc-setup-apps -o jsonpath='{.data.setup-apps\.sh}' | kubectl exec -i -n $(kubectl get pods --all-namespaces --field-selector="status.phase=Running" -o=custom-columns=S:.metadata.namespace,N:.metadata.name --no-headers "$@" | grep nextcloud) -- /bin/bash
    ;;
  (occ) "$cmdname" exec nc-nextcloud -c nextcloud -it -- su www-data -s /bin/bash -c "php $command $*";;
  (zulip) "$cmdname" exec zulip -- su zulip -c "/home/zulip/deployments/current/scripts/$* || /home/zulip/deployments/current/manage.py $*";;
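  # e.g. stack occ status   # any occ subcommand works, e.g. user:list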
  (vikunja*)
    local pod=$command
    poddir="$(basename "$CLUSTER_DIR"):$(kubectl describe pv -n stackspout | grep _$pod-files | awk '{print $2}')"
    case "$1" in
    (dump|export) cd "$PROJECTS/vikunja"
      filename="$pod-dump_$(date +%F).zip"
      if ! "$cmdname" exec "$pod-api" -- sh -c 'rm -f *.zip && ./vikunja dump >/dev/null && ls --color -lAhF >&2 && cat *.zip' >"$filename"
      then "$cmdname" exec "$pod" -- ./vikunja dump -p /app/vikunja/files &&
        scp "$poddir/*.zip" "$filename"
      fi &&
        (
        cur="$PWD/${filename}"
        cd "$(mktemp -d --suffix=_$filename)"
        unzip "$cur"
        zip "${cur}_noconfig.zip" VERSION .env database/*
        )
      ;;
    (restore)
      file=$2
      if ! test -f "$file"
      then echo "Usage: $cmdname vikunja[suffix] restore <file>" >&2
        return 2
      else
        scp "$file" "$poddir/"
        #"$cmdname" upload "$pod" "$file"
      fi
      "$cmdname" exec "$pod" -it -- ./vikunja restore "/app/vikunja/files/$(basename $file)"
      ;;
    (psql)
      kubectl exec -it -n $("$cmdname" pod "$pod-postgresql") -- \
        sh -c "PGPASSWORD=$(kubectl get secret --namespace stackspout $pod-postgresql -o jsonpath='{.data.password}' | base64 --decode) psql -h localhost -U vikunja -p 5432 vikunja";;
    (*) echo "Unknown $command subcommand: dump, restore, psql";;
    esac
    ;;
  (psql)
    local app=$1
    shift
    case "$1" in
    (restore)
      shift
      file=$1
      db=${2:-$app}
      "$cmdname" upload "$app-postgresql" "$file"
      "$cmdname" psql "$app" exec createdb "$db"
      stack psql "$app" "$db" -f \
        $(kubectl describe pod -n $(stack pod "$app-postgresql") | grep "from data" | awk '{print $1}')/$file
      ;;
    (exec) command="$2"
      shift 2
      echo '\\du+ (users) \\l (databases, then \\c DATABASE) \\dt (tables, then \\d+ TABLE)'
      kubectl exec -it -n $("$cmdname" pod "$app-postgresql") -- sh -c "PGPASSWORD=$(kubectl get secret --namespace stackspout $app-postgresql -o jsonpath='{.data.password}' | base64 --decode) $command -h localhost -U $(echo $app | cut -d- -f1) -p 5432 $*"
      ;;
    (*)
      "$cmdname" psql "$app" exec psql "$@"
      ;;
    esac;;
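  # e.g. stack psql vikunja                    # interactive psql for the app's database
  #      stack psql vikunja restore dump.sql   # hypothetical dump file: upload, createdb, import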
  (maria)
    local app=$1
    local pw="$(kubectl get secret -n flux-system stackspin-$app-variables --template '{{.data.mariadb_password}}' | base64 -d 2>/dev/null ||
      kubectl get secret -n flux-system stackspin-$app-variables --template "{{.data.${app}_mariadb_password}}" | base64 -d)"
    case $app in
    (nextcloud) n=nc-mariadb;;
    (wordpress) n=wordpress-database;;
    (*) n=$app-mariadb;;
    esac
    "$cmdname" exec $n -it -- env "MYSQL_PWD=$pw" mysql -u "$app" "$@"
    ;;
  (mariar)
    local app="$1"
    shift
    kubesecret="$(kubectl get secret --all-namespaces -o=custom-columns=S:.metadata.namespace,N:.metadata.name --no-headers | command grep -- " $app")" ||
      { echo "Cannot find database pod"; return 2; }
    local db=${kubesecret##*-}
    local databasepod="$app-$db"
    local mysql_pwd="$(kubectl get secret -n $kubesecret -o "jsonpath={.data.${db}-root-password}" | base64 -d)"
    local execline="$cmdname exec $databasepod -it -- env MYSQL_PWD=$mysql_pwd"
    if test "$1" = dump
    then
      dumpfile="$(date +%y%m%d)_${databasepod}"
      echo "Dumping into $dumpfile..."
      $execline mysqldump --extended-insert=FALSE -u root --all-databases >"${dumpfile}.sql"
      $execline mysqldump --no-data -u root --all-databases >"${dumpfile}_schema.sql"
    else echo "MySQL Reference --- SHOW databases; USE <db>; SHOW tables; DESC <table>;"
      $execline mysql --silent -u root "${@:-${app%-database}}"
    fi
    ;;
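  # e.g. stack mariar stackspin-database        # root MySQL shell (pod name as used by the restore command above)
  #      stack mariar stackspin-database dump   # dump all databases into dated .sql files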
  # high-level
  (all)
    case $1 in (-*);; (*) args="-A -l stackspin.net/backupSet=$1";; esac
    kubectl api-resources --verbs=list --namespaced -o name | xargs -n 1 kubectl get -o custom-columns=KIND:.kind,NS:.metadata.namespace,NAME:.metadata.name,AGE:.metadata.creationTimestamp --no-headers ${args:-$@}
    ;;
  (list)
    flux get all | grep --color=never "$1"
    kubectl get all -A --no-headers | grep --color=never "$1"
    ;;
  (shell)
    local container=$1
    shift
    test "$1" = "-c" && pod=$2 && shift 2
    "$cmdname" exec "$container" -c "$pod" -it -- /bin/sh "$@"
    ;;
  (ls)
    if test $# -gt 1 && ! [[ "$2" =~ ".*/.*" ]]
    then "$cmdname" exec "$1" "$2" "$3" -it -- ls -lAhF --group-directories-first "${@:4}"
    else for container in $("$cmdname" kube get "$1" pod -o "jsonpath={.spec.containers[*].name}")
      do highlight "Listing content of $container" &&
        "$cmdname" ls "$1" -c "$container" "${@:2}"
      done
    fi;;
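  # e.g. stack ls nc-nextcloud -c nextcloud /var/www   # hypothetical path; container flag and path pass through to ls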
  (upload)
    kubectl cp "$2" -n $("$cmdname" pod "$1$pod_suffix"):$2 "${@:3}"
    "$cmdname" ls "$1" "${@:3}";;
  (exec) "$cmdname" kube exec "$@";;
  (logs) test $# -gt 0 || { echo "$cmdname $command <pod> [lnav-args...]"; return; }
    podname="$1"
    shift
    "$cmdname" kube logs "$podname" -f | $(command which ${LOGPAGER:-lnav} || { which bat >/dev/null && echo "bat --number -l toml"; } || echo 'less -RF') "$@" ||
      kubectl describe pod -n $("$cmdname" pod "$podname" --field-selector='')
    ;;
  # low-level
  (kube)
    test $# -gt 1 || { echo "Please provide a command and pod name" >&2; return 1; }
    local pods="$("$cmdname" pod "$2$pod_suffix")"
    test -n "$pods" || return $?
    local subcommand=$1
    shift 2
    local commands=()
    for arg
    do case "$arg" in (-*) break;; (*) commands+="$arg"; shift;; esac
    done
    if test $(echo "$pods" | wc -l) -gt 1
    then
      for namespacedpod in "${(f)pods}"
      do test "$subcommand" = get ||
          highlight "Running $subcommand on $namespacedpod" >&2
        kubectl "$subcommand" "${commands[@]}" -n $namespacedpod "$@"
      done
    else
      namespacedpod="$pods"
      test "$subcommand" = get ||
        highlight "Running $subcommand on $namespacedpod" >&2
      kubectl "$subcommand" "${commands[@]}" -n $namespacedpod "$@"
    fi
    ;;
  (pod)
    test $# -gt 0 && local podname=$1 && shift
    if ! kubectl get pods --all-namespaces --field-selector="status.phase=Running" -o=custom-columns=S:.metadata.namespace,N:.metadata.name --no-headers "$@" | grep --color=never -- "$podname"
    then
      code=$?
      echo "No running pod found for $podname" >&2
      return $(expr $code \| 1)
    fi
    ;;
  (clean-pods)
    kubectl get pods --all-namespaces --no-headers |
      grep -Eiv 'PodInitializing|Running|ContainerCreating|Terminating' |
      awk '{print "--namespace=" $1 " " $2}' |
      xargs -L 1 kubectl delete pod;;
  # stackspin bare
  (upgrade)
    "$cmdname" backup
    git switch v2
    git pull upstream v2
    git push
    flux resume source git stackspin
    ;;
  (*) if which "$cmdname-$command" >/dev/null 2>&1
    then "$cmdname-$command" "$@"
      return $?
    fi
    builtin cd "$STACKSPIN"
    stack activate
    # The install command can also be given bare to install stackspin itself
    if test "$command" = "install"; then
      case "$1" in
      ([a-z]*)
        for arg
        do kubectl exec -n stackspin deploy/dashboard-backend -- flask cli app install "$arg"
        done;;
      (""|-*)
        python -m pip install --upgrade pip
        python -m pip install -r requirements.txt
        python -m stackspin "$@" "${_cluster_name}" "$command"
        cp -nv "install/.flux.env.example" "clusters/$_cluster_name/.flux.env" &&
          $EDITOR "clusters/$_cluster_name/.flux.env"
        cp -nv install/kustomization.yaml $CLUSTER_DIR/
        kubectl get namespace flux-system 2>/dev/null || kubectl create namespace flux-system
        kubectl apply -k $CLUSTER_DIR

        ssh "root@${_cluster_name}" mkdir /etc/nftables.d
        ssh "root@${_cluster_name}" "echo 'tcp dport { 2222 } counter accept' | tee /etc/nftables.d/ssh.nft"
        ssh "root@${_cluster_name}" "echo 'udp dport { 22222 } counter accept' | tee /etc/nftables.d/mosh.nft"

        ./install/install-stackspin.sh
        ;;
      esac
    else python3 -m stackspin "$_cluster_name" "$command" "$@"
    fi;;
  esac
}
cat "$_stackspin_cluster_cache" 2>/dev/null |
|
|
while read cluster; do stack select "$cluster"; done
|
|
|
|
|
|
# Run the following code only on headless machines
test -z "$DISPLAY" && test "$XDG_VTNR" != 1 && ! pgrep -qx "SystemUIServer" || return 0

# Fall back to k3s' bundled kubectl when no standalone kubectl is installed
which kubectl >/dev/null ||
  { kubectl() { sudo k3s kubectl "$@"; } && export -f kubectl; }

export PATH="$PATH:$HOME/.local/bin/server"

test -f "$HOME/.rvm/scripts/rvm" &&
  source "$HOME/.rvm/scripts/rvm" && # Load RVM into a shell session *as a function*
  rvm use 3.0