velero_backup() { #name=$(date +%y%m%d.%h%m) && velero create backup $name --exclude-namespaces velero --wait && velero backup logs $name' name=$(date +%y%m%d.%H%M) velero create backup $name --exclude-namespaces velero --wait velero backup logs $name } export PROJECTS="${PROJECTS:-$HOME/projects}" ## STACKSPIN export STACKSPIN="${STACKSPIN:-$PROJECTS/stackspin}" _stackspin_cluster_cache=/var/tmp/stackspin-cluster # Stackspin CLI Wrapper: # Initialize once with "stack select example.org", # then it loads the last selected cluster on startup. # Presumes a mapping like the following in your ssh config: # Host example.org # Hostname [IP] # This is a function so it can change directory. stack() { cmdname=${FUNCNAME:-$0} local pod_suffix='-\(0\|[0-f]\+\)' if test $# -lt 1; then builtin cd "$STACKSPIN" || cd /mnt/b/media/backups/servers/stackspin/2310_stackspin echo "Usage: $cmdname [args...]" echo "Stackspin commands: select, sso, user, push" echo "Kubepod commands: pod, exec, app, shell, ls, logs, upload" echo "App commands: occ, vikunja" return 1 fi local command="$1" shift case "$command" in # stackspin administration (select) export _cluster_name="$1" export _cluster_ip="$(ssh -G "$_cluster_name" | grep --max-count 1 "^hostname " | cut -d " " -f2-)" export CLUSTER_DIR="$STACKSPIN/clusters/$_cluster_name" export KUBECONFIG="$CLUSTER_DIR/kube_config_cluster.yml" # Uncomment the line below to always use the main stackspin repo, even when running in a fork. #export GITLAB_CI="true" echo Selected Stackspin cluster "$_cluster_name" with IP "$_cluster_ip" echo "$_cluster_name" >"$_stackspin_cluster_cache" || true #test "$PWD" = "$HOME" && builtin cd "$STACKSPIN" ;; (activate) test -d "$STACKSPIN" && . $STACKSPIN/env/bin/activate ;; (sso) "$cmdname" exec dashboard-backend -- flask "$@";; (users) if test "$1" = "delete" then shift for arg do "$cmdname" user delete "$arg" done elif test $# -gt 0 then for arg do "$cmdname" user show $arg done else "$cmdname" users $("$cmdname" user list | sed 's|.*<\(.*\)>.*|\1|') fi;; (user|app) if test "$1" = "init" then test $# -gt 2 || { echo "$0 $command $1 MAIL NAME"; return 2; } mail="$2" shift 2 "$cmdname" user create "$mail" && "$cmdname" user update "$mail" name "$*" && echo "Initialized user '$*' with email '$mail'" else "$cmdname" sso cli "$command" "$@" fi;; (invite) ( # Mail invitation to new users test $# -gt 0 || { printf "$0 $command MAIL [NAME] [TEMPLATE]\nName can be omitted if mail is firstname.lastname@domain\n"; return 2; }; export mail=$1 export name=${2:-$(echo $mail | sed -E 's/(.*)\.(.*)@.*/\u\1 \u\2/' )} #echo "$mail,$name" stack user init "$mail" "$name" stack-invite "$3" );; (push) test -f "$1" && $EDITOR "$1" # Allow force: https://open.greenhost.net/xeruf/stackspout/-/settings/repository#js-protected-branches-settings git commit "$@" git push && git push greenhost && # FIXME remove flux reconcile source git -n flux-system "$(basename $(git rev-parse --show-toplevel))" flux reconcile kustomization -n flux-system "$(basename $(git rev-parse --show-toplevel))" ;; # FLUX (flux) case "$1" in (env) # Apply changes to .flux.env kubectl apply -k "$CLUSTER_DIR" flux reconcile -n flux-system kustomization velero flux get -A kustomizations --no-header | awk -F' ' '{system("flux reconcile -n " $1 " kustomization " $2)}' ;; esac ;; (reconcile) app=$1 namespace=${2:-stackspout} if flux suspend helmrelease -n $namespace $app then flux resume helmrelease -n $namespace $app else flux suspend helmrelease -n stackspin-apps $app flux resume helmrelease -n stackspin-apps $app fi flux suspend kustomization $app flux resume kustomization $app ;; (edit) # Edit the URL for an application app=$1 kubectl edit configmap -n flux-system stackspin-$app-kustomization-variables "$0" reconcile $app ;; # Velero (restic) ( namespace=stackspin case $1 in (-n|--namespace) namespace=$2; shift 2;; esac source $CLUSTER_DIR/.flux.env || return $? export RESTIC_REPOSITORY="s3:${backup_s3_url}/${backup_s3_bucket}/${backup_s3_prefix}/restic/$namespace" export AWS_ACCESS_KEY_ID="${backup_s3_aws_access_key_id}" export AWS_SECRET_ACCESS_KEY="${backup_s3_aws_secret_access_key}" export RESTIC_PASSWORD="$(kubectl get secret -n velero velero-repo-credentials -o jsonpath='{.data.repository-password}' | base64 -d)" restic "$@" ) ;; (backup) backupname=$(date +%y%m%d.%H%m) velero create backup $backupname --exclude-namespaces velero --wait velero backup logs $backupname;; (restore) if test $# -lt 2 then echo "$0 $command [namespace]" echo "Recent Backups:" velero backup get | grep Completed | awk '{print $5 "\t" $1}' | sort -r | head -9 return 1 fi backup=$1; app=$2 namespace=${3:-stackspin-apps} # TODO automatically handle stackspout apps restore="${backup}-$app-$(date +%s)" if test "$app" = dashboard then kust=single-sign-on hr="$kust-database" namespace=stackspin else hr="$app" kust="$app" fi flux suspend kustomization $kust flux suspend helmrelease -n $namespace $hr kubectl delete all -n $namespace -l stackspin.net/backupSet=$app kubectl delete secret -n $namespace -l stackspin.net/backupSet=$app kubectl delete configmap -n $namespace -l stackspin.net/backupSet=$app kubectl delete pvc -n $namespace -l stackspin.net/backupSet=$app velero restore create $restore --from-backup=$backup -l stackspin.net/backupSet=$app echo "Waiting a few seconds for $app backup to restore..." sleep 10 local readresult while test -z "$readresult" do velero restore describe $restore echo "Press enter to check again, any text if backup is ready to resume flux resources:" read readresult done test $app = dashboard && kubectl delete secret -n stackspin hydra && flux reconcile helmrelease -n stackspin hydra flux resume helmrelease -n $namespace $hr # TODO timeout flux resume kustomization $kust ;; (restore-pvc) test $# -lt 1 && echo "$0 $command [dir]" >&2 && return 1 local app=$1 if test -d "$2" then dir="$2" target=$(ssh "$_cluster_name" find /var/lib/Stackspin/local-storage/ -maxdepth 1 -name "*$app") test -z "$target" && echo "No target found for ${app}" && return 1 ssh "$_cluster_name" mv -v "$target" "$target.$(date +%s)" rsync --links --hard-links --times --recursive --info=progress2,remove,symsafe,flist,del --human-readable "$dir/" "$_cluster_name:$target/" else for vol in $(ls -d pvc*$app* | cut -d_ -f3 | sort) do "$cmdname" restore-pvc $vol $(find -maxdepth 1 -name "*$vol") done fi ;; # KUBE # app clis (occ) "$cmdname" exec nc-nextcloud -c nextcloud -it -- su www-data -s /bin/bash -c "php $command $*";; (zulip) "$cmdname" exec zulip -- su zulip -c "/home/zulip/deployments/current/manage.py $*";; (vikunja*) local pod=$command case "$1" in (dump|export) cd "$PROJECTS/vikunja" "$cmdname" exec "$pod-api" -- \ sh -c 'rm -f *.zip && ./vikunja dump >/dev/null && ls --color -lAhF >&2 && cat *.zip' >"$pod-dump_$(date +%F).zip" ;; (restore) if ! test -f "$2" then echo "Usage: $0 vikunja[suffix] restore " >&2 return 2 fi file=$2 "$cmdname" upload "$pod-api" "$file" "$cmdname" exec "$pod-api" -it -- ./vikunja restore "$file" ;; (psql) kubectl exec -it -n $("$cmdname" pod "$pod-postgresql") -- \ sh -c "PGPASSWORD=$(kubectl get secret --namespace stackspout $pod-postgresql -o jsonpath='{.data.password}' | base64 --decode) psql -h localhost -U vikunja -p 5432 vikunja";; (*) echo "Unknown $command subcommand: dump, restore, psql";; esac ;; (psql) local app=$1 shift case "$1" in (restore) shift file=$1 db=${2:-$app} "$cmdname" upload "$app-postgresql" "$file" "$cmdname" psql "$app" exec createdb "$db" stack psql "$app" "$db" -f \ $(kubectl describe pod -n $(stack pod "$app-postgresql") | grep "from data" | awk '{print $1}')/$file ;; (exec) command="$2" shift 2 kubectl exec -it -n $("$cmdname" pod "$app-postgresql") -- sh -c "PGPASSWORD=$(kubectl get secret --namespace stackspout $app-postgresql -o jsonpath='{.data.password}' | base64 --decode) $command -h localhost -U $app -p 5432 $*" ;; (*) "$cmdname" psql "$app" exec psql "$@" ;; esac;; (maria) local app=$1 local pw="$(kubectl get secret -n flux-system stackspin-$app-variables --template '{{.data.mariadb_password}}' | base64 -d 2>/dev/null || kubectl get secret -n flux-system stackspin-$app-variables --template "{{.data.${app}_mariadb_password}}" | base64 -d)" case $app in (nextcloud) n=nc-mariadb;; (wordpress) n=wordpress-database;; (*) n=$app-mariadb;; esac "$cmdname" exec $n -it -- env "MYSQL_PWD=$pw" mysql -u $app "$@" ;; (mariar) name="$1-mariadb" shift pod="$(kubectl get secret --all-namespaces -o=custom-columns=S:.metadata.namespace,N:.metadata.name --no-headers | grep --color=never -- "$name")" && "$cmdname" exec "$name" -it -- env "MYSQL_PWD=$(kubectl get secret -n $pod -o jsonpath='{.data.mariadb-root-password}' | base64 -d)" mysql -u root "$@" ;; # high-level (list) flux get all | grep "$1" kubectl get all -A | grep "$1";; (shell) local container=$1 shift test "$1" = "-c" && pod=$2 && shift 2 "$cmdname" exec "$container" -c "$pod" -it -- /bin/sh "$@";; (ls) if test $# -gt 1 && ! [[ "$2" =~ ".*/.*" ]] then "$cmdname" exec "$1" "$2" "$3" -it -- ls -lAhF --group-directories-first "${@:4}" else for container in $("$cmdname" kube get "$1" pod -o "jsonpath={.spec.containers[*].name}") do highlight "Listing content of $container" && "$cmdname" ls "$1" -c "$container" "${@:2}" done fi;; (upload) kubectl cp "$2" -n $("$cmdname" pod "$1$pod_suffix"):$2 "${@:3}" "$cmdname" ls "$1" "${@:3}";; (exec) "$cmdname" kube exec "$@";; (logs) podname=$1 shift "$cmdname" kube logs "$podname" -f | $(command which ${LOGPAGER:-lnav} || { which bat >/dev/null && echo "bat --number -l toml" } || echo 'less -RF') "$@";; # low-level (kube) test $# -gt 1 || { echo "Please provide a command and pod name" >&2 && return 1; } local pods=$("$cmdname" pod "$2$pod_suffix") || { echo "No pod found for $2" >&2 && return 1; } local subcommand=$1 shift 2 local commands=() for arg do case "$arg" in (-*) break;; (*) commands+="$arg"; shift;; esac done namespacedpod="$pods" #while IFS= read -r namespacedpod; do test "$subcommand" = get || highlight "Running $subcommand on $namespacedpod" >&2 kubectl "$subcommand" "${commands[@]}" -n $namespacedpod "$@" #done <<< "$pods" ;; (pod) test $# -gt 0 && local podname=$1 && shift if ! kubectl get pods --all-namespaces --field-selector="status.phase=Running" -o=custom-columns=S:.metadata.namespace,N:.metadata.name --no-headers "$@" | grep --color=never -- "$podname" then code=$? echo "No pod found for $podname" >&2 return $code fi ;; # stackspin bare (*) if which "$cmdname-$command" >/dev/null 2>&1 then "$cmdname-$command" "$@" return $? fi builtin cd "$STACKSPIN" stack activate # Since the install command can also be given bare to install stackspin itself if test "$command" = "install"; then case "$1" in ([a-z]*) for arg do kubectl exec -n stackspin deploy/dashboard -c backend -- flask cli app install "$arg" done;; (""|-*) python3 -m pip install --upgrade pip python3 -m pip install -r requirements.txt python3 -m stackspin "$@" "$_cluster_name" "$command" cp -nv "install/.flux.env.example" "clusters/$_cluster_name/.flux.env" && $EDITOR "clusters/$_cluster_name/.flux.env" cp -nv install/kustomization.yaml $CLUSTER_DIR/ kubectl get namespace flux-system 2>/dev/null || kubectl create namespace flux-system kubectl apply -k $CLUSTER_DIR ssh "root@${_cluster_name}" mkdir /etc/nftables.d ssh "root@${_cluster_name}" echo 'tcp dport { 2222 } counter accept' | tee /etc/nftables.d/ssh.nft ./install/install-stackspin.sh ;; esac else python3 -m stackspin "$_cluster_name" "$command" "$@" fi;; esac } cat "$_stackspin_cluster_cache" 2>/dev/null | while read cluster; do stack select "$cluster"; done test -z "$DISPLAY" && test "$XDG_VTNR" != 1 || return 0 # The following runs only on headless machines which kubectl >/dev/null || { kubectl() { sudo k3s kubectl "$@"; } && export -f kubectl; } export PATH="$PATH:$HOME/.local/bin/server" test -d "$MUSIC" || export MUSIC="/srv/funkwhale/data/music/janek" test -f "$HOME/.rvm/scripts/rvm" && source "$HOME/.rvm/scripts/rvm" && # Load RVM into a shell session *as a function* rvm use 3.0