# NOMAD deployments
This is information on how to deploy the archive and the analytics toolkit.
Some information is a bit specific to our own production and development systems.
In general, the scripts create the files needed for the installation and print out the commands to run.
For security reasons they do not execute the deployment themselves.
You might want to do it manually, or deploy only part of the system.
## deploy/kubernetes
## deploy/base
Contains information for deploying the basic infrastructure on top of Kubernetes (execute the baseSetup.sh script).
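A minimal usage sketch (the flags come from the usage message the script itself prints; the hostname is a placeholder):

```bash
# writes the helm/prometheus yaml files and prints the kubectl/helm commands to run;
# nothing is deployed automatically
./baseSetup.sh --tls --target-hostname <hostname>
```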
## deploy/frontend
## deploy/api
## deploy/container-manager
## Machine-specific hints
```bash
#!/bin/bash
# generates the yaml files for the basic setup (helm/tiller, prometheus) and prints
# the commands to execute; it does not perform the deployment itself

nomadRoot=${nomadRoot:-/nomad/nomadlab}
updateDeploy=1
target_hostname=${target_hostname:-$HOSTNAME}
chownRoot=
tls=
secretWebCerts=

# parse command line arguments
while test ${#} -gt 0
do
    case "$1" in
        --tls)
            tls=--tls
            ;;
        --secret-web-certs)
            shift
            secretWebCerts=${1:-web-certs}
            ;;
        --target-hostname)
            shift
            target_hostname=$1
            ;;
        --nomad-root)
            shift
            nomadRoot=$1
            ;;
        --chown-root)
            shift
            chownRoot=$1
            ;;
        *)
            echo "usage: $0 [--tls] [--nomad-root <pathToNomadRoot>] [--chown-root <pathForPrometheusVolumes>] [--target-hostname hostname]"
            echo
            echo "Env variables: target_hostname, nomadRoot"
            exit 0
            ;;
    esac
    shift
done

# default the volume root to the server directory of the target host
chownRoot=${chownRoot:-$nomadRoot/servers/$target_hostname}

echo "# Initial setup"
echo "To make kubectl work, for example for the test kubernetes"
echo " export KUBECONFIG=/etc/kubernetes/admin.conf"

echo "# Helm install"
if [ -n "$updateDeploy" ]; then
    cat > helm-tiller-serviceaccount.yaml <<EOF
apiVersion: v1
kind: ServiceAccount
metadata:
  name: tiller
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: tiller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: tiller
    namespace: kube-system
EOF
    cat > prometheus-alertmanager-volume.yaml <<EOF
apiVersion: v1
kind: PersistentVolume
metadata:
  name: prometheus-alertmanager
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Recycle
  storageClassName: manual-alertmanager
  hostPath:
    path: $chownRoot/prometheus/alertmanager-volume
EOF
    cat > prometheus-server-volume.yaml <<EOF
apiVersion: v1
kind: PersistentVolume
metadata:
  name: prometheus-server
spec:
  capacity:
    storage: 16Gi
  storageClassName: manual-prometheus
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Recycle
  hostPath:
    path: $chownRoot/prometheus/server-volume
EOF
    cat > prometheus-values.yaml <<EOF
alertmanager:
  persistentVolume:
    storageClass: manual-alertmanager
  service:
    type: NodePort
server:
  persistentVolume:
    storageClass: manual-prometheus
  service:
    type: NodePort
EOF
fi

echo " kubectl create -f helm-tiller-serviceaccount.yaml"
if [ -n "$tls" ] ; then
    echo "# secure helm as described in https://docs.helm.sh/using_helm/#using-ssl-between-helm-and-tiller"
    echo "# create certificates"
    echo "mkdir helm-certs"
    echo "cd helm-certs"
    echo "openssl genrsa -out ./ca.key.pem 4096"
    echo "openssl req -key ca.key.pem -new -x509 -days 7300 -sha256 -out ca.cert.pem -extensions v3_ca"
    echo "openssl genrsa -out ./tiller.key.pem 4096"
    echo "openssl genrsa -out ./helm.key.pem 4096"
    echo "openssl req -key tiller.key.pem -new -sha256 -out tiller.csr.pem"
    echo "openssl req -key helm.key.pem -new -sha256 -out helm.csr.pem"
    echo "openssl x509 -req -CA ca.cert.pem -CAkey ca.key.pem -CAcreateserial -in tiller.csr.pem -out tiller.cert.pem -days 365"
    echo "openssl x509 -req -CA ca.cert.pem -CAkey ca.key.pem -CAcreateserial -in helm.csr.pem -out helm.cert.pem -days 365"
    echo "cp ca.cert.pem \$(helm home)/ca.pem"
    echo "cp helm.cert.pem \$(helm home)/cert.pem"
    echo "cp helm.key.pem \$(helm home)/key.pem"
    echo "# initialize helm"
    echo "helm init --override 'spec.template.spec.containers[0].command'='{/tiller,--storage=secret}' \\"
    echo " --tiller-tls \\"
    echo " --tiller-tls-verify \\"
    echo " --tiller-tls-cert=cert.pem \\"
    echo " --tiller-tls-key=key.pem \\"
    echo " --tls-ca-cert=ca.pem \\"
    echo " --service-account=tiller"
else
    echo " helm init --service-account tiller"
fi

echo "# Prometheus setup"
echo " kubectl create -f prometheus-alertmanager-volume.yaml"
echo " kubectl create -f prometheus-server-volume.yaml"
echo " helm install $tls --name prometheus -f prometheus-values.yaml stable/prometheus"
```
From labdev-nomad.container:

```bash
cd /nomad/nomadlab/servers/nomad-vis-test/analytics/remotevis
```
Update the info on the services of labdev that we use, as we share the session db (we should probably clean up this ugly command):

```bash
kubectl exec -ti $(kubectl get po | grep nomad-container-manager-beaker | cut -f1 -d ' ') node app.js serviceDumper -- --out-file labdev-nomad.services.yaml
```
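A possibly cleaner variant of the pod lookup, as an untested sketch: it assumes the beaker pods carry an app=nomad-container-manager-beaker label, which may not match the actual deployment:

```bash
# select the pod by label instead of grepping the table output;
# the app=nomad-container-manager-beaker label is an assumption
pod=$(kubectl get pods -l app=nomad-container-manager-beaker -o jsonpath='{.items[0].metadata.name}')
kubectl exec -ti "$pod" node app.js serviceDumper -- --out-file labdev-nomad.services.yaml
```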
Update the config with the current info on the redis dbs of labdev (default-remotevis.hjson.in -> default-remotevis.hjson):

```bash
docker run -ti -v $PWD:/usr/src/app -v /nomad/nomadlab/servers/labdev-nomad/analytics/beaker:/mnt -w /usr/src/app --rm node:carbon node app.js templateEvaluer --replacements /mnt/labdev-nomad.services.yaml --template config/nomad-vis-test.hjson.in --out-file config/nomad-vis-test.hjson
```
Deploy:

```bash
./deploy.sh --tls --env nomad-vis-test --target-hostname nomad-vis-test --secret-web-certs web-certs
```
and execute the deploy for the remote vis:

```bash
kubectl create -f container-manager-service-remotevis.yaml
if ! kubectl get deployment nomad-container-manager-remotevis >& /dev/null ; then
    kubectl create --save-config -f container-manager-deploy-remotevis.yaml
else
    kubectl apply -f container-manager-deploy-remotevis.yaml
fi
```
This is enough if only that changed; otherwise one also has to create the secrets and the analytics namespace (see the sketch below).
A serviceDump has to be run to re-export the ports to the frontend, and then the frontend setup needs to be updated; example frontend configurations follow after the sketch.
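The namespace and secret creation is not spelled out here; a minimal sketch, assuming the namespace is literally called analytics and the certificates are available locally as cert.pem and key.pem (both file names are placeholders):

```bash
# create the analytics namespace mentioned above (name assumed from the text)
kubectl create namespace analytics

# create the web-certs secret that deploy.sh is pointed at via --secret-web-certs;
# cert.pem and key.pem are placeholder file names for the actual certificate files
kubectl create secret generic web-certs \
    --from-file=cert.pem \
    --from-file=key.pem
```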
```yaml
---
frontend:
  - server_name: labdev-nomad.esc.rzg.mpg.de
    ssl_certificate: /web-certs/cert.pem
    ssl_certificate_key: /web-certs/key.pem
    shortcuts: true
repoapi:
  - nodes:
      - staging-nomad.esc.rzg.mpg.de
    ports:
      - nodePort: 8111
industry-project-imeall:
  - nodes:
      - labdev-nomad.esc.rzg.mpg.de
    ports:
      - nodePort: 34695
```
```yaml
---
frontend:
  server_name: analytics-toolkit.nomad-coe.eu
  other_servers: |
    server {
      listen 80;
      server_name labtest-nomad.esc.rzg.mpg.de;
      return 301 https://$server_name$request_uri;
    }
    server {
      listen 443 ssl;
      server_name labtest-nomad.esc.rzg.mpg.de;
      ssl_certificate /certs/cert-8701391933287641330712620431.pem;
      ssl_certificate_key /certs/labtest-nomad.esc.rzg.mpg.de.key;
      return 301 https://analytics-toolkit.nomad-coe.eu/$request_uri;
    }
  shortcuts: true
```