From ed0e6dda747d3b750b596567641ed5885ea425b4 Mon Sep 17 00:00:00 2001
From: Markus Scheidgen <markus.scheidgen@gmail.com>
Date: Wed, 24 Jan 2024 17:04:48 +0100
Subject: [PATCH] Added the new helm chart and deployment ci actions for the
 mpcdf cloud.

---
 .gitlab-ci.yml                                | 161 ++---
 docs/howto/oasis/install.md                   |  26 +-
 ops/kubernetes/README.md                      |   6 +
 ops/kubernetes/deployments/.gitignore         |   1 -
 ops/kubernetes/deployments/dev-values.yaml    |  81 ---
 .../deployments/prod-develop-values.yaml      | 101 ----
 .../deployments/prod-process-values.yaml      |  95 ---
 .../deployments/prod-staging-values.yaml      | 101 ----
 .../deployments/prod-test-values.yaml         |  91 ---
 .../deployments/prod-util-values.yaml         |  87 ---
 ops/kubernetes/deployments/prod-values.yaml   |  87 ---
 ops/kubernetes/nomad-prod-staging.yaml        |  59 ++
 ops/kubernetes/nomad-prod-test.yaml           |  58 ++
 ops/kubernetes/nomad-prod.yaml                |  53 ++
 ops/kubernetes/nomad/.gitignore               |   4 +-
 ops/kubernetes/nomad/Chart.lock               |  15 +
 ops/kubernetes/nomad/Chart.yaml               |  50 +-
 ops/kubernetes/nomad/README.md                |  19 +-
 ops/kubernetes/nomad/templates/NOTES.txt      |  30 +-
 .../nomad/templates/api-deployment.yaml       | 223 -------
 .../nomad/templates/api-service.yaml          |  19 -
 .../nomad/templates/app/configmap.yml         |  40 ++
 .../nomad/templates/app/deployment.yaml       | 194 ++++++
 .../nomad/templates/app/service.yaml          |  20 +
 ops/kubernetes/nomad/templates/configmap.yml  | 135 +++++
 ops/kubernetes/nomad/templates/ingress.yaml   |  62 +-
 .../nomad/templates/nomad-configmap.yml       | 127 ----
 .../nomad/templates/proxy-service.yaml        |  26 -
 .../configmap.yml}                            | 105 +---
 .../nomad/templates/proxy/deployment.yaml     | 110 ++++
 .../nomad/templates/proxy/service.yaml        |  20 +
 .../nomad/templates/serviceaccount.yaml       |  13 +
 .../nomad/templates/worker-deployment.yaml    | 150 -----
 .../nomad/templates/worker/deployment.yaml    | 166 ++++++
 ops/kubernetes/nomad/values.yaml              | 557 ++++++++++++------
 ops/kubernetes/values.yaml                    | 201 +++++++
 36 files changed, 1667 insertions(+), 1626 deletions(-)
 create mode 100644 ops/kubernetes/README.md
 delete mode 100644 ops/kubernetes/deployments/.gitignore
 delete mode 100644 ops/kubernetes/deployments/dev-values.yaml
 delete mode 100644 ops/kubernetes/deployments/prod-develop-values.yaml
 delete mode 100644 ops/kubernetes/deployments/prod-process-values.yaml
 delete mode 100644 ops/kubernetes/deployments/prod-staging-values.yaml
 delete mode 100644 ops/kubernetes/deployments/prod-test-values.yaml
 delete mode 100644 ops/kubernetes/deployments/prod-util-values.yaml
 delete mode 100644 ops/kubernetes/deployments/prod-values.yaml
 create mode 100644 ops/kubernetes/nomad-prod-staging.yaml
 create mode 100644 ops/kubernetes/nomad-prod-test.yaml
 create mode 100644 ops/kubernetes/nomad-prod.yaml
 create mode 100644 ops/kubernetes/nomad/Chart.lock
 delete mode 100644 ops/kubernetes/nomad/templates/api-deployment.yaml
 delete mode 100644 ops/kubernetes/nomad/templates/api-service.yaml
 create mode 100644 ops/kubernetes/nomad/templates/app/configmap.yml
 create mode 100644 ops/kubernetes/nomad/templates/app/deployment.yaml
 create mode 100644 ops/kubernetes/nomad/templates/app/service.yaml
 create mode 100644 ops/kubernetes/nomad/templates/configmap.yml
 delete mode 100644 ops/kubernetes/nomad/templates/nomad-configmap.yml
 delete mode 100644 ops/kubernetes/nomad/templates/proxy-service.yaml
 rename ops/kubernetes/nomad/templates/{proxy-deployment.yml => proxy/configmap.yml} (50%)
 create mode 100644 ops/kubernetes/nomad/templates/proxy/deployment.yaml
 create mode 100644 ops/kubernetes/nomad/templates/proxy/service.yaml
 create mode 100644 ops/kubernetes/nomad/templates/serviceaccount.yaml
 delete mode 100644 ops/kubernetes/nomad/templates/worker-deployment.yaml
 create mode 100644 ops/kubernetes/nomad/templates/worker/deployment.yaml
 create mode 100644 ops/kubernetes/values.yaml

diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 8c7f0faa8f..ceca8e6a8e 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -86,6 +86,12 @@ build:
   rules:
     - when: on_success
 
+build helm chart:
+  stage: build
+  script:
+    - helm package -u ops/kubernetes/nomad -d ops/kubernetes
+    - 'curl --request POST --user gitlab-ci-token:$CI_JOB_TOKEN --form "chart=@ops/kubernetes/nomad-1.0.0.tgz" "${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/helm/api/latest/charts"'
+
 python linting:
   stage: test
   image: ${CI_REGISTRY_IMAGE}/dev_python:${DOCKER_TAG}
@@ -250,55 +256,33 @@ gui tests:
       when: never
     - when: on_success
 
-deploy dev:
+deploy prod:
   stage: deploy
   environment:
-    name: dev/$CI_COMMIT_REF_NAME
-    deployment_tier: development
-    url: https://nomad-lab.eu/dev/rae/${CI_ENVIRONMENT_SLUG}
-    auto_stop_in: 7 days
-    on_stop: stop deploy dev
+    name: staging
+    deployment_tier: production
+    url: https://cloud.nomad-lab.eu/prod/v1
   before_script:
     - mkdir ~/.kube/
-    - echo ${CI_K8S_CONFIG} | base64 -d > ~/.kube/config
-    - echo ${CI_ENVIRONMENT_SLUG}
-    - echo "https://nomad-lab.eu/dev/rae/${CI_ENVIRONMENT_SLUG}"
+    - echo ${CI_K8S_CLOUD_CONFIG} | base64 -d > ~/.kube/config
+    - echo "${CI_REGISTRY_PASSWORD}" | docker login ${CI_REGISTRY} --username ${CI_REGISTRY_USER} --password-stdin
   script:
+    - docker pull ${CI_REGISTRY_IMAGE}:${DOCKER_TAG}
+    - docker tag ${CI_REGISTRY_IMAGE}:${DOCKER_TAG} ${CI_REGISTRY_IMAGE}:cloud
+    - docker push ${CI_REGISTRY_IMAGE}:cloud
     - helm dependency update ops/kubernetes/nomad
-    - helm upgrade ${CI_ENVIRONMENT_SLUG} ops/kubernetes/nomad
+    - helm upgrade nomad-prod ops/kubernetes/nomad
       --install
-      --namespace nomad
-      --values ops/kubernetes/deployments/dev-values.yaml
-      --set proxy.external.path=/dev/rae/${CI_ENVIRONMENT_SLUG}
-      --set jupyterhub.hub.baseUrl=/dev/rae/${CI_ENVIRONMENT_SLUG}/north
-      --set jupyterhub.fullnameOverride=${CI_ENVIRONMENT_SLUG}-north
-      --set jupyterhub.singleuser.podNameTemplate="${CI_ENVIRONMENT_SLUG}-north-{username}--{servername}"
-      --set jupyterhub.hub.config.GenericOAuthenticator.oauth_callback_url=https://nomad-lab.eu/dev/rae/${CI_ENVIRONMENT_SLUG}/north/hub/oauth_callback
-      --set image.tag=${DOCKER_TAG}
+      --namespace nomad-prod
+      --values ops/kubernetes/values.yaml
+      --values ops/kubernetes/nomad-prod.yaml
+      --set nomad.image.tag=cloud
       --set roll=true
       --timeout=15m
       --wait
-      --cleanup-on-fail
-  needs:
-    - job: build
-  rules:
-    - when: manual
-      allow_failure: true
-
-stop deploy dev:
-  stage: deploy
-  variables:
-    GIT_STRATEGY: none
-  environment:
-    name: dev/$CI_COMMIT_REF_NAME
-    action: stop
-  before_script:
-    - mkdir ~/.kube/
-    - echo ${CI_K8S_CONFIG} | base64 -d > ~/.kube/config
-  script:
-    - helm uninstall ${CI_ENVIRONMENT_SLUG} --namespace nomad
-  needs:
-    - job: build
+    - docker pull ${CI_REGISTRY_IMAGE}:${DOCKER_TAG}
+    - docker run -t -e NOMAD_KEYCLOAK_REALM_NAME=fairdi_nomad_prod -e NOMAD_KEYCLOAK_SERVER_URL=https://cloud.nomad-lab.eu/fairdi/keycloak/auth/ ${CI_REGISTRY_IMAGE}/dev_python:${DOCKER_TAG}
+      nomad client -n https://cloud.nomad-lab.eu/prod/v1/api -u test -w $CI_NOMAD_TEST_PASSWORD integrationtests --skip-publish --skip-doi
   rules:
     - when: manual
       allow_failure: true
@@ -308,26 +292,28 @@ deploy prod staging:
   environment:
     name: staging
     deployment_tier: production
-    url: https://nomad-lab.eu/prod/v1/staging
+    url: https://cloud.nomad-lab.eu/prod/v1/staging
   before_script:
     - mkdir ~/.kube/
-    - echo ${CI_K8S_PROD_CONFIG} | base64 -d > ~/.kube/config
+    - echo ${CI_K8S_CLOUD_CONFIG} | base64 -d > ~/.kube/config
     - echo "${CI_REGISTRY_PASSWORD}" | docker login ${CI_REGISTRY} --username ${CI_REGISTRY_USER} --password-stdin
   script:
     - docker pull ${CI_REGISTRY_IMAGE}:${DOCKER_TAG}
-    - docker tag ${CI_REGISTRY_IMAGE}:${DOCKER_TAG} ${CI_REGISTRY_IMAGE}:staging
-    - docker push ${CI_REGISTRY_IMAGE}:staging
+    - docker tag ${CI_REGISTRY_IMAGE}:${DOCKER_TAG} ${CI_REGISTRY_IMAGE}:cloud
+    - docker push ${CI_REGISTRY_IMAGE}:cloud
     - helm dependency update ops/kubernetes/nomad
-    - helm upgrade nomad-staging-v1 ops/kubernetes/nomad
+    - helm upgrade nomad-prod-staging ops/kubernetes/nomad
       --install
-      --values ops/kubernetes/deployments/prod-staging-values.yaml
-      --set image.tag=staging
+      --namespace nomad-prod-staging
+      --values ops/kubernetes/values.yaml
+      --values ops/kubernetes/nomad-prod-staging.yaml
+      --set nomad.image.tag=cloud
       --set roll=true
       --timeout=15m
       --wait
     - docker pull ${CI_REGISTRY_IMAGE}:${DOCKER_TAG}
-    - docker run -t -e NOMAD_KEYCLOAK_REALM_NAME=fairdi_nomad_prod ${CI_REGISTRY_IMAGE}/dev_python:${DOCKER_TAG}
-      nomad client -n https://nomad-lab.eu/prod/v1/staging/api -u test -w $CI_NOMAD_TEST_PASSWORD integrationtests --skip-publish --skip-doi
+    - docker run -t -e NOMAD_KEYCLOAK_REALM_NAME=fairdi_nomad_prod -e NOMAD_KEYCLOAK_SERVER_URL=https://cloud.nomad-lab.eu/fairdi/keycloak/auth/ ${CI_REGISTRY_IMAGE}/dev_python:${DOCKER_TAG}
+      nomad client -n https://cloud.nomad-lab.eu/prod/v1/staging/api -u test -w $CI_NOMAD_TEST_PASSWORD integrationtests --skip-publish --skip-doi
   rules:
     - when: manual
       allow_failure: true
@@ -340,83 +326,26 @@ deploy prod test:
     url: https://nomad-lab.eu/prod/v1/test
   before_script:
     - mkdir ~/.kube/
-    - echo ${CI_K8S_PROD_CONFIG} | base64 -d > ~/.kube/config
+    - echo ${CI_K8S_CLOUD_CONFIG} | base64 -d > ~/.kube/config
+    - echo "${CI_REGISTRY_PASSWORD}" | docker login ${CI_REGISTRY} --username ${CI_REGISTRY_USER} --password-stdin
   script:
-    - helm dependency update ops/kubernetes/nomad
-    - helm upgrade nomad-test-v1 ops/kubernetes/nomad
-      --install
-      --values ops/kubernetes/deployments/prod-test-values.yaml
-      --set image.tag=${DOCKER_TAG}
-      --set roll=true
-      --timeout=15m
-      --wait
     - docker pull ${CI_REGISTRY_IMAGE}:${DOCKER_TAG}
-    - docker run -t -e NOMAD_KEYCLOAK_REALM_NAME=fairdi_nomad_prod ${CI_REGISTRY_IMAGE}/dev_python:${DOCKER_TAG}
-      nomad client -n https://nomad-lab.eu/prod/v1/test/api -u test -w $CI_NOMAD_TEST_PASSWORD integrationtests --skip-publish --skip-doi
-  rules:
-    - when: manual
-      allow_failure: true
-
-deploy prod util:
-  stage: deploy
-  before_script:
-    - mkdir ~/.kube/
-    - echo ${CI_K8S_PROD_CONFIG} | base64 -d > ~/.kube/config
-  script:
-    - helm dependency update ops/kubernetes/nomad
-    - helm upgrade nomad-util-v1 ops/kubernetes/nomad
-      --install
-      --values ops/kubernetes/deployments/prod-util-values.yaml
-      --set image.tag=${DOCKER_TAG}
-      --set roll=true
-      --timeout=15m
-      --wait
-  rules:
-    - when: manual
-      allow_failure: true
-
-deploy prod process:
-  stage: deploy
-  before_script:
-    - mkdir ~/.kube/
-    - echo ${CI_K8S_PROD_CONFIG} | base64 -d > ~/.kube/config
-  script:
-    - helm dependency update ops/kubernetes/nomad
-    - helm upgrade nomad-process-v1 ops/kubernetes/nomad
-      --install
-      --values ops/kubernetes/deployments/prod-process-values.yaml
-      --set image.tag=${DOCKER_TAG}
-      --set roll=true
-      --timeout=15m
-      --wait
-  rules:
-    - when: manual
-      allow_failure: true
-
-deploy prod develop:
-  stage: deploy
-  environment:
-    name: develop
-    deployment_tier: production
-    url: https://nomad-lab.eu/prod/v1/develop
-  before_script:
-    - mkdir ~/.kube/
-    - echo ${CI_K8S_PROD_CONFIG} | base64 -d > ~/.kube/config
-  script:
+    - docker tag ${CI_REGISTRY_IMAGE}:${DOCKER_TAG} ${CI_REGISTRY_IMAGE}:cloud
+    - docker push ${CI_REGISTRY_IMAGE}:cloud
     - helm dependency update ops/kubernetes/nomad
-    - helm upgrade nomad-develop-v1 ops/kubernetes/nomad
+    - helm upgrade nomad-prod-test ops/kubernetes/nomad
       --install
-      --namespace default
-      --values ops/kubernetes/deployments/prod-develop-values.yaml
-      --set image.tag=${DOCKER_TAG}
+      --namespace nomad-prod-test
+      --values ops/kubernetes/values.yaml
+      --values ops/kubernetes/nomad-prod-test.yaml
+      --set nomad.image.tag=cloud
       --set roll=true
       --timeout=15m
       --wait
     - docker pull ${CI_REGISTRY_IMAGE}:${DOCKER_TAG}
-    - docker run -t -e NOMAD_KEYCLOAK_REALM_NAME=fairdi_nomad_prod ${CI_REGISTRY_IMAGE}/dev_python:${DOCKER_TAG}
-      nomad client -n https://nomad-lab.eu/prod/v1/develop/api -u test -w $CI_NOMAD_TEST_PASSWORD integrationtests --skip-publish --skip-doi
+    - docker run -t -e NOMAD_KEYCLOAK_REALM_NAME=fairdi_nomad_prod -e NOMAD_KEYCLOAK_SERVER_URL=https://cloud.nomad-lab.eu/fairdi/keycloak/auth/ ${CI_REGISTRY_IMAGE}/dev_python:${DOCKER_TAG}
+      nomad client -n https://cloud.nomad-lab.eu/prod/v1/test/api -u test -w $CI_NOMAD_TEST_PASSWORD integrationtests --skip-publish --skip-doi
   rules:
-    - if: $CI_COMMIT_BRANCH == "develop" && $NIGHTLY
     - when: manual
       allow_failure: true
 
diff --git a/docs/howto/oasis/install.md b/docs/howto/oasis/install.md
index 0191e7d4fd..e6f0bb2316 100644
--- a/docs/howto/oasis/install.md
+++ b/docs/howto/oasis/install.md
@@ -12,7 +12,7 @@ central NOMAD installation.
 
     **Register your Oasis**
     If you installed (or even just plan to install) a NOMAD Oasis, please
-    [register your Oasis with FAIRmat](https://www.fairmat-nfdi.eu/fairmat/oasis_registration) 
+    [register your Oasis with FAIRmat](https://www.fairmat-nfdi.eu/fairmat/oasis_registration)
     and help us to assist you in the future.
 
 ## Quick-start
@@ -507,5 +507,27 @@ This should give you a working OASIS at `http://<your-host>/<your-path-prefix>`.
 
 !!! warning "Attention"
 
-    This is not yet documented.
+    This is just preliminary documentation and many details are missing.
 
+There is a NOMAD [Helm](https://helm.sh/) chart. First we need to add the
+NOMAD Helm chart repository:
+
+```sh
+helm repo add nomad https://gitlab.mpcdf.mpg.de/api/v4/projects/2187/packages/helm/latest
+```
+
+Now we need a minimal `values.yaml` that configures the individual kubernetes resources
+created by our Helm chart:
+
+```yaml
+--8<-- "ops/kubernetes/example-values.yaml"
+```
+
+The `jupyterhub`, `mongodb`, `elasticsearch`, and `rabbitmq` values follow the
+configuration of the respective official Helm charts.
+
+Run the Helm chart and install NOMAD:
+
+```sh
+helm upgrade --install nomad nomad/nomad -f values.yaml
+```
diff --git a/ops/kubernetes/README.md b/ops/kubernetes/README.md
new file mode 100644
index 0000000000..4177bb8bd2
--- /dev/null
+++ b/ops/kubernetes/README.md
@@ -0,0 +1,6 @@
+## The different deployments used for the central nomad-lab.eu NOMAD installations.
+
+Those files are used by the `.gitlab-ci.yml` to deploy to the respective
+installations.
+
+The `example-values.yaml` is only used in the k8s install documentation.
\ No newline at end of file
diff --git a/ops/kubernetes/deployments/.gitignore b/ops/kubernetes/deployments/.gitignore
deleted file mode 100644
index 41aa11ae75..0000000000
--- a/ops/kubernetes/deployments/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-prod-values-*.yaml
diff --git a/ops/kubernetes/deployments/dev-values.yaml b/ops/kubernetes/deployments/dev-values.yaml
deleted file mode 100644
index 8c26605799..0000000000
--- a/ops/kubernetes/deployments/dev-values.yaml
+++ /dev/null
@@ -1,81 +0,0 @@
-version:
-  label: develop
-  isBeta: true
-  usesBetaData: false
-  officialUrl: "https://nomad-lab.eu/prod/v1/gui"
-
-image:
-  pullPolicy: "Always"
-
-proxy:
-  external:
-    host: "nomad-lab.eu"
-
-ingress:
-  enabled: true
-
-app:
-  worker: 2
-
-gui:
-  debug: true
-
-worker:
-  replicas: 1
-  routing: "queue"
-  # request and limit in GB, good prod sizes are 64, 420
-  memrequest: 1
-
-elastic:
-  host: elasticsearch.elasticsearch-7.svc.cluster.local
-  # secret: nomad-elastic
-
-mongo:
-  host: mongodb://mongodb-0.mongo.mongodb.svc.cluster.local,mongodb-1.mongo.mongodb.svc.cluster.local,mongodb-2.mongo.mongodb.svc.cluster.local/?replicaSet=rs0
-  # secret: nomad-mongo
-
-logstash:
-  host: logstash.elk.svc.cluster.local
-
-dbname: nomad_dev_v1
-
-keycloak:
-  serverUrl: "https://nomad-lab.eu/fairdi/keycloak/auth/"
-  serverExternalUrl: "https://nomad-lab.eu/fairdi/keycloak/auth/"
-  passwordSecret: "nomad-keycloak-password"
-  realmName: "fairdi_nomad_prod"
-  clientId: "nomad_public"
-  admin_user_id: "82efac55-6187-408c-8027-b98580c0e1c5"
-
-volumes:
-  prefixSize: 1
-  public: /nomad/fairdi/dev/v1/fs/public
-  staging: /nomad/fairdi/dev/v1/fs/staging
-  north_home: /nomad/fairdi/dev/v1/fs/north/users
-  tmp: /nomad/fairdi/dev/v1/fs/tmp
-  nomad: /nomad
-
-north:
-  enabled: true
-  hubServiceApiTokenSecret: "nomad-hub-service-api-token"
-
-jupyterhub:
-  fullnameOverride: null
-  nameOverride: "north"
-  hub:
-    baseUrl:
-    db:
-      type: sqlite-memory
-    config:
-      GenericOAuthenticator:
-        client_id: nomad_public
-        oauth_callback_url: https://nomad-lab.eu/fairdi/nomad/latest/north/hub/oauth_callback
-        authorize_url: https://nomad-lab.eu/fairdi/keycloak/auth/realms/fairdi_nomad_prod/protocol/openid-connect/auth
-        token_url: https://nomad-lab.eu/fairdi/keycloak/auth/realms/fairdi_nomad_prod/protocol/openid-connect/token
-        userdata_url: https://nomad-lab.eu/fairdi/keycloak/auth/realms/fairdi_nomad_prod/protocol/openid-connect/userinfo
-  singleuser:
-    podNameTemplate:
-
-  prePuller:
-    hook:
-      enabled: false
diff --git a/ops/kubernetes/deployments/prod-develop-values.yaml b/ops/kubernetes/deployments/prod-develop-values.yaml
deleted file mode 100644
index 4e7dfe804e..0000000000
--- a/ops/kubernetes/deployments/prod-develop-values.yaml
+++ /dev/null
@@ -1,101 +0,0 @@
-version:
-  label: develop
-  isBeta: true
-  usesBetaData: false
-  officialUrl: "https://nomad-lab.eu/prod/v1/gui"
-
-image:
-  tag: "develop"
-  pullPolicy: "Always"
-
-proxy:
-  nodePort: 30019
-  nodeIP: "130.183.207.81"
-  external:
-    host: "nomad-lab.eu"
-    path: "/prod/v1/develop"
-
-gui:
-  debug: true
-  matomoEnabled: false
-  matomoSiteId: 2
-  encyclopediaBase: "https://nomad-lab.eu/prod/rae/encyclopedia/#"
-  aitoolkitEnabled: false
-
-app:
-  replicas: 2
-  nomadNodeType: "public"
-
-worker:
-  replicas: 1
-  routing: "queue"
-  processes: 12
-  nomadNodeType: "prod-worker"
-
-elastic:
-  host: elasticsearch.elasticsearch-7.svc.cluster.local
-  port: 9200
-  # secret: nomad-elastic
-
-mongo:
-  host: mongodb://mongodb-0.mongo.mongodb.svc.cluster.local,mongodb-1.mongo.mongodb.svc.cluster.local,mongodb-2.mongo.mongodb.svc.cluster.local/?replicaSet=rs0
-  # secret: nomad-mongo
-
-logstash:
-  host: logstash.elk.svc.cluster.local
-
-dbname: nomad_prod_v1
-
-uploadurl: "https://nomad-lab.eu/prod/v1/develop/api/uploads"
-
-client:
-  passwordSecret: "nomad-keycloak-password"
-
-keycloak:
-  serverUrl: "https://nomad-lab.eu/fairdi/keycloak/auth/"
-  serverExternalUrl: "https://nomad-lab.eu/fairdi/keycloak/auth/"
-  passwordSecret: "nomad-keycloak-password"
-  realmName: "fairdi_nomad_prod"
-  clientId: "nomad_public"
-  admin_user_id: "82efac55-6187-408c-8027-b98580c0e1c5"
-
-volumes:
-  prefixSize: 1
-  public: /nomad/fairdi/prod/fs/public
-  staging: /nomad/fairdi/prod/fs/staging
-  north_home: /nomad/fairdi/prod/fs/north/users
-  tmp: /nomad/fairdi/prod/fs/tmp
-  nomad: /nomad
-  archiveVersionSuffix: v1
-
-mail:
-  enabled: false
-  host: "mailrelay.mpcdf.mpg.de"
-  port: 25
-  from: "support@nomad-lab.eu"
-
-datacite:
-  secret: "nomad-datacite"
-  enabled: true
-
-north:
-  enabled: true
-  hubServiceApiTokenSecret: "nomad-hub-service-api-token"
-
-jupyterhub:
-  fullnameOverride: "nomad-develop-v1-north"
-  hub:
-    baseUrl: "/prod/v1/develop/north"
-    db:
-      type: sqlite-pvc
-      pvc:
-        storageClassName: nomad-develop-v1-north-hub-db
-    config:
-      GenericOAuthenticator:
-        client_id: nomad_public
-        oauth_callback_url: https://nomad-lab.eu/prod/v1/develop/north/hub/oauth_callback
-        authorize_url: https://nomad-lab.eu/fairdi/keycloak/auth/realms/fairdi_nomad_prod/protocol/openid-connect/auth
-        token_url: https://nomad-lab.eu/fairdi/keycloak/auth/realms/fairdi_nomad_prod/protocol/openid-connect/token
-        userdata_url: https://nomad-lab.eu/fairdi/keycloak/auth/realms/fairdi_nomad_prod/protocol/openid-connect/userinfo
-  singleuser:
-    podNameTemplate: "nomad-develop-v1-north-{username}--{servername}"
diff --git a/ops/kubernetes/deployments/prod-process-values.yaml b/ops/kubernetes/deployments/prod-process-values.yaml
deleted file mode 100644
index 4183da9da7..0000000000
--- a/ops/kubernetes/deployments/prod-process-values.yaml
+++ /dev/null
@@ -1,95 +0,0 @@
-version:
-  label: process
-  isBeta: true
-  usesBetaData: false
-  officialUrl: "https://nomad-lab.eu/prod/v1/gui"
-
-image:
-  tag: "process"
-  pullPolicy: "Always"
-
-proxy:
-  nodePort: 30020
-  nodeIP: "130.183.207.81"
-  external:
-    host: "nomad-lab.eu"
-    path: "/prod/v1/process"
-
-gui:
-  debug: true
-  matomoEnabled: false
-  matomoSiteId: 2
-  encyclopediaBase: "https://nomad-lab.eu/prod/rae/encyclopedia/#"
-  aitoolkitEnabled: false
-
-app:
-  replicas: 1
-  worker: 1
-  nomadNodeType: "public"
-
-celery:
-  timeout: 7200
-
-worker:
-  replicas: 2
-  routing: "worker"
-  processes: 8
-  memrequest: 256
-  memlimit: 488
-  nomadNodeType: "worker"
-
-process:
-  rfc3161_skip_published: true
-
-elastic:
-  host: elasticsearch.elasticsearch-7.svc.cluster.local
-  port: 9200
-  dbname: nomad_prod_v1.2
-  # secret: nomad-elastic
-
-mongo:
-  host: mongodb://mongodb-0.mongo.mongodb.svc.cluster.local,mongodb-1.mongo.mongodb.svc.cluster.local,mongodb-2.mongo.mongodb.svc.cluster.local/?replicaSet=rs0
-  # secret: nomad-mongo
-
-logstash:
-  host: logstash.elk.svc.cluster.local
-
-dbname: nomad_prod_v1
-
-uploadurl: "https://nomad-lab.eu/prod/v1/process/api/uploads"
-
-client:
-  passwordSecret: "nomad-keycloak-password"
-
-keycloak:
-  serverUrl: "https://nomad-lab.eu/fairdi/keycloak/auth/"
-  serverExternalUrl: "https://nomad-lab.eu/fairdi/keycloak/auth/"
-  passwordSecret: "nomad-keycloak-password"
-  realmName: "fairdi_nomad_prod"
-  clientId: "nomad_public"
-  admin_user_id: "82efac55-6187-408c-8027-b98580c0e1c5"
-
-volumes:
-  prefixSize: 1
-  public: /nomad/fairdi/prod/fs/public
-  staging: /scratch/fairdi/prod/fs/staging
-  north_home: /nomad/fairdi/prod/fs/north/users
-  tmp: /nomad/fairdi/prod/fs/tmp
-  nomad: /nomad
-  archiveVersionSuffix: v1.2
-
-mail:
-  enabled: false
-  host: "mailrelay.mpcdf.mpg.de"
-  port: 25
-  from: "support@nomad-lab.eu"
-
-datacite:
-  secret: "nomad-datacite"
-  enabled: false
-
-north:
-  enabled: false
-
-archive:
-  use_new_writer: true
diff --git a/ops/kubernetes/deployments/prod-staging-values.yaml b/ops/kubernetes/deployments/prod-staging-values.yaml
deleted file mode 100644
index c2fd7111db..0000000000
--- a/ops/kubernetes/deployments/prod-staging-values.yaml
+++ /dev/null
@@ -1,101 +0,0 @@
-version:
-  label: beta/staging
-  isBeta: true
-  usesBetaData: false
-  officialUrl: "https://nomad-lab.eu/prod/v1/gui"
-
-image:
-  tag: "latest"
-  pullPolicy: "Always"
-
-proxy:
-  nodePort: 30014
-  nodeIP: "130.183.207.81"
-  external:
-    host: "nomad-lab.eu"
-    path: "/prod/v1/staging"
-
-gui:
-  debug: true
-  matomoEnabled: false
-  matomoSiteId: 2
-  encyclopediaBase: "https://nomad-lab.eu/prod/rae/encyclopedia/#"
-  aitoolkitEnabled: false
-
-app:
-  replicas: 4
-  nomadNodeType: "public"
-
-worker:
-  replicas: 1
-  routing: "queue"
-  processes: 12
-  nomadNodeType: "prod-worker"
-
-elastic:
-  host: elasticsearch.elasticsearch-7.svc.cluster.local
-  port: 9200
-  # secret: nomad-elastic
-
-mongo:
-  host: mongodb://mongodb-0.mongo.mongodb.svc.cluster.local,mongodb-1.mongo.mongodb.svc.cluster.local,mongodb-2.mongo.mongodb.svc.cluster.local/?replicaSet=rs0
-  # secret: nomad-mongo
-
-logstash:
-  host: logstash.elk.svc.cluster.local
-
-dbname: nomad_prod_v1
-
-uploadurl: "https://nomad-lab.eu/prod/v1/staging/api/uploads"
-
-client:
-  passwordSecret: "nomad-keycloak-password"
-
-keycloak:
-  serverUrl: "https://nomad-lab.eu/fairdi/keycloak/auth/"
-  serverExternalUrl: "https://nomad-lab.eu/fairdi/keycloak/auth/"
-  passwordSecret: "nomad-keycloak-password"
-  realmName: "fairdi_nomad_prod"
-  clientId: "nomad_public"
-  admin_user_id: "82efac55-6187-408c-8027-b98580c0e1c5"
-
-volumes:
-  prefixSize: 1
-  public: /nomad/fairdi/prod/fs/public
-  staging: /nomad/fairdi/prod/fs/staging
-  north_home: /nomad/fairdi/prod/fs/north/users
-  tmp: /nomad/fairdi/prod/fs/tmp
-  nomad: /nomad
-  archiveVersionSuffix: v1
-
-mail:
-  enabled: false
-  host: "mailrelay.mpcdf.mpg.de"
-  port: 25
-  from: "support@nomad-lab.eu"
-
-datacite:
-  secret: "nomad-datacite"
-  enabled: true
-
-north:
-  enabled: true
-  hubServiceApiTokenSecret: "nomad-hub-service-api-token"
-
-jupyterhub:
-  fullnameOverride: "nomad-staging-v1-north"
-  hub:
-    baseUrl: "/prod/v1/staging/north"
-    db:
-      type: sqlite-pvc
-      pvc:
-        storageClassName: nomad-staging-v1-north-hub-db
-    config:
-      GenericOAuthenticator:
-        client_id: nomad_public
-        oauth_callback_url: https://nomad-lab.eu/prod/v1/staging/north/hub/oauth_callback
-        authorize_url: https://nomad-lab.eu/fairdi/keycloak/auth/realms/fairdi_nomad_prod/protocol/openid-connect/auth
-        token_url: https://nomad-lab.eu/fairdi/keycloak/auth/realms/fairdi_nomad_prod/protocol/openid-connect/token
-        userdata_url: https://nomad-lab.eu/fairdi/keycloak/auth/realms/fairdi_nomad_prod/protocol/openid-connect/userinfo
-  singleuser:
-    podNameTemplate: "nomad-staging-v1-north-{username}--{servername}"
diff --git a/ops/kubernetes/deployments/prod-test-values.yaml b/ops/kubernetes/deployments/prod-test-values.yaml
deleted file mode 100644
index b7229d4f23..0000000000
--- a/ops/kubernetes/deployments/prod-test-values.yaml
+++ /dev/null
@@ -1,91 +0,0 @@
-version:
-  label: test
-  isBeta: true
-  usesBetaData: true
-  officialUrl: "https://nomad-lab.eu/prod/v1/gui"
-
-image:
-  tag: "latest"
-  pullPolicy: "Always"
-
-proxy:
-  nodePort: 30008
-  nodeIP: "130.183.207.81"
-  external:
-    host: "nomad-lab.eu"
-    path: "/prod/v1/test"
-
-gui:
-  debug: true
-
-app:
-  replicas: 2
-
-worker:
-  replicas: 1
-  routing: "queue"
-  processes: 10
-  nomadNodeType: "prod-worker"
-
-elastic:
-  host: elasticsearch.elasticsearch-7.svc.cluster.local
-  port: 9200
-  # secret: nomad-elastic
-
-mongo:
-  host: mongodb://mongodb-0.mongo.mongodb.svc.cluster.local,mongodb-1.mongo.mongodb.svc.cluster.local,mongodb-2.mongo.mongodb.svc.cluster.local/?replicaSet=rs0
-  # secret: nomad-mongo
-
-logstash:
-  host: logstash.elk.svc.cluster.local
-
-dbname: nomad_test_v1
-
-uploadurl: "http://nomad-lab.eu/prod/rae/test/api/uploads"
-
-client:
-  passwordSecret: "nomad-keycloak-password"
-
-keycloak:
-  serverUrl: "https://nomad-lab.eu/fairdi/keycloak/auth/"
-  passwordSecret: "nomad-keycloak-password"
-  realmName: "fairdi_nomad_prod"
-  clientId: "nomad_public"
-  admin_user_id: "82efac55-6187-408c-8027-b98580c0e1c5"
-
-volumes:
-  prefixSize: 1
-  public: /nomad/fairdi/test/fs/public
-  staging: /nomad/fairdi/test/fs/staging
-  north_home: /nomad/fairdi/test/fs/north/users
-  tmp: /nomad/fairdi/test/fs/tmp
-  nomad: /nomad
-  archiveVersionSuffix: v1
-
-mail:
-  enabled: false
-  host: "mailrelay.mpcdf.mpg.de"
-  port: 25
-  from: "webmaster@nomad-coe.eu"
-
-north:
-  enabled: false
-  hubServiceApiTokenSecret: "nomad-hub-service-api-token"
-
-jupyterhub:
-  fullnameOverride: "nomad-test-v1-north"
-  hub:
-    baseUrl: "/prod/v1/test/"
-    db:
-      type: sqlite-pvc
-      pvc:
-        storageClassName: nomad-test-v1-north-hub-db
-    config:
-      GenericOAuthenticator:
-        client_id: nomad_public
-        oauth_callback_url: https://nomad-lab.eu/prod/v1/test/north/hub/oauth_callback
-        authorize_url: https://nomad-lab.eu/fairdi/keycloak/auth/realms/fairdi_nomad_prod/protocol/openid-connect/auth
-        token_url: https://nomad-lab.eu/fairdi/keycloak/auth/realms/fairdi_nomad_prod/protocol/openid-connect/token
-        userdata_url: https://nomad-lab.eu/fairdi/keycloak/auth/realms/fairdi_nomad_prod/protocol/openid-connect/userinfo
-  singleuser:
-    podNameTemplate: "nomad-test-v1-north-{username}--{servername}"
diff --git a/ops/kubernetes/deployments/prod-util-values.yaml b/ops/kubernetes/deployments/prod-util-values.yaml
deleted file mode 100644
index 224eacb42f..0000000000
--- a/ops/kubernetes/deployments/prod-util-values.yaml
+++ /dev/null
@@ -1,87 +0,0 @@
-version:
-  label: util
-  isBeta: true
-  usesBetaData: false
-  officialUrl: "https://nomad-lab.eu/prod/v1/gui"
-
-image:
-  tag: "latest"
-  pullPolicy: "Always"
-
-proxy:
-  nodePort: 30016
-  nodeIP: "130.183.207.81"
-  external:
-    host: "nomad-lab.eu"
-    path: "/prod/v1/util"
-
-gui:
-  debug: true
-  matomoEnabled: false
-  matomoSiteId: 2
-  encyclopediaBase: "https://nomad-lab.eu/prod/rae/encyclopedia/#"
-  aitoolkitEnabled: false
-
-app:
-  replicas: 1
-  nomadNodeType: "public"
-
-celery:
-  timeout: 7200
-
-worker:
-  replicas: 1
-  routing: "worker"
-  processes: 2
-  memrequest: 256
-  memlimit: 488
-  nomadNodeType: "worker"
-
-elastic:
-  host: elasticsearch.elasticsearch-7.svc.cluster.local
-  port: 9200
-  # secret: nomad-elastic
-
-mongo:
-  host: mongodb://mongodb-0.mongo.mongodb.svc.cluster.local,mongodb-1.mongo.mongodb.svc.cluster.local,mongodb-2.mongo.mongodb.svc.cluster.local/?replicaSet=rs0
-  # secret: nomad-mongo
-
-logstash:
-  host: logstash.elk.svc.cluster.local
-
-dbname: nomad_prod_v1
-
-uploadurl: "https://nomad-lab.eu/prod/v1/util/api/uploads"
-
-client:
-  passwordSecret: "nomad-keycloak-password"
-
-keycloak:
-  serverUrl: "https://nomad-lab.eu/fairdi/keycloak/auth/"
-  serverExternalUrl: "https://nomad-lab.eu/fairdi/keycloak/auth/"
-  passwordSecret: "nomad-keycloak-password"
-  realmName: "fairdi_nomad_prod"
-  clientId: "nomad_public"
-  admin_user_id: "82efac55-6187-408c-8027-b98580c0e1c5"
-
-volumes:
-  prefixSize: 1
-  public: /nomad/fairdi/prod/fs/public
-  staging: /nomad/fairdi/prod/fs/staging
-  north_home: /nomad/fairdi/prod/fs/north/users
-  tmp: /nomad/fairdi/prod/fs/tmp
-  nomad: /nomad
-  archiveVersionSuffix: v1
-
-mail:
-  enabled: false
-  host: "mailrelay.mpcdf.mpg.de"
-  port: 25
-  from: "support@nomad-lab.eu"
-
-datacite:
-  secret: "nomad-datacite"
-  enabled: false
-
-north:
-  enabled: false
diff --git a/ops/kubernetes/deployments/prod-values.yaml b/ops/kubernetes/deployments/prod-values.yaml
deleted file mode 100644
index 06c7b767cd..0000000000
--- a/ops/kubernetes/deployments/prod-values.yaml
+++ /dev/null
@@ -1,87 +0,0 @@
-version:
-  isBeta: false
-  usesBetaData: false
-  officialUrl: "https://nomad-lab.eu/prod/v1/gui"
-
-image:
-  # Replace with the right version tag
-  # tag: "stable"
-  pullPolicy: "Always"
-
-proxy:
-  # Replace with the right node port for deployment 1 (30011) and 2 (30012)
-  # nodePort: 3001x
-  nodeIP: "130.183.207.81"
-  external:
-    host: "nomad-lab.eu"
-    path: "/prod/v1"
-
-gui:
-  debug: false
-  matomoEnabled: false
-  matomoSiteId: 2
-  encyclopediaBase: "https://nomad-lab.eu/prod/rae/encyclopedia/#"
-  aitoolkitEnabled: false
-  config:
-    entry:
-      cards:
-        exclude: ["relatedResources", "workflow"]
-
-app:
-  replicas: 4
-  worker: 8
-  nomadNodeType: "public"
-
-worker:
-  replicas: 2
-  routing: "queue"
-  processes: 12
-  nomadNodeType: "prod-worker"
-
-elastic:
-  host: elasticsearch.elasticsearch-7.svc.cluster.local
-  port: 9200
-  # secret: nomad-elastic
-
-mongo:
-  host: mongodb://mongodb-0.mongo.mongodb.svc.cluster.local,mongodb-1.mongo.mongodb.svc.cluster.local,mongodb-2.mongo.mongodb.svc.cluster.local/?replicaSet=rs0
-  # secret: nomad-mongo
-
-logstash:
-  host: logstash.elk.svc.cluster.local
-
-dbname: nomad_prod_v1
-
-uploadurl: "https://nomad-lab.eu/prod/v1/gui/uploads"
-client:
-  passwordSecret: "nomad-keycloak-password"
-
-keycloak:
-  serverUrl: "https://nomad-lab.eu/fairdi/keycloak/auth/"
-  serverExternalUrl: "https://nomad-lab.eu/fairdi/keycloak/auth/"
-  passwordSecret: "nomad-keycloak-password"
-  realmName: "fairdi_nomad_prod"
-  clientId: "nomad_public"
-  admin_user_id: "82efac55-6187-408c-8027-b98580c0e1c5"
-
-volumes:
-  prefixSize: 1
-  public: /nomad/fairdi/prod/fs/public
-  staging: /nomad/fairdi/prod/fs/staging
-  north_home: /nomad/fairdi/prod/fs/north/users
-  tmp: /nomad/fairdi/prod/fs/tmp
-  nomad: /nomad
-  archiveVersionSuffix: v1
-
-mail:
-  enabled: true
-  host: "mailrelay.mpcdf.mpg.de"
-  port: 25
-  from: "support@nomad-lab.eu"
-
-datacite:
-  secret: "nomad-datacite"
-  enabled: true
-
-north:
-  enabled: false
diff --git a/ops/kubernetes/nomad-prod-staging.yaml b/ops/kubernetes/nomad-prod-staging.yaml
new file mode 100644
index 0000000000..2116cf6d94
--- /dev/null
+++ b/ops/kubernetes/nomad-prod-staging.yaml
@@ -0,0 +1,59 @@
+nomad:
+  config:
+    version:
+      label: beta/staging
+      isBeta: true
+      usesBetaData: false
+      officialUrl: "https://nomad-lab.eu/prod/v1/gui"
+
+    proxy:
+      external:
+        path: "/prod/v1/staging"
+
+    gui:
+      debug: true
+
+    dbname: nomad_prod_v1
+
+    uploadurl: "https://cloud.nomad-lab.eu/prod/v1/staging/api/uploads"
+
+    mail:
+      enabled: false
+
+    datacite:
+      enabled: false
+
+    north:
+      enabled: true
+
+  image:
+    tag: "cloud"
+
+  ingress:
+    hosts:
+      - host: cloud.nomad-lab.eu
+        paths:
+          - path: /prod/v1/staging/
+            pathType: ImplementationSpecific
+
+  app:
+    replicaCount: 8
+
+  worker:
+    replicaCount: 1
+    processes: 12
+    resources:
+      limits:
+        memory: "32Gi"
+      requests:
+        memory: "8Gi"
+
+jupyterhub:
+  fullnameOverride: "nomad-prod-staging-north"
+  hub:
+    baseUrl: "/prod/v1/staging/north"
+    config:
+      GenericOAuthenticator:
+        oauth_callback_url: https://cloud.nomad-lab.eu/prod/v1/staging/north/hub/oauth_callback
+  singleuser:
+    podNameTemplate: "nomad-prod-staging-north-{username}--{servername}"
diff --git a/ops/kubernetes/nomad-prod-test.yaml b/ops/kubernetes/nomad-prod-test.yaml
new file mode 100644
index 0000000000..90d0318832
--- /dev/null
+++ b/ops/kubernetes/nomad-prod-test.yaml
@@ -0,0 +1,58 @@
+nomad:
+  config:
+    version:
+      label: beta/test
+      isBeta: true
+      usesBetaData: true
+      officialUrl: "https://nomad-lab.eu/prod/v1/gui"
+
+    proxy:
+      external:
+        path: "/prod/v1/test"
+
+    gui:
+      debug: true
+
+    dbname: nomad_test_v1
+
+    uploadurl: "https://cloud.nomad-lab.eu/prod/v1/test/api/uploads"
+
+    volumes:
+      prefixSize: 1
+      public: /nomad/test/fs/public
+      staging: /nomad/test/fs/staging
+      north_home: /nomad/test/fs/north/users
+      tmp: /nomad/test/fs/tmp
+      nomad: /nomad
+      archiveVersionSuffix: v1
+
+    mail:
+      enabled: false
+
+    datacite:
+      enabled: false
+
+    north:
+      enabled: false
+
+  image:
+    tag: "cloud"
+
+  ingress:
+    hosts:
+      - host: cloud.nomad-lab.eu
+        paths:
+          - path: /prod/v1/test/
+            pathType: ImplementationSpecific
+
+  app:
+    replicaCount: 4
+
+  worker:
+    replicaCount: 1
+    processes: 4
+    resources:
+      limits:
+        memory: "32Gi"
+      requests:
+        memory: "8Gi"
diff --git a/ops/kubernetes/nomad-prod.yaml b/ops/kubernetes/nomad-prod.yaml
new file mode 100644
index 0000000000..20cbeea65a
--- /dev/null
+++ b/ops/kubernetes/nomad-prod.yaml
@@ -0,0 +1,53 @@
+nomad:
+  config:
+    proxy:
+      external:
+        path: "/prod/v1"
+
+    dbname: nomad_prod_v1
+
+    uploadurl: "https://cloud.nomad-lab.eu/prod/v1/api/uploads"
+
+    mail:
+      enabled: true
+
+    datacite:
+      enabled: true
+
+    north:
+      enabled: true
+
+  image:
+    tag: "cloud"
+
+  ingress:
+    annotations:
+      nginx.ingress.kubernetes.io/limit-rps: "10"
+      nginx.ingress.kubernetes.io/denylist-source-range: "141.35.40.36/32, 141.35.40.52/32"
+    hosts:
+      - host: cloud.nomad-lab.eu
+        paths:
+          - path: /prod/v1/
+            pathType: ImplementationSpecific
+
+  app:
+    replicaCount: 18
+
+  worker:
+    replicaCount: 1
+    processes: 12
+    resources:
+      limits:
+        memory: "32Gi"
+      requests:
+        memory: "8Gi"
+
+jupyterhub:
+  fullnameOverride: "nomad-prod-north"
+  hub:
+    baseUrl: "/prod/v1/north"
+    config:
+      GenericOAuthenticator:
+        oauth_callback_url: https://cloud.nomad-lab.eu/prod/v1/north/hub/oauth_callback
+  singleuser:
+    podNameTemplate: "nomad-prod-north-{username}--{servername}"
diff --git a/ops/kubernetes/nomad/.gitignore b/ops/kubernetes/nomad/.gitignore
index 1654d0dc35..a273dd7da1 100644
--- a/ops/kubernetes/nomad/.gitignore
+++ b/ops/kubernetes/nomad/.gitignore
@@ -1,2 +1,2 @@
-charts/
-Chart.lock
\ No newline at end of file
+# Chart dependencies
+**/charts/*.tgz
\ No newline at end of file
diff --git a/ops/kubernetes/nomad/Chart.lock b/ops/kubernetes/nomad/Chart.lock
new file mode 100644
index 0000000000..463f27f890
--- /dev/null
+++ b/ops/kubernetes/nomad/Chart.lock
@@ -0,0 +1,15 @@
+dependencies:
+- name: rabbitmq
+  repository: https://charts.bitnami.com/bitnami
+  version: 11.2.2
+- name: elasticsearch
+  repository: https://helm.elastic.co
+  version: 7.17.3
+- name: mongodb
+  repository: oci://registry-1.docker.io/bitnamicharts
+  version: 14.0.4
+- name: jupyterhub
+  repository: https://jupyterhub.github.io/helm-chart/
+  version: 1.2.0
+digest: sha256:95dc91062b14809cf6d1e5d8ee4f2298f1d307ba77e9c932c24de6bcff471b49
+generated: "2024-01-24T16:02:08.157535+01:00"
diff --git a/ops/kubernetes/nomad/Chart.yaml b/ops/kubernetes/nomad/Chart.yaml
index 7cd691af75..22860c0cae 100644
--- a/ops/kubernetes/nomad/Chart.yaml
+++ b/ops/kubernetes/nomad/Chart.yaml
@@ -1,13 +1,45 @@
 apiVersion: v2
 name: nomad
-description: A Helm chart for Kubernetes that only runs nomad services and uses externally hosted databases.
+description: A Helm chart for Kubernetes
+
+# A chart can be either an 'application' or a 'library' chart.
+#
+# Application charts are a collection of templates that can be packaged into versioned archives
+# to be deployed.
+#
+# Library charts provide useful utilities or functions for the chart developer. They're included as
+# a dependency of application charts to inject those utilities and functions into the rendering
+# pipeline. Library charts do not define any templates and therefore cannot be deployed.
 type: application
-version: 0.0.2
+
+# This is the chart version. This version number should be incremented each time you make changes
+# to the chart and its templates, including the app version.
+# Versions are expected to follow Semantic Versioning (https://semver.org/)
+version: 1.0.0
+
+# This is the version number of the application being deployed. This version number should be
+# incremented each time you make changes to the application. Versions are not expected to
+# follow Semantic Versioning. They should reflect the version the application is using.
+# It is recommended to use it with quotes.
+appVersion: "1.2.2"
+
 dependencies:
-  - name: rabbitmq
-    version: "11.2.2"
-    repository: "https://charts.bitnami.com/bitnami"
-  - name: jupyterhub
-    version: "1.2.0"
-    repository: "https://jupyterhub.github.io/helm-chart/"
-    condition: north.enabled
+- name: rabbitmq
+  version: "11.2.2"
+  repository: "https://charts.bitnami.com/bitnami"
+  condition: nomad.enabled
+- name: elasticsearch
+  condition: elasticsearch.enabled
+  version: 7.17.3
+  repository: https://helm.elastic.co
+- name: mongodb
+  condition: mongodb.enabled
+  version: 14.0.4
+  repository: oci://registry-1.docker.io/bitnamicharts
+- name: jupyterhub
+  version: "1.2.0"
+  repository: "https://jupyterhub.github.io/helm-chart/"
+  condition: nomad.config.north.enabled
+
+
+
diff --git a/ops/kubernetes/nomad/README.md b/ops/kubernetes/nomad/README.md
index 979dc192b5..0537b76a8c 100644
--- a/ops/kubernetes/nomad/README.md
+++ b/ops/kubernetes/nomad/README.md
@@ -1,16 +1,5 @@
-## Cluster deployment, using Kubernetes and Helm
+A first version of a `nomad` Helm chart that includes all services,
+including elastic and mongo.
 
-We use helm charts to describe the deployment of nomad services in a kubernetes cluster.
-The NOMAD chart is part of the
-[NOMAD source code](https://gitlab.mpcdf.mpg.de/nomad-lab/nomad-FAIR)
-and can be found under `ops/helm/nomad`.
-
-This chart allows to run the nomad app, worker, gui, and proxy in a kubernetes cluster.
-The `values.yaml` contains more documentation on the different values.
-
-The chart can be used to run multiple nomad instances in parallel on the same cluster,
-by using different URL-path and database names.
-
-The chart does not run any databases and search engines. Those are supposed to run
-separately (see also *nomad-full* for an alternative approach) and their hosts, etc.
-can be configures via helm values.
+At the moment, everything is just loosely stitched together. Especially,
+`nomad.config.mongo.host` still needs to be configured manually.
\ No newline at end of file
diff --git a/ops/kubernetes/nomad/templates/NOTES.txt b/ops/kubernetes/nomad/templates/NOTES.txt
index c32a7790f1..ec8177584e 100644
--- a/ops/kubernetes/nomad/templates/NOTES.txt
+++ b/ops/kubernetes/nomad/templates/NOTES.txt
@@ -1,8 +1,22 @@
-Thank you for installing {{ .Chart.Name }}.
-
-Your release is named {{ .Release.Name }}.
-
-To learn more about the release, try:
-
-  $ helm status {{ .Release.Name }}
-  $ helm get all {{ .Release.Name }}
\ No newline at end of file
+1. Get the application URL by running these commands:
+{{- if .Values.nomad.ingress.enabled }}
+{{- range $host := .Values.nomad.ingress.hosts }}
+  {{- range .paths }}
+  http{{ if $.Values.nomad.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
+  {{- end }}
+{{- end }}
+{{- else if contains "NodePort" .Values.nomad.proxy.service.type }}
+  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "nomad.fullname" . }})
+  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+  echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" .Values.nomad.proxy.service.type }}
+     NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+           You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "nomad.fullname" . }}'
+  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "nomad.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
+  echo http://$SERVICE_IP:{{ .Values.nomad.proxy.service.port }}
+{{- else if contains "ClusterIP" .Values.nomad.proxy.service.type }}
+  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "nomad.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+  export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
+  echo "Visit http://127.0.0.1:8080 to use your application"
+  kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
+{{- end }}
diff --git a/ops/kubernetes/nomad/templates/api-deployment.yaml b/ops/kubernetes/nomad/templates/api-deployment.yaml
deleted file mode 100644
index 5261d06020..0000000000
--- a/ops/kubernetes/nomad/templates/api-deployment.yaml
+++ /dev/null
@@ -1,223 +0,0 @@
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: {{ include "nomad.fullname" . }}-app-uvicorn-log-config
-  labels:
-    app.kubernetes.io/name: {{ include "nomad.name" . }}-app-uvicorn-log-config
-    helm.sh/chart: {{ include "nomad.chart" . }}
-    app.kubernetes.io/instance: {{ .Release.Name }}
-    app.kubernetes.io/managed-by: {{ .Release.Service }}
-data:
-  uvicorn.log.conf: |
-    [loggers]
-    keys=root, uvicorn.access, gunicorn.access
-
-    [handlers]
-    keys=console, logstash
-
-    [formatters]
-    keys=generic, logstash
-
-    [logger_root]
-    level=INFO
-    handlers=console, logstash
-
-    [logger_uvicorn.access]
-    level=INFO
-    handlers=console, logstash
-    qualname=uvicorn.access
-
-    [logger_gunicorn.access]
-    level=INFO
-    handlers=console, logstash
-    qualname=gunicorn.access
-
-    [handler_console]
-    class=StreamHandler
-    formatter=generic
-    args=(sys.stdout, )
-
-    [handler_logstash]
-    class=nomad.utils.structlogging.LogstashHandler
-    formatter=logstash
-
-    [formatter_generic]
-    format=%(asctime)s [%(process)d] [%(levelname)s] %(message)s
-    datefmt=%Y-%m-%d %H:%M:%S
-    class=logging.Formatter
-
-    [formatter_logstash]
-    class=nomad.utils.structlogging.LogstashFormatter
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: {{ include "nomad.fullname" . }}-app
-  labels:
-    app.kubernetes.io/name: {{ include "nomad.name" . }}-app
-    helm.sh/chart: {{ include "nomad.chart" . }}
-    app.kubernetes.io/instance: {{ .Release.Name }}
-    app.kubernetes.io/managed-by: {{ .Release.Service }}
-spec:
-  replicas: {{ .Values.app.replicas }}
-  selector:
-    matchLabels:
-      app.kubernetes.io/name: {{ include "nomad.name" . }}-app
-      app.kubernetes.io/instance: {{ .Release.Name }}
-  template:
-    metadata:
-      labels:
-        app.kubernetes.io/name: {{ include "nomad.name" . }}-app
-        app.kubernetes.io/instance: {{ .Release.Name }}
-      {{ if .Values.roll }}
-      annotations:
-        rollme: {{ randAlphaNum 5 | quote }}
-      {{ end }}
-    spec:
-      containers:
-      - name: {{ include "nomad.name" . }}-app
-        image: "{{ .Values.image.name }}:{{ .Values.image.tag }}"
-        imagePullPolicy: {{ .Values.image.pullPolicy }}
-        volumeMounts:
-        - mountPath: /app/nomad.yaml
-          name: nomad-conf
-          subPath: nomad.yaml
-        - mountPath: /app/uvicorn.log.conf
-          name: uvicorn-log-conf
-          subPath: uvicorn.log.conf
-        - mountPath: /app/.volumes/fs/public
-          name: public-volume
-        - mountPath: /app/.volumes/fs/staging
-          name: staging-volume
-        - mountPath: /app/.volumes/fs/north/users
-          name: north-home-volume
-        - mountPath: /nomad
-          name: nomad-volume
-        env:
-        - name: NOMAD_META_SERVICE
-          value: "app"
-        - name: NOMAD_CONSOLE_LOGLEVEL
-          value: "{{ .Values.app.console_loglevel }}"
-        - name: NOMAD_LOGSTASH_LEVEL
-          value: "{{ .Values.app.logstash_loglevel }}"
-       {{ if .Values.api.apiSecret }}
-        - name: NOMAD_SERVICES_API_SECRET
-          valueFrom:
-            secretKeyRef:
-              name: {{ .Values.api.apiSecret }}
-              key: password
-        {{ end }}
-        {{ if .Values.keycloak.clientSecret }}
-        - name: NOMAD_KEYCLOAK_CLIENT_SECRET
-          valueFrom:
-            secretKeyRef:
-              name: {{ .Values.keycloak.clientSecret }}
-              key: password
-        {{ end }}
-        {{ if .Values.client.passwordSecret }}
-        - name: NOMAD_CLIENT_PASSWORD
-          valueFrom:
-            secretKeyRef:
-              name: {{ .Values.client.passwordSecret }}
-              key: password
-        {{ end }}
-        {{ if .Values.keycloak.passwordSecret }}
-        - name: NOMAD_KEYCLOAK_PASSWORD
-          valueFrom:
-            secretKeyRef:
-              name: {{ .Values.keycloak.passwordSecret }}
-              key: password
-        {{ end }}
-        {{ if .Values.datacite.secret }}
-        - name: NOMAD_DATACITE_PASSWORD
-          valueFrom:
-            secretKeyRef:
-              name: {{ .Values.datacite.secret }}
-              key: password
-        - name: NOMAD_DATACITE_USER
-          valueFrom:
-            secretKeyRef:
-              name: {{ .Values.datacite.secret }}
-              key: user
-        {{ end }}
-        {{ if .Values.elastic.secret }}
-        - name: NOMAD_ELASTIC_USERNAME
-          valueFrom:
-            secretKeyRef:
-              name: {{ .Values.elastic.secret }}
-              key: username
-        - name: NOMAD_ELASTIC_PASSWORD
-          valueFrom:
-            secretKeyRef:
-              name: {{ .Values.elastic.secret }}
-              key: password
-        {{ end }}
-        {{ if .Values.mongo.secret }}
-        - name: NOMAD_MONGO_USERNAME
-          valueFrom:
-            secretKeyRef:
-              name: {{ .Values.mongo.secret }}
-              key: username
-        - name: NOMAD_MONGO_PASSWORD
-          valueFrom:
-            secretKeyRef:
-              name: {{ .Values.mongo.secret }}
-              key: password
-        {{ end }}
-        {{ if .Values.north.hubServiceApiTokenSecret }}
-        - name: NOMAD_NORTH_HUB_SERVICE_API_TOKEN
-          valueFrom:
-            secretKeyRef:
-              name: {{ .Values.north.hubServiceApiTokenSecret }}
-              key: token
-        {{ end }}
-        command: ["/bin/sh", "run.sh", "--log-config", "uvicorn.log.conf", "--workers", "{{ .Values.app.worker }}"]
-        livenessProbe:
-          httpGet:
-            path: "{{ .Values.proxy.external.path }}/alive"
-            port: 8000
-          initialDelaySeconds: 60
-          periodSeconds: 60
-          timeoutSeconds: {{ .Values.proxy.timeout }}
-        readinessProbe:
-          httpGet:
-            path: "{{ .Values.proxy.external.path }}/alive"
-            port: 8000
-          initialDelaySeconds: 60
-          periodSeconds: 30
-          timeoutSeconds: 15
-      nodeSelector:
-        nomadtype: {{ .Values.app.nomadNodeType }}
-      imagePullSecrets:
-      - name: {{ .Values.image.secret }}
-      volumes:
-      - name: uvicorn-log-conf
-        configMap:
-          name: {{ include "nomad.fullname" . }}-app-uvicorn-log-config
-      - name: app-run-script
-        configMap:
-          name: {{ include "nomad.fullname" . }}-app-run-script
-      - name: nomad-conf
-        configMap:
-          name: {{ include "nomad.fullname" . }}-configmap
-      - name: public-volume
-        hostPath:
-          path: {{ .Values.volumes.public }}
-          type: Directory
-      - name: staging-volume
-        {{ if (eq .Values.worker.storage "memory") }}
-        emptyDir:
-          medium: 'Memory'
-        {{ else }}
-        hostPath:
-          path: {{ .Values.volumes.staging}}
-          type: Directory
-        {{ end }}
-      - name: north-home-volume
-        hostPath:
-          path: {{ .Values.volumes.north_home}}
-          type: Directory
-      - name: nomad-volume
-        hostPath:
-          path: {{ .Values.volumes.nomad }}
-          type: Directory
diff --git a/ops/kubernetes/nomad/templates/api-service.yaml b/ops/kubernetes/nomad/templates/api-service.yaml
deleted file mode 100644
index b2d59be505..0000000000
--- a/ops/kubernetes/nomad/templates/api-service.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
-apiVersion: v1
-kind: Service
-metadata:
-  name: {{ include "nomad.fullname" . }}-app
-  labels:
-    app.kubernetes.io/name: {{ include "nomad.name" . }}-app
-    helm.sh/chart: {{ include "nomad.chart" . }}
-    app.kubernetes.io/instance: {{ .Release.Name }}
-    app.kubernetes.io/managed-by: {{ .Release.Service }}
-spec:
-  type: ClusterIP
-  ports:
-    - port: 8000
-      targetPort: 8000
-      protocol: TCP
-      name: http
-  selector:
-    app.kubernetes.io/name: {{ include "nomad.name" . }}-app
-    app.kubernetes.io/instance: {{ .Release.Name }}
diff --git a/ops/kubernetes/nomad/templates/app/configmap.yml b/ops/kubernetes/nomad/templates/app/configmap.yml
new file mode 100644
index 0000000000..bc05a4cf91
--- /dev/null
+++ b/ops/kubernetes/nomad/templates/app/configmap.yml
@@ -0,0 +1,40 @@
+{{- if .Values.nomad.enabled -}}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "nomad.fullname" . }}-configmap-app-uvicorn-log-config
+  labels:
+    {{- include "nomad.labels" . | nindent 4 }}
+    app.kubernetes.io/component: app
+data:
+  uvicorn.log.conf: |
+    [loggers]
+    keys=root
+
+    [handlers]
+    keys=console, logstash
+
+    [formatters]
+    keys=generic, logstash
+
+    [logger_root]
+    level=INFO
+    handlers=console, logstash
+
+    [handler_console]
+    class=StreamHandler
+    formatter=generic
+    args=(sys.stdout, )
+
+    [handler_logstash]
+    class=nomad.utils.structlogging.LogstashHandler
+    formatter=logstash
+
+    [formatter_generic]
+    format=%(asctime)s [%(process)d] [%(levelname)s] %(message)s
+    datefmt=%Y-%m-%d %H:%M:%S
+    class=logging.Formatter
+
+    [formatter_logstash]
+    class=nomad.utils.structlogging.LogstashFormatter
+{{- end }}
\ No newline at end of file
diff --git a/ops/kubernetes/nomad/templates/app/deployment.yaml b/ops/kubernetes/nomad/templates/app/deployment.yaml
new file mode 100644
index 0000000000..f02c6287f6
--- /dev/null
+++ b/ops/kubernetes/nomad/templates/app/deployment.yaml
@@ -0,0 +1,194 @@
+{{- if .Values.nomad.enabled -}}
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: {{ include "nomad.fullname" . }}-app
+  labels:
+    {{- include "nomad.labels" . | nindent 4 }}
+    app.kubernetes.io/component: app
+spec:
+  replicas: {{ .Values.nomad.app.replicaCount }}
+  selector:
+    matchLabels:
+      {{- include "nomad.selectorLabels" . | nindent 6 }}
+      app.kubernetes.io/component: app
+  template:
+    metadata:
+      {{- with .Values.nomad.worker.podAnnotations }}
+      annotations:
+        {{- toYaml . | nindent 8 }}
+        {{- if .Values.roll }}
+        rollme: {{ randAlphaNum 5 | quote }}
+        {{- end }}
+      {{- else }}
+      {{- if .Values.roll }}
+      annotations:
+        rollme: {{ randAlphaNum 5 | quote }}
+      {{- end }}
+      {{- end }}
+      labels:
+        {{- include "nomad.labels" . | nindent 8 }}
+        {{- with .Values.nomad.app.podLabels }}
+        {{- toYaml . | nindent 8 }}
+        {{- end }}
+        app.kubernetes.io/component: app
+    spec:
+      {{- with .Values.nomad.imagePullSecrets }}
+      imagePullSecrets:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      serviceAccountName: {{ include "nomad.serviceAccountName" . }}
+      {{- with .Values.nomad.app.podSecurityContext }}
+      securityContext:
+        {{- . | toYaml | nindent 8 }}
+      {{- end }}
+      containers:
+        - name: {{ .Chart.Name }}-app
+          {{- with .Values.nomad.app.securityContext }}
+          securityContext:
+            {{- . | toYaml | nindent 12 }}
+          {{- end }}
+          image: "{{ .Values.nomad.image.repository }}:{{ .Values.nomad.image.tag | default .Chart.AppVersion }}"
+          imagePullPolicy: {{ .Values.nomad.image.pullPolicy }}
+          ports:
+            - name: http
+              containerPort: {{ .Values.nomad.app.service.port }}
+              protocol: TCP
+          livenessProbe:
+            httpGet:
+              path: "{{ .Values.nomad.config.proxy.external.path }}/alive"
+              port: 8000
+            initialDelaySeconds: 60
+            periodSeconds: 30
+            timeoutSeconds: 5
+          readinessProbe:
+            httpGet:
+              path: "{{ .Values.nomad.config.proxy.external.path }}/alive"
+              port: 8000
+            initialDelaySeconds: 60
+            periodSeconds: 15
+            timeoutSeconds: 5
+          {{- with .Values.nomad.app.resources }}
+          resources:
+            {{- . | toYaml | nindent 12 }}
+          {{- end }}
+          volumeMounts:
+            - mountPath: /app/nomad.yaml
+              name: nomad-conf
+              subPath: nomad.yaml
+            - mountPath: /app/uvicorn.log.conf
+              name: uvicorn-log-conf
+              subPath: uvicorn.log.conf
+            - mountPath: /app/.volumes/fs/public
+              name: public-volume
+            - mountPath: /app/.volumes/fs/staging
+              name: staging-volume
+            - mountPath: /app/.volumes/fs/north/users
+              name: north-home-volume
+            - mountPath: /nomad
+              name: nomad-volume
+            {{- with .Values.nomad.volumeMounts }}
+            {{- toYaml . | nindent 12 }}
+            {{- end }}
+          env:
+            - name: NOMAD_META_SERVICE
+              value: "app"
+            - name: NOMAD_CONSOLE_LOGLEVEL
+              value: "{{ .Values.nomad.config.app.console_loglevel }}"
+            - name: NOMAD_LOGSTASH_LEVEL
+              value: "{{ .Values.nomad.config.app.logstash_loglevel }}"
+            {{- if .Values.nomad.config.api.apiSecret }}
+            - name: NOMAD_SERVICES_API_SECRET
+              valueFrom:
+                secretKeyRef:
+                  name: {{ .Values.nomad.config.api.apiSecret }}
+                  key: password
+            {{- end }}
+            {{- if .Values.nomad.config.keycloak.clientSecret }}
+            - name: NOMAD_KEYCLOAK_CLIENT_SECRET
+              valueFrom:
+                secretKeyRef:
+                  name: {{ .Values.nomad.config.keycloak.clientSecret }}
+                  key: password
+            {{- end }}
+            {{- if .Values.nomad.config.client.passwordSecret }}
+            - name: NOMAD_CLIENT_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: {{ .Values.nomad.config.client.passwordSecret }}
+                  key: password
+            {{- end }}
+            {{- if .Values.nomad.config.keycloak.passwordSecret }}
+            - name: NOMAD_KEYCLOAK_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: {{ .Values.nomad.config.keycloak.passwordSecret }}
+                  key: password
+            {{- end }}
+            {{- if and .Values.nomad.config.datacite.enabled .Values.nomad.config.datacite.secret }}
+            - name: NOMAD_DATACITE_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: {{ .Values.nomad.config.datacite.secret }}
+                  key: password
+            - name: NOMAD_DATACITE_USER
+              valueFrom:
+                secretKeyRef:
+                  name: {{ .Values.nomad.config.datacite.secret }}
+                  key: user
+            {{- end }}
+            {{- if and .Values.nomad.config.north.enabled .Values.nomad.config.north.hubServiceApiTokenSecret }}
+            - name: NOMAD_NORTH_HUB_SERVICE_API_TOKEN
+              valueFrom:
+                secretKeyRef:
+                  name: {{ .Values.nomad.config.north.hubServiceApiTokenSecret }}
+                  key: token
+            {{- end }}
+          command: ["python", "-m", "nomad.cli", "admin", "run", "app", "--log-config", "uvicorn.log.conf", "--with-gui", "--host", "0.0.0.0"]
+      volumes:
+        {{- with .Values.nomad.volumes }}
+        {{- toYaml . | nindent 8 }}
+        {{- end }}
+        - name: uvicorn-log-conf
+          configMap:
+            name: {{ include "nomad.fullname" . }}-configmap-app-uvicorn-log-config
+        - name: app-run-script
+          configMap:
+            name: {{ include "nomad.fullname" . }}-app-run-script
+        - name: nomad-conf
+          configMap:
+            name: {{ include "nomad.fullname" . }}-configmap
+        - name: public-volume
+          hostPath:
+            path: {{ .Values.nomad.config.volumes.public }}
+            # type: Directory
+        - name: staging-volume
+          {{ if (eq .Values.nomad.config.worker.storage "memory") }}
+          emptyDir:
+            medium: 'Memory'
+          {{ else }}
+          hostPath:
+            path: {{ .Values.nomad.config.volumes.staging }}
+            # type: Directory
+          {{ end }}
+        - name: north-home-volume
+          hostPath:
+            path: {{ .Values.nomad.config.volumes.north_home }}
+            # type: Directory
+        - name: nomad-volume
+          hostPath:
+            path: {{ .Values.nomad.config.volumes.nomad }}
+            # type: Directory
+      {{- with .Values.nomad.app.nodeSelector }}
+      nodeSelector:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with .Values.nomad.app.affinity }}
+      affinity:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with .Values.nomad.app.tolerations }}
+      tolerations:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+{{- end}}
\ No newline at end of file
diff --git a/ops/kubernetes/nomad/templates/app/service.yaml b/ops/kubernetes/nomad/templates/app/service.yaml
new file mode 100644
index 0000000000..4f90ba6869
--- /dev/null
+++ b/ops/kubernetes/nomad/templates/app/service.yaml
@@ -0,0 +1,20 @@
+{{- if .Values.nomad.enabled -}}
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "nomad.fullname" . }}-app
+  labels:
+    {{- include "nomad.labels" . | nindent 4 }}
+    app.kubernetes.io/component: app
+spec:
+  type: {{ .Values.nomad.app.service.type }}
+  ports:
+    - port: {{ .Values.nomad.app.service.port }}
+      # targetPort: http
+      targetPort: 8000
+      protocol: TCP
+      name: http
+  selector:
+    {{- include "nomad.selectorLabels" . | nindent 4 }}
+    app.kubernetes.io/component: app
+{{- end }}
diff --git a/ops/kubernetes/nomad/templates/configmap.yml b/ops/kubernetes/nomad/templates/configmap.yml
new file mode 100644
index 0000000000..f1983e7e6f
--- /dev/null
+++ b/ops/kubernetes/nomad/templates/configmap.yml
@@ -0,0 +1,135 @@
+{{- if .Values.nomad.enabled -}}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "nomad.fullname" . }}-configmap
+  labels:
+    app.kubernetes.io/name: {{ include "nomad.name" . }}-configmap
+    {{- include "nomad.labels" . | nindent 4 }}
+data:
+  nomad.yaml: |
+    meta:
+      deployment: "{{ .Release.Name }}"
+      service: "{{ .Values.nomad.config.meta.service }}"
+      homepage: "{{ .Values.nomad.config.meta.homepage }}"
+      source_url: "{{ .Values.nomad.config.meta.source_url }}"
+      maintainer_email: "{{ .Values.nomad.config.meta.maintainer_email }}"
+      beta:
+        label: "{{ .Values.nomad.config.version.label }}"
+        isBeta: {{ .Values.nomad.config.version.isBeta }}
+        isTest: {{ .Values.nomad.config.version.isTest }}
+        usesBetaData: {{ .Values.nomad.config.version.usesBetaData }}
+        officialUrl: "{{ .Values.nomad.config.version.officialUrl }}"
+    process:
+      reuse_parser: {{ .Values.nomad.config.process.reuseParser }}
+      index_materials: {{ .Values.nomad.config.process.indexMaterials }}
+      rfc3161_skip_published: {{ .Values.nomad.config.process.rfc3161_skip_published }}
+    reprocess:
+      rematch_published: {{ .Values.nomad.config.reprocess.rematchPublished }}
+      reprocess_existing_entries: {{ .Values.nomad.config.reprocess.reprocessExistingEntries }}
+      use_original_parser: {{ .Values.nomad.config.reprocess.useOriginalParser }}
+      add_matched_entries_to_published: {{ .Values.nomad.config.reprocess.addMatchedEntriesToPublished }}
+      delete_unmatched_published_entries: {{ .Values.nomad.config.reprocess.deleteUnmatchedPublishedEntries }}
+      index_individual_entries: {{ .Values.nomad.config.reprocess.indexIndividualEntries }}
+    fs:
+      tmp: ".volumes/fs/staging/tmp"
+      staging_external: {{ .Values.nomad.config.volumes.staging }}
+      public_external: {{ .Values.nomad.config.volumes.public }}
+      north_home_external: {{ .Values.nomad.config.volumes.north_home }}
+      prefix_size: {{ .Values.nomad.config.volumes.prefixSize }}
+      working_directory: /app
+      {{ if .Values.nomad.config.volumes.archiveVersionSuffix }}
+      archive_version_suffix: "{{ .Values.nomad.config.volumes.archiveVersionSuffix }}"
+      {{ end }}
+    logstash:
+      enabled: {{ .Values.nomad.config.logstash.enabled }}
+      host: "{{ .Values.nomad.config.logstash.host }}"
+      tcp_port: {{ .Values.nomad.config.logstash.port }}
+    services:
+      api_host: "{{ .Values.nomad.config.proxy.external.host }}"
+      api_port: {{ .Values.nomad.config.proxy.external.port }}
+      api_base_path: "{{ .Values.nomad.config.proxy.external.path }}"
+      api_secret: "{{ .Values.nomad.config.api.secret }}"
+      https: {{ .Values.nomad.config.proxy.external.https }}
+      upload_limit: {{ .Values.nomad.config.api.uploadLimit }}
+      admin_user_id: {{ .Values.nomad.config.keycloak.admin_user_id }}
+      aitoolkit_enabled: {{ .Values.nomad.config.services.aitoolkit.enabled }}
+    rabbitmq:
+      host: "{{ .Release.Name }}-rabbitmq"
+    elastic:
+      {{- if .Values.nomad.config.elastic.host }}
+      host: "{{ .Values.nomad.config.elastic.host }}"
+      {{- else }}
+      host: "elasticsearch-master"
+      {{- end }}
+      port: {{ .Values.nomad.config.elastic.port }}
+      timeout: {{ .Values.nomad.config.elastic.timeout }}
+      bulk_timeout: {{ .Values.nomad.config.elastic.bulkTimeout }}
+      bulk_size: {{ .Values.nomad.config.elastic.bulkSize }}
+      entries_per_material_cap: {{ .Values.nomad.config.elastic.entriesPerMaterialCap }}
+      {{ if .Values.nomad.config.elastic.dbname }}
+      entries_index: "{{ .Values.nomad.config.elastic.dbname }}_entries_v1"
+      materials_index: "{{ .Values.nomad.config.elastic.dbname }}_materials_v1"
+      {{ else }}
+      entries_index: "{{ .Values.nomad.config.dbname }}_entries_v1"
+      materials_index: "{{ .Values.nomad.config.dbname }}_materials_v1"
+      {{ end }}
+    mongo:
+      {{ $secret := (lookup "v1" "Secret" .Release.Namespace (print .Release.Name "-mongodb")) }}
+      {{ if $secret }}
+      host: "mongodb://root:{{ index $secret.data "mongodb-root-password" | b64dec }}@{{ .Values.nomad.config.mongo.host }}"
+      {{ else }}
+      host: "mongodb://{{ .Values.nomad.config.mongo.host }}"
+      {{ end }}
+      port: {{ .Values.nomad.config.mongo.port }}
+      db_name: "{{ .Values.nomad.config.dbname }}"
+    mail:
+      enabled: {{ .Values.nomad.config.mail.enabled }}
+      host: "{{ .Values.nomad.config.mail.host }}"
+      {{ if .Values.nomad.config.mail.port }}
+      port: {{ .Values.nomad.config.mail.port }}
+      {{ end }}
+      {{ if .Values.nomad.config.mail.user }}
+      user: "{{ .Values.nomad.config.mail.user }}"
+      {{ end }}
+      {{ if .Values.nomad.config.mail.password }}
+      password: "{{ .Values.nomad.config.mail.password }}"
+      {{ end }}
+      from_address: "{{ .Values.nomad.config.mail.from }}"
+      {{ if .Values.nomad.config.mail.cc_adress }}
+      cc_address: "{{ .Values.nomad.config.mail.cc_adress }}"
+      {{ else }}
+      cc_address: null
+      {{ end }}
+    celery:
+      routing: "{{ .Values.nomad.config.worker.routing }}"
+      timeout: {{ .Values.nomad.config.worker.timeout }}
+      acks_late: {{ .Values.nomad.config.worker.acks_late }}
+    client:
+      user: "{{ .Values.nomad.config.client.username }}"
+    keycloak:
+      server_url: "{{ .Values.nomad.config.keycloak.serverUrl }}"
+      realm_name: "{{ .Values.nomad.config.keycloak.realmName }}"
+      username: "{{ .Values.nomad.config.keycloak.username }}"
+      client_id: "{{ .Values.nomad.config.keycloak.clientId }}"
+    datacite:
+      enabled: {{ .Values.nomad.config.datacite.enabled }}
+      prefix: "{{ .Values.nomad.config.datacite.prefix }}"
+    {{ if .Values.nomad.gui.config }}
+    ui: {{ .Values.nomad.gui.config | toYaml | nindent 6 }}
+    {{ end }}
+    north:
+      enabled: {{ .Values.nomad.config.north.enabled }}
+      hub_host: "{{ .Values.nomad.config.proxy.external.host }}"
+      hub_port: {{ .Values.nomad.config.proxy.external.port }}
+      hub_service_api_token: "{{ .Values.nomad.config.north.hubServiceApiToken }}"
+    {{ if .Values.nomad.archive }}
+    archive: {{ .Values.nomad.archive | toYaml | nindent 6 }}
+    {{ end }}
+    {{ if .Values.nomad.plugins }}
+    plugins: {{ .Values.nomad.plugins | toYaml | nindent 6 }}
+    {{ end }}
+    {{ if .Values.nomad.normalize }}
+    normalize: {{ .Values.nomad.normalize | toYaml | nindent 6 }}
+    {{ end }}
+{{- end }}
diff --git a/ops/kubernetes/nomad/templates/ingress.yaml b/ops/kubernetes/nomad/templates/ingress.yaml
index 5d4f7afa97..9bb37fa0e0 100644
--- a/ops/kubernetes/nomad/templates/ingress.yaml
+++ b/ops/kubernetes/nomad/templates/ingress.yaml
@@ -1,35 +1,61 @@
-{{- if .Values.ingress.enabled -}}
+{{- if .Values.nomad.ingress.enabled -}}
 {{- $fullName := include "nomad.fullname" . -}}
-{{- $servicePort := 80 -}}
-{{- $ingressPath := .Values.proxy.external.path -}}
+{{- $svcPort := .Values.nomad.proxy.service.port -}}
+{{- if and .Values.nomad.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }}
+  {{- if not (hasKey .Values.nomad.ingress.annotations "kubernetes.io/ingress.class") }}
+  {{- $_ := set .Values.nomad.ingress.annotations "kubernetes.io/ingress.class" .Values.nomad.ingress.className}}
+  {{- end }}
+{{- end }}
+{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}}
 apiVersion: networking.k8s.io/v1
+{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
+apiVersion: networking.k8s.io/v1beta1
+{{- else -}}
+apiVersion: extensions/v1beta1
+{{- end }}
 kind: Ingress
 metadata:
   name: {{ $fullName }}
   labels:
-    app: {{ .Chart.Name }}
-    release: {{ .Release.Name }}
-    heritage: {{ .Release.Service }}
-{{- with .Values.ingress.annotations }}
+    {{- include "nomad.labels" . | nindent 4 }}
+  {{- with .Values.nomad.ingress.annotations }}
   annotations:
-{{ toYaml . | indent 4 }}
-{{- end }}
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
 spec:
-{{- if .Values.ingress.tls }}
+  {{- if and .Values.nomad.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }}
+  ingressClassName: {{ .Values.nomad.ingress.className }}
+  {{- end }}
+  {{- if .Values.nomad.ingress.tls }}
   tls:
-{{ toYaml .Values.ingress.tls | indent 4 }}
-{{- end }}
+    {{- range .Values.nomad.ingress.tls }}
+    - hosts:
+        {{- range .hosts }}
+        - {{ . | quote }}
+        {{- end }}
+      secretName: {{ .secretName }}
+    {{- end }}
+  {{- end }}
   rules:
-  {{- range .Values.ingress.hosts }}
-    - host: {{ . }}
+    {{- range .Values.nomad.ingress.hosts }}
+    - host: {{ .host | quote }}
       http:
         paths:
-          - path: {{ $ingressPath }}
-            pathType: ImplementationSpecific
+          {{- range .paths }}
+          - path: {{ .path }}
+            {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }}
+            pathType: {{ .pathType }}
+            {{- end }}
             backend:
+              {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }}
               service:
                 name: {{ $fullName }}-proxy
                 port:
-                  number: {{ $servicePort }}
-  {{- end }}
+                  number: {{ $svcPort }}
+              {{- else }}
+              serviceName: {{ $fullName }}-proxy
+              servicePort: {{ $svcPort }}
+              {{- end }}
+          {{- end }}
+    {{- end }}
 {{- end }}
diff --git a/ops/kubernetes/nomad/templates/nomad-configmap.yml b/ops/kubernetes/nomad/templates/nomad-configmap.yml
deleted file mode 100644
index b65ed46228..0000000000
--- a/ops/kubernetes/nomad/templates/nomad-configmap.yml
+++ /dev/null
@@ -1,127 +0,0 @@
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: {{ include "nomad.fullname" . }}-configmap
-  labels:
-    app.kubernetes.io/name: {{ include "nomad.name" . }}-configmap
-    helm.sh/chart: {{ include "nomad.chart" . }}
-    app.kubernetes.io/instance: {{ .Release.Name }}
-    app.kubernetes.io/managed-by: {{ .Release.Service }}
-data:
-  nomad.yaml: |
-    meta:
-      deployment: "{{ .Release.Name }}"
-      service: "{{ .Values.meta.service }}"
-      homepage: "{{ .Values.meta.homepage }}"
-      source_url: "{{ .Values.meta.source_url }}"
-      maintainer_email: "{{ .Values.meta.maintainer_email }}"
-      beta:
-        label: "{{ .Values.version.label }}"
-        isBeta: {{ .Values.version.isBeta }}
-        isTest: {{ .Values.version.isTest }}
-        usesBetaData: {{ .Values.version.usesBetaData }}
-        officialUrl: "{{ .Values.version.officialUrl }}"
-    process:
-      reuse_parser: {{ .Values.process.reuseParser }}
-      index_materials: {{ .Values.process.indexMaterials }}
-      rfc3161_skip_published: {{ .Values.process.rfc3161_skip_published }}
-    reprocess:
-      rematch_published: {{ .Values.reprocess.rematchPublished }}
-      reprocess_existing_entries: {{ .Values.reprocess.reprocessExistingEntries }}
-      use_original_parser: {{ .Values.reprocess.useOriginalParser }}
-      add_matched_entries_to_published:  {{ .Values.reprocess.addMatchedEntriesToPublished }}
-      delete_unmatched_published_entries: {{ .Values.reprocess.deleteUnmatchedPublishedEntries }}
-      index_individual_entries: {{ .Values.reprocess.indexIndividualEntries }}
-    fs:
-      tmp: ".volumes/fs/staging/tmp"
-      staging_external: {{ .Values.volumes.staging }}
-      public_external: {{ .Values.volumes.public }}
-      north_home_external: {{ .Values.volumes.north_home }}
-      prefix_size: {{ .Values.volumes.prefixSize }}
-      working_directory: /app
-      {{ if .Values.volumes.archiveVersionSuffix }}
-      archive_version_suffix: "{{ .Values.volumes.archiveVersionSuffix }}"
-      {{ end }}
-    logstash:
-      enabled: {{ .Values.logstash.enabled }}
-      host: "{{ .Values.logstash.host }}"
-      tcp_port: {{ .Values.logstash.port }}
-    services:
-      api_host: "{{ .Values.proxy.external.host }}"
-      api_port: {{ .Values.proxy.external.port }}
-      api_base_path: "{{ .Values.proxy.external.path }}"
-      api_secret: "{{ .Values.api.secret }}"
-      api_timeout: "{{ max .Values.proxy.timeout .Values.proxy.editTimeout }}"
-      https: {{ .Values.proxy.external.https }}
-      upload_limit: {{ .Values.api.uploadLimit }}
-      admin_user_id: {{ .Values.keycloak.admin_user_id }}
-      aitoolkit_enabled: {{ .Values.services.aitoolkit.enabled }}
-    rabbitmq:
-      host: "{{ .Release.Name }}-rabbitmq"
-    elastic:
-      host: "{{ .Values.elastic.host }}"
-      port: {{ .Values.elastic.port }}
-      timeout: {{ .Values.elastic.timeout }}
-      bulk_timeout: {{ .Values.elastic.bulkTimeout }}
-      bulk_size: {{ .Values.elastic.bulkSize }}
-      entries_per_material_cap: {{ .Values.elastic.entriesPerMaterialCap }}
-      {{ if .Values.elastic.dbname }}
-      entries_index: "{{ .Values.elastic.dbname }}_entries_v1"
-      materials_index: "{{ .Values.elastic.dbname }}_materials_v1"
-      {{ else }}
-      entries_index: "{{ .Values.dbname }}_entries_v1"
-      materials_index: "{{ .Values.dbname }}_materials_v1"
-      {{ end }}
-    mongo:
-      host: "{{ .Values.mongo.host }}"
-      port: {{ .Values.mongo.port }}
-      db_name: "{{ .Values.dbname }}"
-    mail:
-      enabled: {{ .Values.mail.enabled }}
-      host: "{{ .Values.mail.host }}"
-      {{ if .Values.mail.port }}
-      port: {{ .Values.mail.port }}
-      {{ end }}
-      {{ if .Values.mail.user }}
-      user: "{{ .Values.mail.user }}"
-      {{ end }}
-      {{ if .Values.mail.password }}
-      password: "{{ .Values.mail.password }}"
-      {{ end }}
-      from_address: "{{ .Values.mail.from }}"
-      {{ if .Values.mail.cc_adress }}
-      cc_address: "{{ .Values.mail.cc_adress }}"
-      {{ else }}
-      cc_address: null
-      {{ end }}
-    celery:
-      routing: "{{ .Values.worker.routing }}"
-      timeout: {{ .Values.worker.timeout }}
-      acks_late: {{ .Values.worker.acks_late }}
-    client:
-      user: "{{ .Values.client.username }}"
-    keycloak:
-      server_url: "{{ .Values.keycloak.serverUrl }}"
-      realm_name: "{{ .Values.keycloak.realmName }}"
-      username: "{{ .Values.keycloak.username }}"
-      client_id: "{{ .Values.keycloak.clientId }}"
-    datacite:
-      enabled: {{ .Values.datacite.enabled }}
-      prefix: "{{ .Values.datacite.prefix }}"
-    {{ if .Values.gui.config }}
-    ui: {{ .Values.gui.config | toYaml | nindent 6 }}
-    {{ end }}
-    north:
-      enabled: {{ .Values.north.enabled }}
-      hub_host: "{{ .Values.proxy.external.host }}"
-      hub_port: {{ .Values.proxy.external.port }}
-      hub_service_api_token: "{{ .Values.north.hubServiceApiToken }}"
-    {{ if .Values.archive }}
-    archive: {{ .Values.archive | toYaml | nindent 6 }}
-    {{ end }}
-    {{ if .Values.plugins }}
-    plugins: {{ .Values.plugins | toYaml | nindent 6 }}
-    {{ end }}
-    {{ if .Values.normalize }}
-    normalize: {{ .Values.normalize | toYaml | nindent 6 }}
-    {{ end }}
\ No newline at end of file
diff --git a/ops/kubernetes/nomad/templates/proxy-service.yaml b/ops/kubernetes/nomad/templates/proxy-service.yaml
deleted file mode 100644
index 1953532fc8..0000000000
--- a/ops/kubernetes/nomad/templates/proxy-service.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
-apiVersion: v1
-kind: Service
-metadata:
-  name: {{ include "nomad.fullname" . }}-proxy
-  labels:
-    app.kubernetes.io/name: {{ include "nomad.name" . }}-proxy
-    helm.sh/chart: {{ include "nomad.chart" . }}
-    app.kubernetes.io/instance: {{ .Release.Name }}
-    app.kubernetes.io/managed-by: {{ .Release.Service }}
-spec:
-  {{ if .Values.proxy.nodePort }}
-  type: NodePort
-  externalIPs:
-    - {{ .Values.proxy.nodeIP }}
-  {{ end }}
-  ports:
-    - port: 80
-      targetPort: 80
-      protocol: TCP
-      name: http
-      {{ if .Values.proxy.nodePort }}
-      nodePort: {{ .Values.proxy.nodePort }}
-      {{ end }}
-  selector:
-    app.kubernetes.io/name: {{ include "nomad.name" . }}-proxy
-    app.kubernetes.io/instance: {{ .Release.Name }}
diff --git a/ops/kubernetes/nomad/templates/proxy-deployment.yml b/ops/kubernetes/nomad/templates/proxy/configmap.yml
similarity index 50%
rename from ops/kubernetes/nomad/templates/proxy-deployment.yml
rename to ops/kubernetes/nomad/templates/proxy/configmap.yml
index 3b414b29a8..c1992939ed 100644
--- a/ops/kubernetes/nomad/templates/proxy-deployment.yml
+++ b/ops/kubernetes/nomad/templates/proxy/configmap.yml
@@ -1,15 +1,14 @@
+{{- if .Values.nomad.enabled -}}
 apiVersion: v1
 kind: ConfigMap
 metadata:
-  name: {{ include "nomad.fullname" . }}-proxy-config
+  name: {{ include "nomad.fullname" . }}-configmap-proxy
   labels:
-    app.kubernetes.io/name: {{ include "nomad.name" . }}-proxy-config
-    helm.sh/chart: {{ include "nomad.chart" . }}
-    app.kubernetes.io/instance: {{ .Release.Name }}
-    app.kubernetes.io/managed-by: {{ .Release.Service }}
+    {{- include "nomad.labels" . | nindent 4 }}
+    app.kubernetes.io/component: proxy
 data:
   nginx.conf: |
-    {{ if .Values.north.enabled }}
+    {{- if .Values.nomad.config.north.enabled }}
     # top-level http config for websocket headers
     # If Upgrade is defined, Connection = upgrade
     # If Upgrade is empty, Connection = close
@@ -17,19 +16,19 @@ data:
         default upgrade;
         ''      close;
     }
-    {{ end }}
 
+    {{- end }}
     server {
       listen        80;
       server_name   www.example.com;
       proxy_set_header Host $host;
 
-      proxy_connect_timeout {{ .Values.proxy.timeout }};
-      proxy_read_timeout {{ .Values.proxy.timeout }};
+      proxy_connect_timeout {{ .Values.nomad.config.proxy.timeout }};
+      proxy_read_timeout {{ .Values.nomad.config.proxy.timeout }};
       proxy_pass_request_headers      on;
       underscores_in_headers          on;
 
-      {{ if .Values.gui.gzip }}
+      {{- if .Values.nomad.config.gui.gzip }}
       gzip_min_length     1000;
       gzip_buffers        4 8k;
       gzip_http_version   1.0;
@@ -45,35 +44,35 @@ data:
           application/javascript
           application/x-javascript
           application/json;
-      {{ end }}
+      {{- end }}
 
       location / {
         proxy_pass http://{{ include "nomad.fullname" . }}-app:8000;
       }
 
-      location ~ {{ .Values.proxy.external.path }}\/?(gui)?$ {
-        rewrite ^ {{ .Values.proxy.external.path }}/gui/ permanent;
+      location ~ {{ .Values.nomad.config.proxy.external.path }}\/?(gui)?$ {
+        rewrite ^ {{ .Values.nomad.config.proxy.external.path }}/gui/ permanent;
       }
 
-      location {{ .Values.proxy.external.path }}/gui/ {
+      location {{ .Values.nomad.config.proxy.external.path }}/gui/ {
         proxy_intercept_errors on;
         error_page 404 = @redirect_to_index;
         proxy_pass http://{{ include "nomad.fullname" . }}-app:8000;
       }
 
       location @redirect_to_index {
-        rewrite ^ {{ .Values.proxy.external.path }}/gui/index.html break;
+        rewrite ^ {{ .Values.nomad.config.proxy.external.path }}/gui/index.html break;
         proxy_pass http://{{ include "nomad.fullname" . }}-app:8000;
       }
 
-      location {{ .Values.proxy.external.path }}/docs/ {
+      location {{ .Values.nomad.config.proxy.external.path }}/docs/ {
         proxy_intercept_errors on;
         error_page 404 = @redirect_to_index_docs;
         proxy_pass http://{{ include "nomad.fullname" . }}-app:8000;
       }
 
       location @redirect_to_index_docs {
-        rewrite ^ {{ .Values.proxy.external.path }}/docs/index.html break;
+        rewrite ^ {{ .Values.nomad.config.proxy.external.path }}/docs/index.html break;
         proxy_pass http://{{ include "nomad.fullname" . }}-app:8000;
       }
 
@@ -99,12 +98,12 @@ data:
 
       location ~ /api/v1/entries/edit {
         proxy_buffering off;
-        proxy_read_timeout {{ .Values.proxy.editTimeout }};
+        proxy_read_timeout {{ .Values.nomad.config.proxy.editTimeout }};
         proxy_pass http://{{ include "nomad.fullname" . }}-app:8000;
       }
 
-      {{ if .Values.north.enabled }}
-      location {{ .Values.proxy.external.path }}/north/ {
+      {{- if .Values.nomad.config.north.enabled }}
+      location {{ .Values.nomad.config.proxy.external.path }}/north/ {
           client_max_body_size 500m;
           proxy_pass http://{{ include "jupyterhub.fullname" . }}-proxy-public;
 
@@ -120,69 +119,7 @@ data:
 
           proxy_buffering off;
       }
-      {{ end }}
+      {{- end }}
 
     }
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: {{ include "nomad.fullname" . }}-proxy
-  labels:
-    app.kubernetes.io/name: {{ include "nomad.name" . }}-proxy
-    helm.sh/chart: {{ include "nomad.chart" . }}
-    app.kubernetes.io/instance: {{ .Release.Name }}
-    app.kubernetes.io/managed-by: {{ .Release.Service }}
-spec:
-  replicas: {{ .Values.gui.replicas }}
-  selector:
-    matchLabels:
-      app.kubernetes.io/name: {{ include "nomad.name" . }}-proxy
-      app.kubernetes.io/instance: {{ .Release.Name }}
-  template:
-    metadata:
-      labels:
-        app.kubernetes.io/name: {{ include "nomad.name" . }}-proxy
-        app.kubernetes.io/instance: {{ .Release.Name }}
-      {{ if .Values.roll }}
-      annotations:
-        rollme: {{ randAlphaNum 5 | quote }}
-      {{ end }}
-    spec:
-      containers:
-      - name: {{ include "nomad.name" . }}-proxy
-        image: "nginx:1.13.9-alpine"
-        command: ["nginx", "-g", "daemon off;"]
-        ports:
-        - containerPort: 80
-        volumeMounts:
-        - mountPath: /etc/nginx/conf.d
-          readOnly: true
-          name: nginx-conf
-        - mountPath: /var/log/nginx
-          name: log
-        livenessProbe:
-          httpGet:
-            path: "{{ .Values.proxy.external.path }}/alive"
-            port: 80
-          initialDelaySeconds: 60
-          periodSeconds: 60
-          timeoutSeconds: {{ .Values.proxy.timeout }}
-        readinessProbe:
-          httpGet:
-            path: "{{ .Values.proxy.external.path }}/alive"
-            port: 80
-          initialDelaySeconds: 60
-          periodSeconds: 30
-          timeoutSeconds: 10
-      nodeSelector:
-        nomadtype: public
-      volumes:
-      - name: nginx-conf
-        configMap:
-          name: {{ include "nomad.fullname" . }}-proxy-config
-          items:
-          - key: nginx.conf
-            path: default.conf
-      - name: log
-        emptyDir: {}
+{{- end }}
diff --git a/ops/kubernetes/nomad/templates/proxy/deployment.yaml b/ops/kubernetes/nomad/templates/proxy/deployment.yaml
new file mode 100644
index 0000000000..bcbebd6045
--- /dev/null
+++ b/ops/kubernetes/nomad/templates/proxy/deployment.yaml
@@ -0,0 +1,110 @@
+{{- if .Values.nomad.enabled -}}
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: {{ include "nomad.fullname" . }}-proxy
+  labels:
+    {{- include "nomad.labels" . | nindent 4 }}
+    app.kubernetes.io/component: proxy
+spec:
+  replicas: {{ .Values.nomad.proxy.replicaCount }}
+  selector:
+    matchLabels:
+      {{- include "nomad.selectorLabels" . | nindent 6 }}
+      app.kubernetes.io/component: proxy
+  template:
+    metadata:
+      {{- with .Values.nomad.proxy.podAnnotations }}
+      annotations:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      labels:
+        {{- include "nomad.labels" . | nindent 8 }}
+        {{- with .Values.nomad.podLabels }}
+        {{- toYaml . | nindent 8 }}
+        {{- end }}
+        app.kubernetes.io/component: proxy
+    spec:
+      {{- with .Values.nomad.proxy.imagePullSecrets }}
+      imagePullSecrets:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      # serviceAccountName: {{ include "nomad.serviceAccountName" . }}
+      {{- with .Values.nomad.proxy.podSecurityContext }}
+      securityContext:
+        {{- . | toYaml | nindent 8 }}
+      {{- end }}
+      containers:
+        - name: {{ .Chart.Name }}-proxy
+          {{- with .Values.nomad.proxy.securityContext }}
+          securityContext:
+            {{- . | toYaml | nindent 12 }}
+          {{- end }}
+          image: "{{ .Values.nomad.proxy.image.repository }}:{{ .Values.nomad.proxy.image.tag }}"
+          imagePullPolicy: {{ .Values.nomad.proxy.image.pullPolicy }}
+          {{- with .Values.nomad.proxy.command }}
+          command:
+            {{- range . }}
+            - {{ tpl . $ }}
+            {{- end }}
+          {{- end }}
+          {{- with .Values.nomad.proxy.args }}
+          args:
+            {{- range . }}
+            - {{ tpl . $ }}
+            {{- end }}
+          {{- end }}
+          ports:
+            - name: http
+              containerPort: 80  # nginx listens on 80 (proxy configmap); Service targetPort is hardcoded to 80
+              protocol: TCP
+          livenessProbe:
+            httpGet:
+              path: "{{ .Values.nomad.config.proxy.external.path }}/gui/index.html"
+              port: http
+            initialDelaySeconds: 60
+            periodSeconds: 15
+          readinessProbe:
+            httpGet:
+              path: "{{ .Values.nomad.config.proxy.external.path }}/gui/index.html"
+              port: http
+            initialDelaySeconds: 60
+            periodSeconds: 3
+          {{- with .Values.nomad.proxy.resources }}
+          resources:
+            {{- . | toYaml | nindent 12 }}
+          {{- end }}
+          volumeMounts:
+            - mountPath: /etc/nginx/conf.d
+              readOnly: true
+              name: nginx-conf
+            - mountPath: /var/log/nginx
+              name: log
+          {{- with .Values.nomad.volumeMounts }}
+            {{- toYaml . | nindent 12 }}
+          {{- end }}
+      volumes:
+        - name: nginx-conf
+          configMap:
+            name: {{ include "nomad.fullname" . }}-configmap-proxy
+            items:
+            - key: nginx.conf
+              path: default.conf
+        - name: log
+          emptyDir: {}
+      {{- with .Values.nomad.volumes }}
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with .Values.nomad.proxy.nodeSelector }}
+      nodeSelector:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with .Values.nomad.proxy.affinity }}
+      affinity:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with .Values.nomad.proxy.tolerations }}
+      tolerations:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+{{- end }}
diff --git a/ops/kubernetes/nomad/templates/proxy/service.yaml b/ops/kubernetes/nomad/templates/proxy/service.yaml
new file mode 100644
index 0000000000..2c3205234b
--- /dev/null
+++ b/ops/kubernetes/nomad/templates/proxy/service.yaml
@@ -0,0 +1,20 @@
+{{- if .Values.nomad.enabled -}}
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "nomad.fullname" . }}-proxy
+  labels:
+    {{- include "nomad.labels" . | nindent 4 }}
+    app.kubernetes.io/component: proxy
+spec:
+  type: {{ .Values.nomad.proxy.service.type }}
+  ports:
+    - port: {{ .Values.nomad.proxy.service.port }}
+      # targetPort: http
+      targetPort: 80
+      protocol: TCP
+      name: http
+  selector:
+    {{- include "nomad.selectorLabels" . | nindent 4 }}
+    app.kubernetes.io/component: proxy
+{{- end }}
diff --git a/ops/kubernetes/nomad/templates/serviceaccount.yaml b/ops/kubernetes/nomad/templates/serviceaccount.yaml
new file mode 100644
index 0000000000..2fe49cfbeb
--- /dev/null
+++ b/ops/kubernetes/nomad/templates/serviceaccount.yaml
@@ -0,0 +1,13 @@
+{{- if .Values.serviceAccount.create -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ include "nomad.serviceAccountName" . }}
+  labels:
+    {{- include "nomad.labels" . | nindent 4 }}
+  {{- with .Values.serviceAccount.annotations }}
+  annotations:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
+  # automountServiceAccountToken: {{ .Values.serviceAccount.automount }}
+{{- end }}
diff --git a/ops/kubernetes/nomad/templates/worker-deployment.yaml b/ops/kubernetes/nomad/templates/worker-deployment.yaml
deleted file mode 100644
index ce56a3ea45..0000000000
--- a/ops/kubernetes/nomad/templates/worker-deployment.yaml
+++ /dev/null
@@ -1,150 +0,0 @@
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: {{ include "nomad.fullname" . }}-worker
-  labels:
-    app.kubernetes.io/name: {{ include "nomad.name" . }}-worker
-    helm.sh/chart: {{ include "nomad.chart" . }}
-    app.kubernetes.io/instance: {{ .Release.Name }}
-    app.kubernetes.io/managed-by: {{ .Release.Service }}
-spec:
-  replicas: {{ .Values.worker.replicas }}
-  selector:
-    matchLabels:
-      app.kubernetes.io/name: {{ include "nomad.name" . }}-worker
-      app.kubernetes.io/instance: {{ .Release.Name }}
-  template:
-    metadata:
-      labels:
-        app.kubernetes.io/name: {{ include "nomad.name" . }}-worker
-        app.kubernetes.io/instance: {{ .Release.Name }}
-      {{ if .Values.roll }}
-      annotations:
-        rollme: {{ randAlphaNum 5 | quote }}
-      {{ end }}
-    spec:
-      affinity:
-        podAntiAffinity:
-          requiredDuringSchedulingIgnoredDuringExecution:
-          - topologyKey: kubernetes.io/hostname
-            labelSelector:
-              matchLabels:
-                app.kubernetes.io/name: {{ include "nomad.name" . }}-worker
-                app.kubernetes.io/instance: {{ .Release.Name }}
-      containers:
-      - name: {{ include "nomad.name" . }}-worker
-        image: "{{ .Values.image.name }}:{{ .Values.image.tag }}"
-        imagePullPolicy: {{ .Values.image.pullPolicy }}
-        resources:
-          limits:
-            memory: "{{ .Values.worker.memlimit }}Gi"
-          requests:
-            memory: "{{ .Values.worker.memrequest }}Gi"
-        volumeMounts:
-        - mountPath: /app/nomad.yaml
-          name: nomad-conf
-          subPath: nomad.yaml
-        - mountPath: /app/.volumes/fs/public
-          name: public-volume
-        - mountPath: /app/.volumes/fs/staging
-          name: staging-volume
-        - mountPath: /nomad
-          name: nomad-volume
-        env:
-        - name: NOMAD_META_SERVICE
-          value: "worker"
-        - name: NOMAD_CONSOLE_LOG_LEVEL
-          value: "{{ .Values.worker.console_loglevel }}"
-        - name: NOMAD_LOGSTASH_LEVEL
-          value: "{{ .Values.worker.logstash_loglevel }}"
-        - name: NOMAD_CELERY_NODE_NAME
-          valueFrom:
-            fieldRef:
-              fieldPath: spec.nodeName
-        {{ if .Values.api.apiSecret }}
-        - name: NOMAD_SERVICES_API_SECRET
-          valueFrom:
-            secretKeyRef:
-              name: {{ .Values.api.apiSecret}}
-              key: password
-        {{ end }}
-        {{ if .Values.keycloak.clientSecret }}
-        - name: NOMAD_KEYCLOAK_CLIENT_SECRET
-          valueFrom:
-            secretKeyRef:
-              name: {{ .Values.keycloak.clientSecret }}
-              key: password
-        {{ end }}
-        {{ if .Values.keycloak.passwordSecret }}
-        - name: NOMAD_KEYCLOAK_PASSWORD
-          valueFrom:
-            secretKeyRef:
-              name: {{ .Values.keycloak.passwordSecret }}
-              key: password
-        {{ end }}
-        {{ if .Values.elastic.secret }}
-        - name: NOMAD_ELASTIC_USERNAME
-          valueFrom:
-            secretKeyRef:
-              name: {{ .Values.elastic.secret }}
-              key: username
-        - name: NOMAD_ELASTIC_PASSWORD
-          valueFrom:
-            secretKeyRef:
-              name: {{ .Values.elastic.secret }}
-              key: password
-        {{ end }}
-        {{ if .Values.mongo.secret }}
-        - name: NOMAD_MONGO_USERNAME
-          valueFrom:
-            secretKeyRef:
-              name: {{ .Values.mongo.secret }}
-              key: username
-        - name: NOMAD_MONGO_PASSWORD
-          valueFrom:
-            secretKeyRef:
-              name: {{ .Values.mongo.secret }}
-              key: password
-        {{ end }}
-        command: ["python", "-m", "celery", "-A", "nomad.processing", "worker", "-n", "$(NOMAD_CELERY_NODE_NAME)" {{ if .Values.worker.processes }}, "-c", "{{ .Values.worker.processes }}"{{ end }}{{ if .Values.worker.maxTasksPerChild }}, "--max-tasks-per-child", "{{ .Values.worker.maxTasksPerChild }}"{{ end }}]
-        livenessProbe:
-          exec:
-            command:
-            - bash
-            - -c
-            - NOMAD_LOGSTASH_LEVEL=WARNING python -m celery -A nomad.processing status | grep "$(NOMAD_CELERY_NODE_NAME):.*OK"
-          initialDelaySeconds: 30
-          periodSeconds: 30
-        readinessProbe:
-          exec:
-            command:
-            - bash
-            - -c
-            - NOMAD_LOGSTASH_LEVEL=WARNING python -m celery -A nomad.processing status | grep "${NOMAD_CELERY_NODE_NAME}:.*OK"
-          initialDelaySeconds: 15
-          periodSeconds: 30
-      nodeSelector:
-        nomadtype: {{ .Values.worker.nomadNodeType }}
-      imagePullSecrets:
-      - name: {{ .Values.image.secret }}
-      volumes:
-      - name: nomad-conf
-        configMap:
-          name: {{ include "nomad.fullname" . }}-configmap
-      - name: public-volume
-        hostPath:
-          path: {{ .Values.volumes.public }}
-          type: Directory
-      - name: staging-volume
-        {{ if (eq .Values.worker.storage "memory") }}
-        emptyDir:
-          medium: 'Memory'
-        {{ else }}
-        hostPath:
-          path: {{ .Values.volumes.staging}}
-          type: Directory
-        {{ end }}
-      - name: nomad-volume
-        hostPath:
-          path: {{ .Values.volumes.nomad }}
-          type: Directory
diff --git a/ops/kubernetes/nomad/templates/worker/deployment.yaml b/ops/kubernetes/nomad/templates/worker/deployment.yaml
new file mode 100644
index 0000000000..10a987d189
--- /dev/null
+++ b/ops/kubernetes/nomad/templates/worker/deployment.yaml
@@ -0,0 +1,166 @@
+{{- if .Values.nomad.enabled -}}
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: {{ include "nomad.fullname" . }}-worker
+  labels:
+    {{- include "nomad.labels" . | nindent 4 }}
+    app.kubernetes.io/component: worker
+spec:
+  replicas: {{ .Values.nomad.worker.replicaCount }}
+  selector:
+    matchLabels:
+      {{- include "nomad.selectorLabels" . | nindent 6 }}
+      app.kubernetes.io/component: worker
+  template:
+    metadata:
+      {{- with .Values.nomad.worker.podAnnotations }}
+      annotations:
+        {{- toYaml . | nindent 8 }}
+        {{- if .Values.roll }}
+        rollme: {{ randAlphaNum 5 | quote }}
+        {{- end }}
+      {{- else }}
+      {{- if .Values.roll }}
+      annotations:
+        rollme: {{ randAlphaNum 5 | quote }}
+      {{- end }}
+      {{- end }}
+      labels:
+        {{- include "nomad.labels" . | nindent 8 }}
+        {{- with .Values.nomad.podLabels }}
+        {{- toYaml . | nindent 8 }}
+        {{- end }}
+        app.kubernetes.io/component: worker
+    spec:
+      {{- with .Values.nomad.imagePullSecrets }}
+      imagePullSecrets:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      serviceAccountName: {{ include "nomad.serviceAccountName" . }}
+      {{- with .Values.nomad.worker.podSecurityContext }}
+      securityContext:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      containers:
+        - name: {{ .Chart.Name }}-worker
+          {{- with .Values.nomad.worker.securityContext }}
+          securityContext:
+            {{- toYaml . | nindent 12 }}
+          {{- end }}
+          image: "{{ .Values.nomad.image.repository }}:{{ .Values.nomad.image.tag | default .Chart.AppVersion }}"
+          imagePullPolicy: {{ .Values.nomad.image.pullPolicy }}
+          {{- with .Values.nomad.worker.resources }}
+          resources:
+            {{- toYaml . | nindent 12 }}
+          {{- end }}
+          volumeMounts:
+            - mountPath: /app/nomad.yaml
+              name: nomad-conf
+              subPath: nomad.yaml
+            - mountPath: /app/.volumes/fs/public
+              name: public-volume
+            - mountPath: /app/.volumes/fs/staging
+              name: staging-volume
+            - mountPath: /nomad
+              name: nomad-volume
+            {{- with .Values.nomad.volumeMounts }}
+            {{- toYaml . | nindent 12 }}
+            {{- end }}
+          env:
+            - name: CELERY_ACKS_LATE
+              value: "True"
+            - name: NOMAD_META_SERVICE
+              value: "worker"
+            - name: NOMAD_CONSOLE_LOG_LEVEL
+              value: "{{ .Values.nomad.config.worker.console_loglevel }}"
+            - name: NOMAD_LOGSTASH_LEVEL
+              value: "{{ .Values.nomad.config.worker.logstash_loglevel }}"
+            - name: NOMAD_CELERY_NODE_NAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: spec.nodeName
+            {{- if .Values.nomad.config.api.apiSecret }}
+            - name: NOMAD_SERVICES_API_SECRET
+              valueFrom:
+                secretKeyRef:
+                  name: {{ .Values.nomad.config.api.apiSecret}}
+                  key: password
+            {{- end }}
+            {{- if .Values.nomad.config.keycloak.clientSecret }}
+            - name: NOMAD_KEYCLOAK_CLIENT_SECRET
+              valueFrom:
+                secretKeyRef:
+                  name: {{ .Values.nomad.config.keycloak.clientSecret }}
+                  key: password
+            {{- end }}
+            {{- if .Values.nomad.config.keycloak.passwordSecret }}
+            - name: NOMAD_KEYCLOAK_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: {{ .Values.nomad.config.keycloak.passwordSecret }}
+                  key: password
+            {{- end }}
+          command: ["python", "-m", "celery", "-A", "nomad.processing", "worker", "-n", "$(NOMAD_CELERY_NODE_NAME)" {{ if .Values.nomad.worker.processes }}, "-c", "{{ .Values.nomad.worker.processes }}"{{ end }}{{ if .Values.nomad.worker.maxTasksPerChild }}, "--max-tasks-per-child", "{{ .Values.nomad.worker.maxTasksPerChild }}"{{ end }}]
+          livenessProbe:
+            # httpGet:
+            #   path: /
+            #   port: http
+            exec:
+              command:
+              - bash
+              - -c
+              - NOMAD_LOGSTASH_LEVEL=WARNING python -m celery -A nomad.processing status | grep "${NOMAD_CELERY_NODE_NAME}:.*OK"
+            initialDelaySeconds: 30
+            periodSeconds: 30
+            timeoutSeconds: 30
+          readinessProbe:
+            # httpGet:
+            #   path: /
+            #   port: http
+            exec:
+              command:
+              - bash
+              - -c
+              - NOMAD_LOGSTASH_LEVEL=WARNING python -m celery -A nomad.processing status | grep "${NOMAD_CELERY_NODE_NAME}:.*OK"
+            initialDelaySeconds: 15
+            periodSeconds: 30
+            timeoutSeconds: 30
+      volumes:
+      {{- with .Values.nomad.volumes }}
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+        - name: nomad-conf
+          configMap:
+            name: {{ include "nomad.fullname" . }}-configmap
+        - name: public-volume
+          hostPath:
+            path: {{ .Values.nomad.config.volumes.public }}
+            # type: Directory
+        - name: staging-volume
+          {{ if (eq .Values.nomad.config.worker.storage "memory") }}
+          emptyDir:
+            medium: 'Memory'
+          {{ else }}
+          hostPath:
+            path: {{ .Values.nomad.config.volumes.staging}}
+            # type: Directory
+          {{ end }}
+        - name: nomad-volume
+          hostPath:
+            path: {{ .Values.nomad.config.volumes.nomad }}
+            # type: Directory
+
+      {{- with .Values.nomad.worker.nodeSelector }}
+      nodeSelector:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with .Values.nomad.worker.affinity }}
+      affinity:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with .Values.nomad.worker.tolerations }}
+      tolerations:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+{{- end }}
\ No newline at end of file
diff --git a/ops/kubernetes/nomad/values.yaml b/ops/kubernetes/nomad/values.yaml
index d61da1d655..ccd1e0b213 100644
--- a/ops/kubernetes/nomad/values.yaml
+++ b/ops/kubernetes/nomad/values.yaml
@@ -1,198 +1,382 @@
-## Default values for nomad@FAIRDI
-version:
-  isTest: false
-  isBeta: false
-  usesBetaData: false
-  officialUrl: "https://nomad-lab.eu/prod/v1/gui"
-
-meta:
-  service: "app"
-  homepage: "https://nomad-lab.eu"
-  source_url: "https://gitlab.mpcdf.mpg.de/nomad-lab/nomad-FAIR"
-  maintainer_email: "markus.scheidgen@physik.hu-berlin.de"
-
-## Everything concerning the container images to be used
-image:
-  ## The kubernetes docker-registry secret that can be used to access the registry
-  #  with the container image in it.
-  #  It can be created via:
-  #    kubectl create secret docker-registry gitlab-mpcdf --docker-server=gitlab-registry.mpcdf.mpg.de --docker-username=<your-user-name > --docker-password=<yourpass> --docker-email=<email>
-  secret: gitlab-mpcdf
-
-  ## The docker container image name without tag
-  name: gitlab-registry.mpcdf.mpg.de/nomad-lab/nomad-fair
-  ## The docker container image tag
-  tag: latest
-  pullPolicy: IfNotPresent
-
-## Ingress can be unable to provide gui and api access through kubernetes ingress (only k8s 1.18+)
-ingress:
-  enabled: false
-  annotations:
-    kubernetes.io/ingress.class: nginx
-    nginx.ingress.kubernetes.io/proxy-body-size: "32g"
-    nginx.ingress.kubernetes.io/proxy-request-buffering: "off"
-    nginx.ingress.kubernetes.io/proxy-connect-timeout: "10"
-    nginx.ingress.kubernetes.io/proxy-send-timeout: "3600"
-    nginx.ingress.kubernetes.io/proxy-read-timeout: "3600"
-  hosts:
-    - ""
-
-## Everything concerning the nomad app
-app:
-  replicas: 1
-  ## Number of gunicorn worker.
-  worker: 4
-  console_loglevel: INFO
-  logstash_loglevel: INFO
-  nomadNodeType: "public"
-
-## Everything concerning the nomad api
-api:
-  ## Secret used as cryptographic seed
-  secret: "defaultApiSecret"
-  ## Limit of unpublished uploads per user, except admin user
-  uploadLimit: 10
-
-## Everything concerning the nomad worker
-worker:
-  replicas: 1
-  # request and limit in GB, good prod sizes are 64, 420
-  memrequest: 8
-  memlimit: 32
-  maxTasksPerChild: 128
-  console_loglevel: ERROR
-  logstash_loglevel: INFO
-  ## There are two routing modes "queue" and "worker". The "queue" routing will use a general
-  # task queue and spread calc processing task over worker instances. The "worker" routing
-  # will send all tasks related to an upload to the same worker
-  routing: "queue"
-  storage: "disk"
-  nomadNodeType: "worker"
-  timeout: 7200
-  acks_late: false
-
-## Everthing concerning the nomad gui
-gui:
-  replicas: 1
-  ## This variable is used in the GUI to show or hide additional information
-  debug: false
-  ## automatically gz based on header
-  gzip: true
-  ## configuration for the interface, menus, options, etc.
-  config: {}
-
-encyclopedia:
-  ## enable links to the 'new' encyclopedia
+# Default values for nomad.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+roll: false
+nameOverride: ""
+fullnameOverride: ""
+
+serviceAccount:
+  # Specifies whether a service account should be created
+  create: true
+  # Automatically mount a ServiceAccount's API credentials?
+  automount: true
+  # Annotations to add to the service account
+  annotations: {}
+  # The name of the service account to use.
+  # If not set and create is true, a name is generated using the fullname template
+  name: ""
+
+nomad:
   enabled: true
+  image:
+    repository: gitlab-registry.mpcdf.mpg.de/nomad-lab/nomad-fair
+    tag: "latest"
+    pullPolicy: Always
 
-## Everything concerning the nginx that serves the gui, proxies the api
-#  It is run via NodePort service
-proxy:
-  # Set a nodePort to create a NodePort service instead of ClusterIP. Also set a nodeIP for the externalIP.
-  nodePort:
-  nodeIP:
-  timeout: 120
-  editTimeout: 1800
-  external:
-    host: "nomad-lab.eu"
-    port: 80
-    path: "/fairdi/nomad/latest"
-    https: true
+  imagePullSecrets: []
 
-## configuration of the chart dependency for rabbitmq
-rabbitmq:
-  persistence:
-    enabled: false
-  nodeSelector:
-    nomadtype: public
-  image.pullSecrets: nil
-  auth:
-    username: rabbitmq
-    password: rabbitmq
-    erlangCookie: SWQOKODSQALRPCLNMEQG
+  # Default values for nomad.
+  # This is a YAML-formatted file.
+  # Declare variables to be passed into your templates.
 
-## A common name/prefix for all dbs and indices.
-dbname: fairdi_nomad_latest
+  # GLOBAL parameters
+  enabled: false  # FIXME(review): duplicate key — 'enabled: true' is already set at the top of the 'nomad' section; YAML last-wins makes this the effective default
 
-## Databases that are not run within the cluster.
-#  To run databases in the cluster, use the nomad-full helm chart.
+  ## Default values for nomad@FAIRDI
+  config:
+    version:
+      label: "latest"
+      isTest: false
+      isBeta: false
+      usesBetaData: false
+      officialUrl: "https://nomad-lab.eu/prod/v1/gui"
+
+    meta:
+      service: "app"
+      homepage: "https://nomad-lab.eu"
+      source_url: "https://gitlab.mpcdf.mpg.de/nomad-lab/nomad-FAIR"
+      maintainer_email: "markus.scheidgen@physik.hu-berlin.de"
+
+    api:
+      ## Secret used as cryptographic seed
+      secret: "defaultApiSecret"
+      ## Limit of unpublished uploads per user, except admin user
+      uploadLimit: 10
+
+    app:
+      console_loglevel: INFO
+      logstash_loglevel: INFO
+
+    worker:
+      console_loglevel: ERROR
+      logstash_loglevel: INFO
+      ## There are two routing modes "queue" and "worker". The "queue" routing will use a general
+      # task queue and spread calc processing task over worker instances. The "worker" routing
+      # will send all tasks related to an upload to the same worker
+      routing: "queue"
+      timeout: 7200
+      acks_late: false
+
+    mail:
+      enabled: false
+      host: "localhost"
+      port: 25
+      from: "support@nomad-lab.eu"
 
-mongo:
-  host: nomad-flink-01.esc
-  port: 27017
+    client:
+      username: admin
 
-elastic:
-  host: nomad-flink-01.esc
-  port: 9200
-  timeout: 60
-  bulkTimeout: 600
-  bulkSize: 1000
-  entriesPerMaterialCap: 1000
+    springerDbPath: /nomad/fairdi/db/data/springer.msg
 
-logstash:
-  enabled: true
-  port: 5000
-  host: nomad-flink-01.esc
+    reprocess:
+      rematchPublished: true
+      reprocessExistingEntries: true
+      useOriginalParser: false
+      addMatchedEntriesToPublished: false
+      deleteUnmatchedPublishedEntries: false
+      indexIndividualEntries: false
 
-kibana:
-  port: 5601
-  host: nomad-flink-01.esc
+    process:
+      reuseParser: true
+      indexMaterials: true
+      rfc3161_skip_published: false
 
-mail:
-  enabled: false
-  host: "localhost"
-  port: 25
-  from: "support@nomad-lab.eu"
-
-client:
-  username: admin
-
-keycloak:
-  serverExternalUrl: "https://nomad-lab.eu/fairdi/keycloak/auth/"
-  serverUrl: "https://nomad-lab.eu/keycloak/auth/"
-  realmName: "fairdi_nomad_test"
-  username: "admin"
-  clientId: "nomad_public"
-  guiClientId: "nomad_public"
-  admin_user_id: "00000000-0000-0000-0000-000000000000"
-
-## Everything concerning the data that is used by the service
-volumes:
-  prefixSize: 1
-  public: /nomad/fairdi/latest/fs/public
-  staging: /nomad/fairdi/latest/fs/staging
-  north_home: /nomad/fairdi/latest/fs/north/users
-  tmp: /nomad/fairdi/latest/fs/tmp
-  nomad: /nomad
-
-springerDbPath: /nomad/fairdi/db/data/springer.msg
-
-reprocess:
-  rematchPublished: true
-  reprocessExistingEntries: true
-  useOriginalParser: false
-  addMatchedEntriesToPublished: false
-  deleteUnmatchedPublishedEntries: false
-  indexIndividualEntries: false
-
-process:
-  reuseParser: true
-  indexMaterials: true
-  rfc3161_skip_published: false
-
-datacite:
-  enabled: false
-  prefix: "10.17172"
+    datacite:
+      enabled: false
+      prefix: "10.17172"
+
+    ## A common name/prefix for all dbs and indices.
+    dbname: fairdi_nomad_latest
+
+    mongo:
+      host: ""
+      port: 27017
+
+    elastic:
+      host: ""
+      port: 9200
+      timeout: 60
+      bulkTimeout: 600
+      bulkSize: 1000
+      entriesPerMaterialCap: 1000
 
-services:
-  aitoolkit:
-    ## enable aitoolkit references
+    logstash:
+      enabled: true
+      port: 5000
+      host: ""
+
+    keycloak:
+      serverExternalUrl: "https://nomad-lab.eu/fairdi/keycloak/auth/"
+      serverUrl: "https://nomad-lab.eu/keycloak/auth/"
+      realmName: "fairdi_nomad_test"
+      username: "admin"
+      clientId: "nomad_public"
+      guiClientId: "nomad_public"
+      admin_user_id: "00000000-0000-0000-0000-000000000000"
+
+    ## Everything concerning the data that is used by the service
+    volumes:
+      prefixSize: 1
+      public: /nomad/fairdi/latest/fs/public
+      staging: /nomad/fairdi/latest/fs/staging
+      north_home: /nomad/fairdi/latest/fs/north/users
+      tmp: /nomad/fairdi/latest/fs/tmp
+      nomad: /nomad
+
+    services:
+      aitoolkit:
+        ## enable aitoolkit references
+        enabled: false
+
+    north:
+      enabled: false
+      hubServiceApiToken: "secret-token"
+
+    gui:
+      ## This variable is used in the GUI to show or hide additional information
+      debug: false
+      ## automatically gz based on header
+      gzip: true
+
+    proxy:
+      # Set a nodePort to create a NodePort service instead of ClusterIP. Also set a nodeIP for the externalIP.
+      timeout: 120
+      editTimeout: 1800
+      external:
+        host: "nomad-lab.eu"
+        port: 80
+        path: "/fairdi/nomad/latest"
+        https: true
+
+
+  ingress:
     enabled: false
+    className: ""
+    annotations:
+      nginx.ingress.kubernetes.io/ssl-redirect: "false"
+    hosts:
+      - host: chart-example.local
+        paths:
+          - path: /
+            pathType: ImplementationSpecific
+    tls: []
+    #  - secretName: chart-example-tls
+    #    hosts:
+    #      - chart-example.local
+
+  # Additional volumes on the output Deployment definition.
+  volumes: []
+  # - name: foo
+  #   secret:
+  #     secretName: mysecret
+  #     optional: false
+
+  # Additional volumeMounts on the output Deployment definition.
+  volumeMounts: []
+  # - name: foo
+  #   mountPath: "/etc/foo"
+  #   readOnly: true
+
+  # APPLICATION SPECIFIC parameters
+  service:
+    type: ClusterIP
+    port: 80
 
-north:
-  enabled: false
-  hubServiceApiToken: "secret-token"
+  # TODO: Do we really need this? Different apps could have their own ingress config. Eventually the proxy is just another nginx server
+  ## Everything concerning the nginx that serves the gui, proxies the api
+  #  It is run via NodePort service
+  proxy:
+    replicaCount: 1
+    # Set a nodePort to create a NodePort service instead of ClusterIP. Also set a nodeIP for the externalIP.
+
+    image:
+      repository: nginx
+      # Overrides the image tag whose default is the chart appVersion.
+      tag: 1.13.9-alpine
+      pullPolicy: IfNotPresent
+
+    command: ["nginx"]
+    args: ["-g", "daemon off;"]
+
+    imagePullSecrets: []
+
+    service:
+      type: ClusterIP
+      port: 80
+
+    podSecurityContext: {}
+      # fsGroup: 2000
+
+    securityContext: {}
+      # capabilities:
+      #   drop:
+      #   - ALL
+      # readOnlyRootFilesystem: true
+      # runAsNonRoot: true
+      # runAsUser: 1000
+
+    resources: {}
+      # We usually recommend not to specify default resources and to leave this as a conscious
+      # choice for the user. This also increases chances charts run on environments with little
+      # resources, such as Minikube. If you do want to specify resources, uncomment the following
+      # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+      # limits:
+      #   cpu: 100m
+      #   memory: 128Mi
+      # requests:
+      #   cpu: 100m
+      #   memory: 128Mi
+
+    # Additional volumes on the output Deployment definition.
+    volumes: []
+    # - name: foo
+    #   secret:
+    #     secretName: mysecret
+    #     optional: false
+
+    # Additional volumeMounts on the output Deployment definition.
+    volumeMounts: []
+    # - name: foo
+    #   mountPath: "/etc/foo"
+    #   readOnly: true
+
+    nodeSelector: {}
+    tolerations: []
+    affinity: {}
+
+    podAnnotations: {}
+    podLabels: {}
+
+  ## Everything concerning the nomad app
+  app:
+    replicaCount: 1
+
+    # options: {}
+    service:
+      type: ClusterIP
+      port: 8000
+
+    podSecurityContext: {}
+      # fsGroup: 2000
+
+    securityContext: {}
+      # capabilities:
+      #   drop:
+      #   - ALL
+      # readOnlyRootFilesystem: true
+      # runAsNonRoot: true
+      # runAsUser: 1000
+
+    resources: {}
+      # We usually recommend not to specify default resources and to leave this as a conscious
+      # choice for the user. This also increases chances charts run on environments with little
+      # resources, such as Minikube. If you do want to specify resources, uncomment the following
+      # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+      # limits:
+      #   cpu: 100m
+      #   memory: 128Mi
+      # requests:
+      #   cpu: 100m
+      #   memory: 128Mi
+
+    # Additional volumes on the output Deployment definition.
+    volumes: []
+    # - name: foo
+    #   secret:
+    #     secretName: mysecret
+    #     optional: false
+
+    # Additional volumeMounts on the output Deployment definition.
+    volumeMounts: []
+    # - name: foo
+    #   mountPath: "/etc/foo"
+    #   readOnly: true
+
+    nodeSelector: {}
+    tolerations: []
+    affinity: {}
+
+    podAnnotations: {}
+    podLabels: {}
+
+  ## Everything concerning the nomad worker
+  worker:
+    replicaCount: 1
+
+    maxTasksPerChild: 128
+    # storage: "disk"  # FIXME(review): worker/deployment.yaml reads .Values.nomad.config.worker.storage, which is defined nowhere — 'eq nil "memory"' may fail to render; confirm where this key should live
+
+    podSecurityContext: {}
+      # fsGroup: 2000
+
+    securityContext: {}
+      # capabilities:
+      #   drop:
+      #   - ALL
+      # readOnlyRootFilesystem: true
+      # runAsNonRoot: true
+      # runAsUser: 1000
+
+    # request and limit in GB, good prod sizes are 64, 420
+    resources: {}
+      # We usually recommend not to specify default resources and to leave this as a conscious
+      # choice for the user. This also increases chances charts run on environments with little
+      # resources, such as Minikube. If you do want to specify resources, uncomment the following
+      # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+      # limits:
+      #   cpu: 100m
+      #   memory: 128Mi
+      # requests:
+      #   cpu: 100m
+      #   memory: 128Mi
+
+    # Additional volumes on the output Deployment definition.
+    volumes: []
+    # - name: foo
+    #   secret:
+    #     secretName: mysecret
+    #     optional: false
+
+    # Additional volumeMounts on the output Deployment definition.
+    volumeMounts: []
+    # - name: foo
+    #   mountPath: "/etc/foo"
+    #   readOnly: true
+
+    nodeSelector: {}
+    tolerations: []
+    affinity: {}
+
+    podAnnotations: {}
+    podLabels: {}
+
+  ## Everything concerning the nomad gui
+  gui:
+    ## This variable is used in the GUI to show or hide additional information
+    debug: false
+    ## automatically gz based on header
+    gzip: true
+    ## configuration for the interface, menus, options, etc.
+    config: {}
+
+rabbitmq:
+  persistence:
+    enabled: false
+  image.pullSecrets: nil  # NOTE(review): dotted key is not nested by Helm, and plain 'nil' parses as the string "nil", not null — confirm this is intentional
+  auth:
+    username: rabbitmq
+    password: rabbitmq
+    erlangCookie: SWQOKODSQALRPCLNMEQG
 
 jupyterhub:
   debug:
@@ -305,12 +489,12 @@ jupyterhub:
 
         c.Spawner.pre_spawn_hook = pre_spawn_hook
 
-
   cull:
     enabled: true
     timeout: 3600
     every: 600
     removeNamedServers: true
+
   prePuller:
     hook:
       enabled: true
@@ -318,6 +502,7 @@ jupyterhub:
         pullPolicy: "Always"
     continuous:
       enabled: false
+
   scheduling:
     userScheduler:
       enabled: false
@@ -326,3 +511,9 @@ jupyterhub:
     userPlaceholder:
       enabled: false
       replicas: 0
+
+mongodb:
+  enabled: true
+
+elasticsearch:
+  enabled: true
diff --git a/ops/kubernetes/values.yaml b/ops/kubernetes/values.yaml
new file mode 100644
index 0000000000..3697e94d39
--- /dev/null
+++ b/ops/kubernetes/values.yaml
@@ -0,0 +1,201 @@
+mongodb:
+  enabled: false
+
+elasticsearch:
+  enabled: false
+
+nomad:
+  enabled: true
+  config:
+    version:
+      isBeta: false
+      usesBetaData: false
+
+    proxy:
+      external:
+        host: "cloud.nomad-lab.eu"
+        path: "/prod/v1"
+
+    gui:
+      debug: false
+      encyclopediaBase: "https://nomad-lab.eu/prod/rae/encyclopedia/#"
+      aitoolkitEnabled: false
+
+    elastic:
+      host: elasticsearch-master.nomad-infrastructure.svc.cluster.local
+      port: 9200
+
+    mongo:
+      host: nomad-infrastructure-mongodb-0.nomad-infrastructure-mongodb-headless.nomad-infrastructure.svc.cluster.local,nomad-infrastructure-mongodb-1.nomad-infrastructure-mongodb-headless.nomad-infrastructure.svc.cluster.local,nomad-infrastructure-mongodb-0.nomad-infrastructure-mongodb-headless.nomad-infrastructure.svc.cluster.local/?replicaSet=rs0 # FIXME(review): third host repeats mongodb-0 — should it be mongodb-2?
+
+    logstash:
+      enabled: true
+      host: eck-stack-eck-logstash-ls-logs.nomad-system.svc.cluster.local
+
+    dbname: nomad_prod_v1
+
+    uploadurl: "https://cloud.nomad-lab.eu/prod/v1/api/uploads"
+
+    client:
+      passwordSecret: "nomad-keycloak-password"
+
+    keycloak:
+      serverUrl: "https://cloud.nomad-lab.eu/fairdi/keycloak/auth/"
+      serverExternalUrl: "https://cloud.nomad-lab.eu/fairdi/keycloak/auth/"
+      passwordSecret: "nomad-keycloak-password"
+      realmName: "fairdi_nomad_prod"
+      clientId: "nomad_public"
+      admin_user_id: "82efac55-6187-408c-8027-b98580c0e1c5"
+
+    volumes:
+      prefixSize: 1
+      public: /nomad/prod/fs/public
+      staging: /nomad/prod/fs/staging
+      north_home: /nomad/prod/fs/north/users
+      tmp: /nomad/prod/fs/tmp
+      nomad: /nomad
+      archiveVersionSuffix: v1
+
+    mail:
+      enabled: true
+      host: "mailrelay.mpcdf.mpg.de"
+      port: 25
+      from: "support@nomad-lab.eu"
+
+    datacite:
+      enabled: true
+      secret: "nomad-datacite"
+
+    north:
+      enabled: true
+      hubServiceApiTokenSecret: "nomad-hub-service-api-token"
+
+  image:
+    tag: "cloud"
+    pullPolicy: "Always"
+
+  volumeMounts:
+    - mountPath: /app/run
+      name: nomad-gui-configured
+
+  volumes:
+    - name: nomad-gui-configured
+      emptyDir: {}
+
+  ingress:
+    enabled: true
+    className: "nginx"
+    annotations:
+      cert-manager.io/cluster-issuer: "letsencrypt-production"
+      nginx.ingress.kubernetes.io/proxy-body-size: "32g"
+      nginx.ingress.kubernetes.io/proxy-request-buffering: "off"
+      nginx.ingress.kubernetes.io/proxy-connect-timeout: "10"
+      nginx.ingress.kubernetes.io/proxy-send-timeout: "3600"
+      nginx.ingress.kubernetes.io/proxy-read-timeout: "3600"
+      nginx.ingress.kubernetes.io/limit-rps: "10"
+      nginx.ingress.kubernetes.io/denylist-source-range: "141.35.40.36/32, 141.35.40.52/32"
+    hosts:
+      - host: cloud.nomad-lab.eu
+        paths:
+          - path: /prod/v1/
+            pathType: ImplementationSpecific
+    tls:
+      - secretName: cloud-nomad-lab-eu-tls
+        hosts:
+          - cloud.nomad-lab.eu
+
+  proxy:
+    nodeSelector:
+      environment: prod
+      "nomad-lab.eu/app": ""
+
+  app:
+    replicaCount: 4
+    nodeSelector:
+      environment: prod
+      "nomad-lab.eu/app": ""
+    resources:
+      limits:
+        memory: "8Gi"
+      requests:
+        memory: "1Gi"
+    podSecurityContext:
+      runAsUser: 25249
+      runAsGroup: 11320
+      fsGroup: 11320
+
+  worker:
+    replicaCount: 1
+    processes: 12
+    nodeSelector:
+      environment: prod
+      "nomad-lab.eu/worker": ""
+    podSecurityContext:
+      runAsUser: 25249
+      runAsGroup: 11320
+      fsGroup: 11320
+    # affinity:
+    #   podAntiAffinity:
+    #     requiredDuringSchedulingIgnoredDuringExecution:
+    #     - topologyKey: kubernetes.io/hostname
+    #       labelSelector:
+    #         matchLabels:
+    #           app.kubernetes.io/component: worker
+    #           app.kubernetes.io/instance: nomad-staging
+    resources:
+      limits:
+        memory: "32Gi"
+      requests:
+        memory: "8Gi"
+
+rabbitmq:
+  nodeSelector:
+    environment: prod
+    "nomad-lab.eu/db": ""
+
+jupyterhub:
+  fullnameOverride: "nomad-prod-north"
+  proxy:
+    chp:
+      nodeSelector:
+        environment: prod
+        "nomad-lab.eu/app": ""
+  hub:
+    containerSecurityContext:
+      runAsUser: 25249
+      runAsGroup: 11320
+    baseUrl: "/prod/v1/north"
+    nodeSelector:
+      environment: prod
+      "nomad-lab.eu/app": ""
+    db:
+      type: sqlite-pvc
+      pvc:
+        storageClassName: csi-sc-cinderplugin
+    config:
+      GenericOAuthenticator:
+        client_id: nomad_public
+        oauth_callback_url: https://cloud.nomad-lab.eu/prod/v1/north/hub/oauth_callback
+        authorize_url: https://cloud.nomad-lab.eu/fairdi/keycloak/auth/realms/fairdi_nomad_prod/protocol/openid-connect/auth
+        token_url: https://cloud.nomad-lab.eu/fairdi/keycloak/auth/realms/fairdi_nomad_prod/protocol/openid-connect/token
+        userdata_url: https://cloud.nomad-lab.eu/fairdi/keycloak/auth/realms/fairdi_nomad_prod/protocol/openid-connect/userinfo
+  singleuser:
+    podNameTemplate: "nomad-prod-north-{username}--{servername}"
+    uid: 1000
+    fsGid: 11320
+    # gid: 11320  for some reason this is not in the chart. It exists on the KubeSpawner,
+    # but there is no pass-through config like for uid and fsGid. Therefore, we
+    # need the extraPodConfig to override the securityContext created by KubeSpawner.
+    extraPodConfig:
+      securityContext:
+        runAsUser: 1000
+        runAsGroup: 11320
+        fsGroup: 11320
+    nodeSelector:
+        environment: prod
+        "nomad-lab.eu/worker-north": ""
+  prePuller:
+    hook:
+      nodeSelector:
+        environment: prod
+        "nomad-lab.eu/worker-north": ""
-- 
GitLab