From 7d7e34b1323801c6955b86c440da76ec1e64f490 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Robert=20Sch=C3=A4fer?=
Date: Mon, 28 Jan 2019 18:33:21 +0100
Subject: [PATCH 01/26] Add deployment for db-migration-worker

---
 README.md | 11 +++-
 staging/.gitignore | 1 +
 ...b-migration-worker-configmap.template.yaml | 12 ++++
 staging/db-migration-worker-deployment.yaml | 63 +++++++++++++++++++
 4 files changed, 85 insertions(+), 2 deletions(-)
 create mode 100644 staging/.gitignore
 create mode 100644 staging/db-migration-worker-configmap.template.yaml
 create mode 100644 staging/db-migration-worker-deployment.yaml

diff --git a/README.md b/README.md
index db0007d92..17fbe5780 100644
--- a/README.md
+++ b/README.md
@@ -22,10 +22,17 @@ There are many Kubernetes distributions, but if you're just getting started, Min
 kubectl create -f namespace-staging.json
 ```
 
+## Change config maps according to your needs
+```shell
+cd ./staging
+cp db-migration-worker-configmap.template.yaml db-migration-worker-configmap.yaml
+# edit all variables according to the setup of the remote legacy server
+```
+
 ## Apply the config map to staging namespace
 ```shell
 cd ./staging
-kubectl apply -f neo4j-configmap.yaml -f backend-configmap.yaml -f web-configmap.yaml
+kubectl apply -f neo4j-configmap.yaml -f backend-configmap.yaml -f web-configmap.yaml -f db-migration-worker-configmap.yaml
 ```
 
 ## Setup secrets and deploy them
@@ -39,7 +46,7 @@ kubectl apply -f secrets.yaml
 ## Deploy the app
 ```shell
 cd ./staging
-kubectl apply -f neo4j-deployment.yaml -f backend-deployment.yaml -f web-deployment.yaml
+kubectl apply -f neo4j-deployment.yaml -f backend-deployment.yaml -f web-deployment.yaml -f db-migration-worker-deployment.yaml
 ```
 This can take a while.
 Sit back and relax and have a look into your minikube dashboard:
diff --git a/staging/.gitignore b/staging/.gitignore
new file mode 100644
index 000000000..d9c5e61bf
--- /dev/null
+++ b/staging/.gitignore
@@ -0,0 +1 @@
+db-migration-worker-configmap.yaml
diff --git a/staging/db-migration-worker-configmap.template.yaml b/staging/db-migration-worker-configmap.template.yaml
new file mode 100644
index 000000000..e00077577
--- /dev/null
+++ b/staging/db-migration-worker-configmap.template.yaml
@@ -0,0 +1,12 @@
+apiVersion: v1
+kind: ConfigMap
+data:
+  SSH_USERNAME: ""
+  SSH_HOST: ""
+  MONGODB_USERNAME: "hc-api"
+  MONGODB_AUTH_DB: "hc_api"
+  MONGODB_DATABASE: "hc_api"
+  UPLOADS_DIRECTORY: "/var/www/api/uploads"
+metadata:
+  name: staging-db-migration-worker
+  namespace: staging
diff --git a/staging/db-migration-worker-deployment.yaml b/staging/db-migration-worker-deployment.yaml
new file mode 100644
index 000000000..7b06dcb6d
--- /dev/null
+++ b/staging/db-migration-worker-deployment.yaml
@@ -0,0 +1,63 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  name: nitro-db-migration-worker
+  namespace: staging
+spec:
+  replicas: 1
+  minReadySeconds: 15
+  progressDeadlineSeconds: 60
+  selector:
+    matchLabels:
+      workload.user.cattle.io/workloadselector: deployment-staging-db-migration-worker
+  template:
+    metadata:
+      labels:
+        workload.user.cattle.io/workloadselector: deployment-staging-db-migration-worker
+      name: "nitro-db-migration-worker"
+    spec:
+      containers:
+      - env:
+        - name: SSH_USERNAME
+          valueFrom:
+            configMapKeyRef:
+              name: staging-db-migration-worker
+              key: SSH_USERNAME
+        - name: SSH_HOST
+          valueFrom:
+            configMapKeyRef:
+              name: staging-db-migration-worker
+              key: SSH_HOST
+        - name: MONGODB_USERNAME
+          valueFrom:
+            configMapKeyRef:
+              name: staging-db-migration-worker
+              key:
MONGODB_USERNAME + - name: MONGODB_AUTH_DB + valueFrom: + configMapKeyRef: + name: staging-db-migration-worker + key: MONGODB_AUTH_DB + - name: MONGODB_DATABASE + valueFrom: + configMapKeyRef: + name: staging-db-migration-worker + key: MONGODB_DATABASE + - name: UPLOADS_DIRECTORY + valueFrom: + configMapKeyRef: + name: staging-db-migration-worker + key: UPLOADS_DIRECTORY + - name: MONGODB_PASSWORD + valueFrom: + secretKeyRef: + name: staging + key: MONGODB_PASSWORD + optional: false + image: humanconnection/db-migration-worker:latest + name: nitro-db-migration-worker + resources: {} + imagePullPolicy: Always + restartPolicy: Always + terminationGracePeriodSeconds: 30 +status: {} From ad55cc03e521fc728a8174b53da86a2f7996683d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=A4fer?= Date: Mon, 28 Jan 2019 18:46:52 +0100 Subject: [PATCH 02/26] Remove obsolete deployment files --- staging/deployment.yaml | 260 ---------------------------------------- 1 file changed, 260 deletions(-) delete mode 100644 staging/deployment.yaml diff --git a/staging/deployment.yaml b/staging/deployment.yaml deleted file mode 100644 index 9d68db535..000000000 --- a/staging/deployment.yaml +++ /dev/null @@ -1,260 +0,0 @@ -apiVersion: v1 -items: -- apiVersion: extensions/v1beta1 - kind: Deployment - metadata: - name: backend - namespace: staging - spec: - minReadySeconds: 15 - progressDeadlineSeconds: 60 - replicas: 1 - revisionHistoryLimit: 10 - selector: - matchLabels: - cattle.io/creator: norman - workload.user.cattle.io/workloadselector: deployment-staging-backend - strategy: - rollingUpdate: - maxSurge: 1 - maxUnavailable: 0 - type: RollingUpdate - template: - spec: - containers: - - env: - - name: MOCK - valueFrom: - configMapKeyRef: - key: MOCK - name: staging-backend - optional: false - - name: NEO4J_URI - valueFrom: - configMapKeyRef: - key: NEO4J_URI - name: staging-neo4j - optional: false - - name: JWT_SECRET - valueFrom: - secretKeyRef: - key: JWT_SECRET - name: staging - optional: false - - name: NEO4J_AUTH - valueFrom: - configMapKeyRef: - key: NEO4J_AUTH - name: staging-neo4j - optional: false - - name: CLIENT_URI - valueFrom: - configMapKeyRef: - key: CLIENT_URI - name: staging-web - optional: false - - name: GRAPHQL_PORT - valueFrom: - configMapKeyRef: - key: GRAPHQL_PORT - name: staging-backend - optional: false - - name: GRAPHQL_URI - valueFrom: - configMapKeyRef: - key: GRAPHQL_URI - name: staging-backend - optional: false - image: humanconnection/nitro-backend:latest - imagePullPolicy: Always - name: backend - resources: {} - tty: true - restartPolicy: Always - terminationGracePeriodSeconds: 30 -#- apiVersion: extensions/v1beta1 -# kind: Deployment -# metadata: -# annotations: -# deployment.kubernetes.io/revision: "2" -# field.cattle.io/creatorId: user-x8jr4 -# field.cattle.io/publicEndpoints: '[{"nodeName":"c-2kbhr:m-bmgq4","addresses":["104.248.30.130"],"port":7687,"protocol":"TCP","podName":"staging:neo4j-2-6589cbc4d5-q4bxl","allNodes":false},{"nodeName":"c-2kbhr:m-bmgq4","addresses":["104.248.30.130"],"port":7474,"protocol":"TCP","podName":"staging:neo4j-2-6589cbc4d5-q4bxl","allNodes":false},{"nodeName":"c-2kbhr:m-bmgq4","addresses":["104.248.30.130"],"port":7473,"protocol":"TCP","podName":"staging:neo4j-2-6589cbc4d5-q4bxl","allNodes":false}]' -# creationTimestamp: 2018-12-10T19:07:58Z -# generation: 8 -# labels: -# cattle.io/creator: norman -# workload.user.cattle.io/workloadselector: deployment-staging-neo4j-2 -# name: neo4j-2 -# namespace: staging -# resourceVersion: "2380945" -# 
selfLink: /apis/extensions/v1beta1/namespaces/staging/deployments/neo4j-2 -# uid: e80460f6-fcae-11e8-943a-c6c288d5f6fa -# spec: -# progressDeadlineSeconds: 600 -# replicas: 1 -# revisionHistoryLimit: 10 -# selector: -# matchLabels: -# workload.user.cattle.io/workloadselector: deployment-staging-neo4j-2 -# strategy: -# rollingUpdate: -# maxSurge: 1 -# maxUnavailable: 0 -# type: RollingUpdate -# template: -# metadata: -# annotations: -# cattle.io/timestamp: 2018-12-11T11:11:09Z -# field.cattle.io/ports: '[[{"containerPort":7687,"dnsName":"neo4j-2-hostport","hostPort":7687,"kind":"HostPort","name":"7687tcp76870","protocol":"TCP","sourcePort":7687},{"containerPort":7474,"dnsName":"neo4j-2-hostport","hostPort":7474,"kind":"HostPort","name":"7474tcp74740","protocol":"TCP","sourcePort":7474},{"containerPort":7473,"dnsName":"neo4j-2-hostport","hostPort":7473,"kind":"HostPort","name":"7473tcp74730","protocol":"TCP","sourcePort":7473}]]' -# creationTimestamp: null -# labels: -# workload.user.cattle.io/workloadselector: deployment-staging-neo4j-2 -# spec: -# containers: -# - env: -# - name: NEO4J_AUTH -# value: none -# image: humanconnection/neo4j:latest -# imagePullPolicy: IfNotPresent -# name: neo4j-2 -# ports: -# - containerPort: 7687 -# hostPort: 7687 -# name: 7687tcp76870 -# protocol: TCP -# - containerPort: 7474 -# hostPort: 7474 -# name: 7474tcp74740 -# protocol: TCP -# - containerPort: 7473 -# hostPort: 7473 -# name: 7473tcp74730 -# protocol: TCP -# resources: {} -# securityContext: -# allowPrivilegeEscalation: false -# capabilities: {} -# privileged: false -# readOnlyRootFilesystem: false -# runAsNonRoot: false -# stdin: true -# terminationMessagePath: /dev/termination-log -# terminationMessagePolicy: File -# tty: true -# dnsPolicy: ClusterFirst -# restartPolicy: Always -# schedulerName: default-scheduler -# securityContext: {} -# terminationGracePeriodSeconds: 30 -# status: -# availableReplicas: 1 -# conditions: -# - lastTransitionTime: 2018-12-10T19:07:58Z -# lastUpdateTime: 2018-12-11T11:11:18Z -# message: ReplicaSet "neo4j-2-6589cbc4d5" has successfully progressed. -# reason: NewReplicaSetAvailable -# status: "True" -# type: Progressing -# - lastTransitionTime: 2018-12-11T12:12:41Z -# lastUpdateTime: 2018-12-11T12:12:41Z -# message: Deployment has minimum availability. 
-# reason: MinimumReplicasAvailable -# status: "True" -# type: Available -# observedGeneration: 8 -# readyReplicas: 1 -# replicas: 1 -# updatedReplicas: 1 -##- apiVersion: extensions/v1beta1 -# kind: Deployment -# metadata: -# annotations: -# deployment.kubernetes.io/revision: "15" -# field.cattle.io/creatorId: user-x8jr4 -# field.cattle.io/publicEndpoints: '[{"addresses":["68.183.211.116"],"port":31726,"protocol":"TCP","serviceName":"staging:web-nodeport","allNodes":true},{"addresses":["104.248.25.205"],"port":80,"protocol":"HTTP","serviceName":"staging:ingress-ef72b2ceebfff95d50b0537c0e9e98d8","ingressName":"staging:web","hostname":"web.staging.104.248.25.205.xip.io","allNodes":true}]' -# creationTimestamp: 2018-11-30T13:56:41Z -# generation: 56 -# labels: -# cattle.io/creator: norman -# workload.user.cattle.io/workloadselector: deployment-staging-web -# name: web -# namespace: staging -# resourceVersion: "2401610" -# selfLink: /apis/extensions/v1beta1/namespaces/staging/deployments/web -# uid: c3870196-f4a7-11e8-943a-c6c288d5f6fa -# spec: -# progressDeadlineSeconds: 600 -# replicas: 1 -# revisionHistoryLimit: 10 -# selector: -# matchLabels: -# workload.user.cattle.io/workloadselector: deployment-staging-web -# strategy: -# rollingUpdate: -# maxSurge: 1 -# maxUnavailable: 0 -# type: RollingUpdate -# template: -# metadata: -# labels: -# workload.user.cattle.io/workloadselector: deployment-staging-web -# spec: -# containers: -# - env: -# - name: HOST -# value: 0.0.0.0 -# - name: JWT_SECRET -# valueFrom: -# secretKeyRef: -# key: JWT_SECRET -# name: jwt-secret -# optional: false -# - name: BACKEND_URL -# valueFrom: -# configMapKeyRef: -# key: GRAPHQL_URI -# name: staging-configs -# optional: false -# image: humanconnection/nitro-web:latest -# imagePullPolicy: Always -# name: web -# ports: -# - containerPort: 3000 -# name: 3000tcp01 -# protocol: TCP -# resources: {} -# securityContext: -# allowPrivilegeEscalation: false -# capabilities: {} -# privileged: false -# readOnlyRootFilesystem: false -# runAsNonRoot: false -# stdin: true -# terminationMessagePath: /dev/termination-log -# terminationMessagePolicy: File -# tty: true -# dnsPolicy: ClusterFirst -# restartPolicy: Always -# schedulerName: default-scheduler -# securityContext: {} -# terminationGracePeriodSeconds: 30 -# status: -# availableReplicas: 1 -# conditions: -# - lastTransitionTime: 2018-11-30T14:53:36Z -# lastUpdateTime: 2018-12-11T11:17:34Z -# message: ReplicaSet "web-5864d6db9c" has successfully progressed. -# reason: NewReplicaSetAvailable -# status: "True" -# type: Progressing -# - lastTransitionTime: 2018-12-11T11:23:17Z -# lastUpdateTime: 2018-12-11T11:23:17Z -# message: Deployment has minimum availability. -# reason: MinimumReplicasAvailable -# status: "True" -# type: Available -# observedGeneration: 56 -# readyReplicas: 1 -# replicas: 1 -# updatedReplicas: 1 -kind: List From 8a74f452a9a71ad2a30e5f4117aa1e71f5fc3c4f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=A4fer?= Date: Wed, 30 Jan 2019 19:18:23 +0100 Subject: [PATCH 03/26] Create persistent volumes for minikube Running the import works in minikube! 
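A quick way to verify the volumes before running the import (illustrative
commands, assuming the default minikube setup and the `staging` namespace
used throughout this repository):

```shell
# the claims should reach STATUS "Bound" once kubernetes has matched them
# to the persistent volumes defined in staging/volumes/
kubectl --namespace=staging get pv,pvc
# hostPath volumes live inside the minikube VM, e.g.:
minikube ssh "ls /data/shared/mongo-exports/"
```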
---
 README.md | 18 ++++++++++++++++++
 staging/db-migration-worker-deployment.yaml | 12 ++++++++++++
 staging/neo4j-deployment.yaml | 7 +++++++
 staging/volumes/mongo-export-claim.yaml | 11 +++++++++++
 staging/volumes/mongo-export-volume.yaml | 12 ++++++++++++
 staging/volumes/ssh-keys-volume-claim.yml | 11 +++++++++++
 staging/volumes/ssh-keys-volume.yaml | 12 ++++++++++++
 staging/volumes/uploads-claim.yaml | 11 +++++++++++
 staging/volumes/uploads-volume.yaml | 12 ++++++++++++
 9 files changed, 106 insertions(+)
 create mode 100644 staging/volumes/mongo-export-claim.yaml
 create mode 100644 staging/volumes/mongo-export-volume.yaml
 create mode 100644 staging/volumes/ssh-keys-volume-claim.yml
 create mode 100644 staging/volumes/ssh-keys-volume.yaml
 create mode 100644 staging/volumes/uploads-claim.yaml
 create mode 100644 staging/volumes/uploads-volume.yaml

diff --git a/README.md b/README.md
index 17fbe5780..29faf84d5 100644
--- a/README.md
+++ b/README.md
@@ -46,6 +46,7 @@ kubectl apply -f secrets.yaml
 ## Deploy the app
 ```shell
 cd ./staging
+kubectl apply -f ./volumes
 kubectl apply -f neo4j-deployment.yaml -f backend-deployment.yaml -f web-deployment.yaml -f db-migration-worker-deployment.yaml
 ```
 This can take a while.
@@ -68,3 +69,20 @@ kubectl expose deployment nitro-web --namespace=staging --type=LoadBalancer
 minikube service nitro-backend --namespace=staging
 minikube service nitro-web --namespace=staging
 ```
+
+
+## Provisioning db-migration-worker
+Copy your private ssh key and the `.known-hosts` file of your remote legacy server.
+```shell
+
+# check the corresponding db-migration-worker pod
+kubectl --namespace=staging get pods
+# change below
+kubectl cp path/to/your/ssh/keys/folder staging/nitro-db-migration-worker-<pod-id>:/root/
+```
+
+Run the migration:
+```shell
+# change below
+kubectl --namespace=staging exec -it nitro-db-migration-worker-<pod-id> ./import.sh
+```
diff --git a/staging/db-migration-worker-deployment.yaml b/staging/db-migration-worker-deployment.yaml
index 7b06dcb6d..509f98093 100644
--- a/staging/db-migration-worker-deployment.yaml
+++ b/staging/db-migration-worker-deployment.yaml
@@ -58,6 +58,18 @@ spec:
         name: nitro-db-migration-worker
         resources: {}
         imagePullPolicy: Always
+        volumeMounts:
+        - mountPath: /root/
+          name: ssh-keys-directory
+        - mountPath: /mongo-export/
+          name: mongo-export
       restartPolicy: Always
+      volumes:
+      - name: ssh-keys-directory
+        persistentVolumeClaim:
+          claimName: ssh-keys-claim
+      - name: mongo-export
+        persistentVolumeClaim:
+          claimName: mongo-export-claim
       terminationGracePeriodSeconds: 30
 status: {}
diff --git a/staging/neo4j-deployment.yaml b/staging/neo4j-deployment.yaml
index 66cf5a966..e8268b047 100644
--- a/staging/neo4j-deployment.yaml
+++ b/staging/neo4j-deployment.yaml
@@ -46,5 +46,12 @@ spec:
       # - containerPort: 7473
         resources: {}
         imagePullPolicy: IfNotPresent
+        volumeMounts:
+        - mountPath: /mongo-export/
+          name: mongo-export
       restartPolicy: Always
+      volumes:
+      - name: mongo-export
+        persistentVolumeClaim:
+          claimName: mongo-export-claim
 status: {}
diff --git a/staging/volumes/mongo-export-claim.yaml b/staging/volumes/mongo-export-claim.yaml
new file mode 100644
index 000000000..1c91996db
--- /dev/null
+++ b/staging/volumes/mongo-export-claim.yaml
@@ -0,0 +1,11 @@
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+  name: mongo-export-claim
+  namespace: staging
+spec:
+  accessModes:
+    - ReadWriteMany
+  resources:
+    requests:
+      storage: 1Gi
diff --git a/staging/volumes/mongo-export-volume.yaml b/staging/volumes/mongo-export-volume.yaml
new file
mode 100644 index 000000000..945c28765 --- /dev/null +++ b/staging/volumes/mongo-export-volume.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: mongo-export-volume + namespace: staging +spec: + accessModes: + - ReadWriteMany + capacity: + storage: 1Gi + hostPath: + path: /data/shared/mongo-exports/ diff --git a/staging/volumes/ssh-keys-volume-claim.yml b/staging/volumes/ssh-keys-volume-claim.yml new file mode 100644 index 000000000..30191a500 --- /dev/null +++ b/staging/volumes/ssh-keys-volume-claim.yml @@ -0,0 +1,11 @@ +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: ssh-keys-claim + namespace: staging +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Mi diff --git a/staging/volumes/ssh-keys-volume.yaml b/staging/volumes/ssh-keys-volume.yaml new file mode 100644 index 000000000..9c0353b74 --- /dev/null +++ b/staging/volumes/ssh-keys-volume.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: ssh-keys-volume + namespace: staging +spec: + accessModes: + - ReadWriteOnce + capacity: + storage: 1Mi + hostPath: + path: /data/pv0001/ diff --git a/staging/volumes/uploads-claim.yaml b/staging/volumes/uploads-claim.yaml new file mode 100644 index 000000000..c1b11ed4e --- /dev/null +++ b/staging/volumes/uploads-claim.yaml @@ -0,0 +1,11 @@ +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: uploads-claim + namespace: staging +spec: + accessModes: + - ReadWriteMany + resources: + requests: + storage: 8Gi diff --git a/staging/volumes/uploads-volume.yaml b/staging/volumes/uploads-volume.yaml new file mode 100644 index 000000000..4600a76a4 --- /dev/null +++ b/staging/volumes/uploads-volume.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: uploads-volume + namespace: staging +spec: + accessModes: + - ReadWriteMany + capacity: + storage: 8Gi + hostPath: + path: /data/shared/uploads/ From 330fd9a8e9b0d95e833e91aa62cfdc887ed6c354 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=A4fer?= Date: Wed, 30 Jan 2019 20:15:40 +0100 Subject: [PATCH 04/26] Enable apoc to run neo4j import --- staging/neo4j-deployment.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/staging/neo4j-deployment.yaml b/staging/neo4j-deployment.yaml index e8268b047..2b3485c77 100644 --- a/staging/neo4j-deployment.yaml +++ b/staging/neo4j-deployment.yaml @@ -17,6 +17,8 @@ spec: spec: containers: - env: + - name: NEO4J_apoc_import_file_enabled + value: "true" - name: NEO4J_dbms_memory_pagecache_size value: 1G - name: NEO4J_dbms_memory_heap_max__size From 5cd0485117061624070e1758435e1816f8d022e9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=A4fer?= Date: Thu, 31 Jan 2019 18:19:19 +0100 Subject: [PATCH 05/26] Point the backend to the local neo4j service --- README.md | 3 +-- services/backend.yml | 22 ++++++++++++++++++++++ services/neo4j.yml | 23 +++++++++++++++++++++++ services/web.yml | 21 +++++++++++++++++++++ services/webapp.yml | 21 +++++++++++++++++++++ staging/backend-deployment.yaml | 5 ----- staging/neo4j-configmap.yaml | 2 +- staging/neo4j-service.yaml | 22 ---------------------- 8 files changed, 89 insertions(+), 30 deletions(-) create mode 100644 services/backend.yml create mode 100644 services/neo4j.yml create mode 100644 services/web.yml create mode 100644 services/webapp.yml delete mode 100644 staging/neo4j-service.yaml diff --git a/README.md b/README.md index 29faf84d5..00ddea2d4 100644 --- a/README.md +++ b/README.md @@ -59,8 +59,7 @@ Wait until all pods turn 
green and they don't show a warning `Waiting: Container ## Expose the services ```shell -kubectl expose deployment nitro-backend --namespace=staging --type=LoadBalancer --port=4000 -kubectl expose deployment nitro-web --namespace=staging --type=LoadBalancer --port=3000 +kubectl create -f services/ ``` ## Access the service diff --git a/services/backend.yml b/services/backend.yml new file mode 100644 index 000000000..0d4246275 --- /dev/null +++ b/services/backend.yml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: Service +metadata: + name: nitro-backend + namespace: staging + labels: + workload.user.cattle.io/workloadselector: deployment-staging-backend +spec: + ports: + - name: web + protocol: TCP + port: 4000 + targetPort: 4000 + nodePort: 32612 + selector: + workload.user.cattle.io/workloadselector: deployment-staging-backend + type: LoadBalancer + sessionAffinity: None + externalTrafficPolicy: Cluster +status: + loadBalancer: {} + diff --git a/services/neo4j.yml b/services/neo4j.yml new file mode 100644 index 000000000..681ba2e40 --- /dev/null +++ b/services/neo4j.yml @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: Service +metadata: + name: neo4j + namespace: staging + labels: + workload.user.cattle.io/workloadselector: deployment-staging-neo4j +spec: + selector: + workload.user.cattle.io/workloadselector: deployment-staging-neo4j + ports: + - name: bolt + protocol: TCP + port: 7687 + targetPort: 7687 + - name: web + protocol: TCP + port: 7474 + targetPort: 7474 + type: LoadBalancer + sessionAffinity: None + type: ClusterIP + diff --git a/services/web.yml b/services/web.yml new file mode 100644 index 000000000..a46e27dea --- /dev/null +++ b/services/web.yml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Service +metadata: + name: nitro-web + namespace: staging + labels: + workload.user.cattle.io/workloadselector: deployment-staging-web +spec: + ports: + - name: web + protocol: "TCP" + port: 3000 + targetPort: 3000 + selector: + workload.user.cattle.io/workloadselector: deployment-staging-web + type: LoadBalancer + sessionAffinity: None + externalTrafficPolicy: Cluster +status: + loadBalancer: {} + diff --git a/services/webapp.yml b/services/webapp.yml new file mode 100644 index 000000000..a46e27dea --- /dev/null +++ b/services/webapp.yml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Service +metadata: + name: nitro-web + namespace: staging + labels: + workload.user.cattle.io/workloadselector: deployment-staging-web +spec: + ports: + - name: web + protocol: "TCP" + port: 3000 + targetPort: 3000 + selector: + workload.user.cattle.io/workloadselector: deployment-staging-web + type: LoadBalancer + sessionAffinity: None + externalTrafficPolicy: Cluster +status: + loadBalancer: {} + diff --git a/staging/backend-deployment.yaml b/staging/backend-deployment.yaml index b5f379ddb..f2ebffb9c 100644 --- a/staging/backend-deployment.yaml +++ b/staging/backend-deployment.yaml @@ -7,11 +7,6 @@ spec: replicas: 2 minReadySeconds: 15 progressDeadlineSeconds: 60 - # strategy: - # rollingUpdate: - # maxSurge: 1 - # maxUnavailable: 0 - # type: RollingUpdate selector: matchLabels: workload.user.cattle.io/workloadselector: deployment-staging-backend diff --git a/staging/neo4j-configmap.yaml b/staging/neo4j-configmap.yaml index 2f5ece848..78d1ba3cd 100644 --- a/staging/neo4j-configmap.yaml +++ b/staging/neo4j-configmap.yaml @@ -1,7 +1,7 @@ apiVersion: v1 kind: ConfigMap data: - NEO4J_URI: "bolt://neo4j:7687" + NEO4J_URI: "bolt://neo4j.staging:7687" NEO4J_USER: "neo4j" NEO4J_AUTH: none metadata: diff --git a/staging/neo4j-service.yaml 
b/staging/neo4j-service.yaml deleted file mode 100644 index 0f66d7474..000000000 --- a/staging/neo4j-service.yaml +++ /dev/null @@ -1,22 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - annotations: - field.cattle.io/ipAddresses: "null" - field.cattle.io/targetDnsRecordIds: "null" - field.cattle.io/targetWorkloadIds: '["deployment:staging:nitro-neo4j"]' - labels: - cattle.io/creator: norman - name: neo4j - namespace: staging -spec: - clusterIP: None - ports: - - name: default - port: 42 - protocol: TCP - targetPort: 42 - selector: - workloadID_neo4j: "true" - sessionAffinity: None - type: ClusterIP From bbfe39e0766ce01f35aad45ce7c869b10d0979a5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=A4fer?= Date: Thu, 31 Jan 2019 23:24:20 +0100 Subject: [PATCH 06/26] Use folders for groups of YAML files This keeps our configuration DRY and helps us to save keystrokes. --- README.md | 23 ++++++++--------- config/.gitignore | 1 + .../backend.yml | 0 .../neo4j-configmap.yaml => config/neo4j.yml | 0 staging/web-configmap.yaml => config/web.yml | 0 ...e.yaml => db-migration-worker.template.yml | 0 .../backend.yml | 0 .../db-migration-worker.yml | 0 .../neo4j.yml | 0 .../web.yml | 0 dummies/backend-service.yaml | 13 ---------- dummies/do-loadbalancer.yaml | 12 --------- dummies/ingress-backend.yaml | 15 ----------- dummies/nginx.yaml | 22 ---------------- dummies/web-service.yaml | 13 ---------- namespace-staging.json | 10 -------- namespace-staging.yml | 6 +++++ ...rets.yaml.template => secrets.template.yml | 0 services/backend.yml | 2 -- services/neo4j.yml | 2 -- services/web.yml | 1 - services/webapp.yml | 21 ---------------- staging/.gitignore | 1 - staging/volumes/mongo-export-claim.yaml | 11 -------- staging/volumes/mongo-export-volume.yaml | 12 --------- staging/volumes/ssh-keys-volume-claim.yml | 11 -------- staging/volumes/ssh-keys-volume.yaml | 12 --------- staging/volumes/uploads-claim.yaml | 11 -------- staging/volumes/uploads-volume.yaml | 12 --------- volumes/mongo-export.yml | 25 +++++++++++++++++++ volumes/ssh-keys.yml | 25 +++++++++++++++++++ volumes/uploads.yml | 25 +++++++++++++++++++ 32 files changed, 93 insertions(+), 193 deletions(-) create mode 100644 config/.gitignore rename staging/backend-configmap.yaml => config/backend.yml (100%) rename staging/neo4j-configmap.yaml => config/neo4j.yml (100%) rename staging/web-configmap.yaml => config/web.yml (100%) rename staging/db-migration-worker-configmap.template.yaml => db-migration-worker.template.yml (100%) rename staging/backend-deployment.yaml => deployments/backend.yml (100%) rename staging/db-migration-worker-deployment.yaml => deployments/db-migration-worker.yml (100%) rename staging/neo4j-deployment.yaml => deployments/neo4j.yml (100%) rename staging/web-deployment.yaml => deployments/web.yml (100%) delete mode 100644 dummies/backend-service.yaml delete mode 100644 dummies/do-loadbalancer.yaml delete mode 100644 dummies/ingress-backend.yaml delete mode 100644 dummies/nginx.yaml delete mode 100644 dummies/web-service.yaml delete mode 100644 namespace-staging.json create mode 100644 namespace-staging.yml rename staging/secrets.yaml.template => secrets.template.yml (100%) delete mode 100644 services/webapp.yml delete mode 100644 staging/.gitignore delete mode 100644 staging/volumes/mongo-export-claim.yaml delete mode 100644 staging/volumes/mongo-export-volume.yaml delete mode 100644 staging/volumes/ssh-keys-volume-claim.yml delete mode 100644 staging/volumes/ssh-keys-volume.yaml delete mode 100644 
staging/volumes/uploads-claim.yaml
 delete mode 100644 staging/volumes/uploads-volume.yaml
 create mode 100644 volumes/mongo-export.yml
 create mode 100644 volumes/ssh-keys.yml
 create mode 100644 volumes/uploads.yml

diff --git a/README.md b/README.md
index 00ddea2d4..48d644622 100644
--- a/README.md
+++ b/README.md
@@ -19,35 +19,33 @@ There are many Kubernetes distributions, but if you're just getting started, Min
 ## Create a namespace locally
 ```shell
-kubectl create -f namespace-staging.json
+kubectl create -f namespace-staging.yml
 ```
 
 ## Change config maps according to your needs
 ```shell
-cd ./staging
+cd config/
 cp db-migration-worker-configmap.template.yaml db-migration-worker-configmap.yaml
 # edit all variables according to the setup of the remote legacy server
+cd ..
 ```
 
 ## Apply the config map to staging namespace
 ```shell
-cd ./staging
-kubectl apply -f neo4j-configmap.yaml -f backend-configmap.yaml -f web-configmap.yaml -f db-migration-worker-configmap.yaml
+kubectl apply -f config/
 ```
 
 ## Setup secrets and deploy them
 ```shell
-cd ./staging
 cp secrets.yaml.template secrets.yaml
 # change all vars as needed and deploy it afterwards
 kubectl apply -f secrets.yaml
 ```
 
-## Deploy the app
+## Create volumes and deployments
 ```shell
-cd ./staging
-kubectl apply -f ./volumes
-kubectl apply -f neo4j-deployment.yaml -f backend-deployment.yaml -f web-deployment.yaml -f db-migration-worker-deployment.yaml
+kubectl apply -f volumes/
+kubectl apply -f deployments/
 ```
 This can take a while.
 Sit back and relax and have a look into your minikube dashboard:
@@ -59,7 +57,7 @@ Wait until all pods turn green and they don't show a warning `Waiting: Container
 ## Expose the services
 ```shell
-kubectl create -f services/
+kubectl apply -f services/
 ```
 
 ## Access the service
@@ -77,11 +75,12 @@ Copy your private ssh key and the `.known-hosts` file of your remote legacy serv
 
 # check the corresponding db-migration-worker pod
 kubectl --namespace=staging get pods
 # change below
-kubectl cp path/to/your/ssh/keys/folder staging/nitro-db-migration-worker-<pod-id>:/root/
+kubectl cp path/to/your/ssh/keys/.ssh staging/nitro-db-migration-worker-<pod-id>:/root/
 ```
 
 Run the migration:
 ```shell
-# change below
+# change below
 kubectl --namespace=staging exec -it nitro-db-migration-worker-<pod-id> ./import.sh
+kubectl --namespace=staging exec -it nitro-neo4j-<pod-id> ./import/import.sh
 ```
diff --git a/config/.gitignore b/config/.gitignore
new file mode 100644
index 000000000..6fe22561d
--- /dev/null
+++ b/config/.gitignore
@@ -0,0 +1 @@
+db-migration-worker.yml
diff --git a/staging/backend-configmap.yaml b/config/backend.yml
similarity index 100%
rename from staging/backend-configmap.yaml
rename to config/backend.yml
diff --git a/staging/neo4j-configmap.yaml b/config/neo4j.yml
similarity index 100%
rename from staging/neo4j-configmap.yaml
rename to config/neo4j.yml
diff --git a/staging/web-configmap.yaml b/config/web.yml
similarity index 100%
rename from staging/web-configmap.yaml
rename to config/web.yml
diff --git a/staging/db-migration-worker-configmap.template.yaml b/db-migration-worker.template.yml
similarity index 100%
rename from staging/db-migration-worker-configmap.template.yaml
rename to db-migration-worker.template.yml
diff --git a/staging/backend-deployment.yaml b/deployments/backend.yml
similarity index 100%
rename from staging/backend-deployment.yaml
rename to deployments/backend.yml
diff --git a/staging/db-migration-worker-deployment.yaml b/deployments/db-migration-worker.yml
similarity index 100%
rename from
staging/db-migration-worker-deployment.yaml rename to deployments/db-migration-worker.yml diff --git a/staging/neo4j-deployment.yaml b/deployments/neo4j.yml similarity index 100% rename from staging/neo4j-deployment.yaml rename to deployments/neo4j.yml diff --git a/staging/web-deployment.yaml b/deployments/web.yml similarity index 100% rename from staging/web-deployment.yaml rename to deployments/web.yml diff --git a/dummies/backend-service.yaml b/dummies/backend-service.yaml deleted file mode 100644 index 48fffbc24..000000000 --- a/dummies/backend-service.yaml +++ /dev/null @@ -1,13 +0,0 @@ -kind: Service -apiVersion: v1 -metadata: - labels: - k8s-app: nitro-backend - name: nitro-backend - namespace: staging -spec: - ports: - - port: 4000 - targetPort: 4000 - selector: - k8s-app: nitro-backend diff --git a/dummies/do-loadbalancer.yaml b/dummies/do-loadbalancer.yaml deleted file mode 100644 index 9c700e082..000000000 --- a/dummies/do-loadbalancer.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: sample-load-balancer - namespace: staging -spec: - type: LoadBalancer - ports: - - protocol: TCP - port: 80 - targetPort: 80 - name: http diff --git a/dummies/ingress-backend.yaml b/dummies/ingress-backend.yaml deleted file mode 100644 index 0640b49fd..000000000 --- a/dummies/ingress-backend.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: backend-ingress - namespace: staging - annotations: - nginx.ingress.kubernetes.io/rewrite-target: / -spec: - rules: - - http: - paths: - - path: / - backend: - serviceName: backend - servicePort: 4000 diff --git a/dummies/nginx.yaml b/dummies/nginx.yaml deleted file mode 100644 index 1f5136b4b..000000000 --- a/dummies/nginx.yaml +++ /dev/null @@ -1,22 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: ingress-nginx - namespace: staging - labels: - app.kubernetes.io/name: ingress-nginx - app.kubernetes.io/part-of: ingress-nginx -spec: - type: NodePort - ports: - - name: http - port: 80 - targetPort: 80 - protocol: TCP - - name: https - port: 443 - targetPort: 443 - protocol: TCP - selector: - app.kubernetes.io/name: ingress-nginx - app.kubernetes.io/part-of: ingress-nginx diff --git a/dummies/web-service.yaml b/dummies/web-service.yaml deleted file mode 100644 index 847ba3c05..000000000 --- a/dummies/web-service.yaml +++ /dev/null @@ -1,13 +0,0 @@ -kind: Service -apiVersion: v1 -metadata: - labels: - k8s-app: nitro-web - name: nitro-web - namespace: staging -spec: - ports: - - port: 3000 - targetPort: 3000 - selector: - k8s-app: nitro-web diff --git a/namespace-staging.json b/namespace-staging.json deleted file mode 100644 index 6b71bc772..000000000 --- a/namespace-staging.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "kind": "Namespace", - "apiVersion": "v1", - "metadata": { - "name": "staging", - "labels": { - "name": "staging" - } - } -} \ No newline at end of file diff --git a/namespace-staging.yml b/namespace-staging.yml new file mode 100644 index 000000000..d63b4e0f9 --- /dev/null +++ b/namespace-staging.yml @@ -0,0 +1,6 @@ +kind: Namespace +apiVersion: v1 +metadata: + name: staging + labels: + name: staging diff --git a/staging/secrets.yaml.template b/secrets.template.yml similarity index 100% rename from staging/secrets.yaml.template rename to secrets.template.yml diff --git a/services/backend.yml b/services/backend.yml index 0d4246275..d4f01286a 100644 --- a/services/backend.yml +++ b/services/backend.yml @@ -8,10 +8,8 @@ metadata: spec: ports: - name: web - 
protocol: TCP port: 4000 targetPort: 4000 - nodePort: 32612 selector: workload.user.cattle.io/workloadselector: deployment-staging-backend type: LoadBalancer diff --git a/services/neo4j.yml b/services/neo4j.yml index 681ba2e40..e071f78bb 100644 --- a/services/neo4j.yml +++ b/services/neo4j.yml @@ -10,11 +10,9 @@ spec: workload.user.cattle.io/workloadselector: deployment-staging-neo4j ports: - name: bolt - protocol: TCP port: 7687 targetPort: 7687 - name: web - protocol: TCP port: 7474 targetPort: 7474 type: LoadBalancer diff --git a/services/web.yml b/services/web.yml index a46e27dea..e1bd542f1 100644 --- a/services/web.yml +++ b/services/web.yml @@ -8,7 +8,6 @@ metadata: spec: ports: - name: web - protocol: "TCP" port: 3000 targetPort: 3000 selector: diff --git a/services/webapp.yml b/services/webapp.yml deleted file mode 100644 index a46e27dea..000000000 --- a/services/webapp.yml +++ /dev/null @@ -1,21 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: nitro-web - namespace: staging - labels: - workload.user.cattle.io/workloadselector: deployment-staging-web -spec: - ports: - - name: web - protocol: "TCP" - port: 3000 - targetPort: 3000 - selector: - workload.user.cattle.io/workloadselector: deployment-staging-web - type: LoadBalancer - sessionAffinity: None - externalTrafficPolicy: Cluster -status: - loadBalancer: {} - diff --git a/staging/.gitignore b/staging/.gitignore deleted file mode 100644 index d9c5e61bf..000000000 --- a/staging/.gitignore +++ /dev/null @@ -1 +0,0 @@ -db-migration-worker-configmap.yaml diff --git a/staging/volumes/mongo-export-claim.yaml b/staging/volumes/mongo-export-claim.yaml deleted file mode 100644 index 1c91996db..000000000 --- a/staging/volumes/mongo-export-claim.yaml +++ /dev/null @@ -1,11 +0,0 @@ -kind: PersistentVolumeClaim -apiVersion: v1 -metadata: - name: mongo-export-claim - namespace: staging -spec: - accessModes: - - ReadWriteMany - resources: - requests: - storage: 1Gi diff --git a/staging/volumes/mongo-export-volume.yaml b/staging/volumes/mongo-export-volume.yaml deleted file mode 100644 index 945c28765..000000000 --- a/staging/volumes/mongo-export-volume.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: v1 -kind: PersistentVolume -metadata: - name: mongo-export-volume - namespace: staging -spec: - accessModes: - - ReadWriteMany - capacity: - storage: 1Gi - hostPath: - path: /data/shared/mongo-exports/ diff --git a/staging/volumes/ssh-keys-volume-claim.yml b/staging/volumes/ssh-keys-volume-claim.yml deleted file mode 100644 index 30191a500..000000000 --- a/staging/volumes/ssh-keys-volume-claim.yml +++ /dev/null @@ -1,11 +0,0 @@ -kind: PersistentVolumeClaim -apiVersion: v1 -metadata: - name: ssh-keys-claim - namespace: staging -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Mi diff --git a/staging/volumes/ssh-keys-volume.yaml b/staging/volumes/ssh-keys-volume.yaml deleted file mode 100644 index 9c0353b74..000000000 --- a/staging/volumes/ssh-keys-volume.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: v1 -kind: PersistentVolume -metadata: - name: ssh-keys-volume - namespace: staging -spec: - accessModes: - - ReadWriteOnce - capacity: - storage: 1Mi - hostPath: - path: /data/pv0001/ diff --git a/staging/volumes/uploads-claim.yaml b/staging/volumes/uploads-claim.yaml deleted file mode 100644 index c1b11ed4e..000000000 --- a/staging/volumes/uploads-claim.yaml +++ /dev/null @@ -1,11 +0,0 @@ -kind: PersistentVolumeClaim -apiVersion: v1 -metadata: - name: uploads-claim - namespace: staging -spec: - accessModes: - - 
ReadWriteMany
-  resources:
-    requests:
-      storage: 8Gi
diff --git a/staging/volumes/uploads-volume.yaml b/staging/volumes/uploads-volume.yaml
deleted file mode 100644
index 4600a76a4..000000000
--- a/staging/volumes/uploads-volume.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-apiVersion: v1
-kind: PersistentVolume
-metadata:
-  name: uploads-volume
-  namespace: staging
-spec:
-  accessModes:
-    - ReadWriteMany
-  capacity:
-    storage: 8Gi
-  hostPath:
-    path: /data/shared/uploads/
diff --git a/volumes/mongo-export.yml b/volumes/mongo-export.yml
new file mode 100644
index 000000000..a5ef064cc
--- /dev/null
+++ b/volumes/mongo-export.yml
@@ -0,0 +1,25 @@
+---
+ kind: PersistentVolume
+ apiVersion: v1
+ metadata:
+   name: mongo-export-volume
+   namespace: staging
+ spec:
+   accessModes:
+     - ReadWriteMany
+   capacity:
+     storage: 1Gi
+   hostPath:
+     path: /data/shared/mongo-exports/
+---
+ kind: PersistentVolumeClaim
+ apiVersion: v1
+ metadata:
+   name: mongo-export-claim
+   namespace: staging
+ spec:
+   accessModes:
+     - ReadWriteMany
+   resources:
+     requests:
+       storage: 1Gi
diff --git a/volumes/ssh-keys.yml b/volumes/ssh-keys.yml
new file mode 100644
index 000000000..4ffd83e80
--- /dev/null
+++ b/volumes/ssh-keys.yml
@@ -0,0 +1,25 @@
+---
+ apiVersion: v1
+ kind: PersistentVolume
+ metadata:
+   name: ssh-keys-volume
+   namespace: staging
+ spec:
+   accessModes:
+     - ReadWriteOnce
+   capacity:
+     storage: 1Mi
+   hostPath:
+     path: /data/pv0001/
+---
+ kind: PersistentVolumeClaim
+ apiVersion: v1
+ metadata:
+   name: ssh-keys-claim
+   namespace: staging
+ spec:
+   accessModes:
+     - ReadWriteOnce
+   resources:
+     requests:
+       storage: 1Mi
diff --git a/volumes/uploads.yml b/volumes/uploads.yml
new file mode 100644
index 000000000..34b600aab
--- /dev/null
+++ b/volumes/uploads.yml
@@ -0,0 +1,25 @@
+---
+ apiVersion: v1
+ kind: PersistentVolume
+ metadata:
+   name: uploads-volume
+   namespace: staging
+ spec:
+   accessModes:
+     - ReadWriteMany
+   capacity:
+     storage: 8Gi
+   hostPath:
+     path: /data/shared/uploads/
+---
+ kind: PersistentVolumeClaim
+ apiVersion: v1
+ metadata:
+   name: uploads-claim
+   namespace: staging
+ spec:
+   accessModes:
+     - ReadWriteMany
+   resources:
+     requests:
+       storage: 8Gi

From c6661def314f80582170664333eca6defeb12614 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Robert=20Sch=C3=A4fer?=
Date: Thu, 31 Jan 2019 23:52:24 +0100
Subject: [PATCH 07/26] Improve README

---
 .gitignore | 2 +-
 README.md | 78 ++++++++++++++++++++++++++++++++----------------------
 2 files changed, 47 insertions(+), 33 deletions(-)

diff --git a/.gitignore b/.gitignore
index da61c76ef..32cfb3b9e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1 @@
-*secrets*.yaml
+*secrets*.yml
diff --git a/README.md b/README.md
index 48d644622..1c9212c07 100644
--- a/README.md
+++ b/README.md
@@ -4,9 +4,9 @@
 ## Todo`s
 - [ ] check labels and selectors if they all are correct
-- [ ] configure NGINX from yaml
-- [ ] configure Let's Encrypt cert-manager from yaml
-- [ ] configure ingress form yaml
+- [ ] configure NGINX from yml
+- [ ] configure Let's Encrypt cert-manager from yml
+- [ ] configure ingress from yml
 - [ ] configure persistent & shared storage between nodes
 - [ ] reproduce setup locally
 
@@ -17,70 +17,84 @@ There are many Kubernetes distributions, but if you're just getting started, Min
 [Install Minikube](https://kubernetes.io/docs/tasks/tools/install-minikube/)
 
+## Open minikube dashboard
+```
+$ minikube dashboard
+```
+This will give you an overview.
+Some of the steps below need some time to make resources available to other
+dependent deployments. Keeping an eye on the dashboard is a great way to check
+that.
+
 ## Create a namespace locally
 ```shell
-kubectl create -f namespace-staging.yml
+$ kubectl create -f namespace-staging.yml
 ```
+Switch to the namespace `staging` in your kubernetes dashboard.
 
-## Change config maps according to your needs
+## Setup config maps
 ```shell
-cd config/
-cp db-migration-worker-configmap.template.yaml db-migration-worker-configmap.yaml
+$ cp db-migration-worker.template.yml config/db-migration-worker.yml
 # edit all variables according to the setup of the remote legacy server
-cd ..
-```
 
-## Apply the config map to staging namespace
-```shell
-kubectl apply -f config/
+$ kubectl apply -f config/
 ```
 
 ## Setup secrets and deploy them
+If you want to edit secrets, you have to `base64` encode them. See [kubernetes
+documentation](https://kubernetes.io/docs/concepts/configuration/secret/#creating-a-secret-manually).
 ```shell
+# example of how to base64 a string:
+$ echo -n 'admin' | base64
+YWRtaW4=
+
-cp secrets.yaml.template secrets.yaml
-# change all vars as needed and deploy it afterwards
-kubectl apply -f secrets.yaml
+$ cp secrets.template.yml secrets.yml
+# change all variables as needed and deploy them
+$ kubectl apply -f secrets.yml
 ```
 
-## Create volumes and deployments
+## Create volumes
 ```shell
-kubectl apply -f volumes/
-kubectl apply -f deployments/
+$ kubectl apply -f volumes/
 ```
+Wait until the persistent volumes get available.
 
+## Create deployments
+```shell
+$ kubectl apply -f deployments/
+```
-This can take a while.
-Sit back and relax and have a look into your minikube dashboard:
-```
-minikube dashboard
-```
-Wait until all pods turn green and they don't show a warning `Waiting: ContainerCreating` anymore.
+This can take a while because kubernetes will download the docker images.
+Sit back and relax and have a look into your kubernetes dashboard.
+Wait until all pods turn green and they don't show a warning
+`Waiting: ContainerCreating` anymore.
 
 ## Expose the services
 ```shell
-kubectl apply -f services/
+$ kubectl apply -f services/
 ```
 
-## Access the service
+## Access the services
 ```shell
-minikube service nitro-backend --namespace=staging
-minikube service nitro-web --namespace=staging
+$ minikube service nitro-backend --namespace=staging
+$ minikube service nitro-web --namespace=staging
 ```
 
-## Provisioning db-migration-worker
+## Provision db-migration-worker
 Copy your private ssh key and the `.known-hosts` file of your remote legacy server.
 ```shell
 
 # check the corresponding db-migration-worker pod
 $ kubectl --namespace=staging get pods
 # change below
 $ kubectl cp path/to/your/ssh/keys/.ssh staging/nitro-db-migration-worker-<pod-id>:/root/
 ```
 
 Run the migration:
 ```shell
 # change below
 $ kubectl --namespace=staging exec -it nitro-db-migration-worker-<pod-id> ./import.sh
 $ kubectl --namespace=staging exec -it nitro-neo4j-<pod-id> ./import/import.sh
 ```

From 7b981c06d08835fce95a4376c546472e78880af5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Robert=20Sch=C3=A4fer?=
Date: Fri, 1 Feb 2019 00:23:53 +0100
Subject: [PATCH 08/26] Point web service to backend of internal network

---
 config/backend.yml | 2 +-
 services/backend.yml | 4 ----
 services/neo4j.yml | 6 ++----
 services/web.yml | 4 ----
 4 files changed, 3 insertions(+), 13 deletions(-)

diff --git a/config/backend.yml b/config/backend.yml
index ba7c819bc..cfb19b538 100644
--- a/config/backend.yml
+++ b/config/backend.yml
@@ -2,7 +2,7 @@ apiVersion: v1
 kind: ConfigMap
 data:
   GRAPHQL_PORT: "4000"
-  GRAPHQL_URI: "https://api-nitro-staging.human-connection.org"
+  GRAPHQL_URI: "http://nitro-backend.staging:4000"
   MOCK: "false"
 metadata:
   name: staging-backend
diff --git a/services/backend.yml b/services/backend.yml
index d4f01286a..1654f884a 100644
--- a/services/backend.yml
+++ b/services/backend.yml
@@ -13,8 +13,4 @@ spec:
   selector:
     workload.user.cattle.io/workloadselector: deployment-staging-backend
   type: LoadBalancer
-  sessionAffinity: None
   externalTrafficPolicy: Cluster
-status:
-  loadBalancer: {}
-
diff --git a/services/neo4j.yml b/services/neo4j.yml
index e071f78bb..65b7785b6 100644
--- a/services/neo4j.yml
+++ b/services/neo4j.yml
@@ -6,8 +6,6 @@ metadata:
   labels:
     workload.user.cattle.io/workloadselector: deployment-staging-neo4j
 spec:
-  selector:
-    workload.user.cattle.io/workloadselector: deployment-staging-neo4j
   ports:
   - name: bolt
     port: 7687
@@ -15,7 +13,7 @@ spec:
   - name: web
     port: 7474
     targetPort: 7474
-  type: LoadBalancer
-  sessionAffinity: None
+  selector:
+    workload.user.cattle.io/workloadselector: deployment-staging-neo4j
   type: ClusterIP
 
diff --git a/services/web.yml b/services/web.yml
index e1bd542f1..ad2b9678b 100644
--- a/services/web.yml
+++ b/services/web.yml
@@ -13,8 +13,4 @@ spec:
   selector:
     workload.user.cattle.io/workloadselector: deployment-staging-web
   type: LoadBalancer
-  sessionAffinity: None
   externalTrafficPolicy: Cluster
-status:
-  loadBalancer: {}
-

From e8b212f5ccdd2b4718755ea3dcffb9fbd75de514 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Robert=20Sch=C3=A4fer?=
Date: Fri, 1 Feb 2019 01:08:19 +0100
Subject: [PATCH 09/26] Backend is accessible only from internal network

I think this is better. For development it might be interesting to have
access to the backend, but I think one should do development with
docker-compose, not minikube and such.
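To double-check the effect of this change (a sketch, not part of the patch
itself; the pod name `curl-test` and the `curlimages/curl` image are
arbitrary choices), the backend should still answer from inside the cluster
while no longer being exposed externally:

```shell
# in-cluster: the service DNS name resolves and the backend answers
$ kubectl run curl-test --namespace=staging --rm -it --restart=Never \
    --image=curlimages/curl -- curl -s http://nitro-backend.staging:4000
```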
--- README.md | 3 +-- services/backend.yml | 2 -- services/neo4j.yml | 2 -- 3 files changed, 1 insertion(+), 6 deletions(-) diff --git a/README.md b/README.md index 1c9212c07..1fbabb948 100644 --- a/README.md +++ b/README.md @@ -77,8 +77,7 @@ $ kubectl apply -f services/ ## Access the services ```shell -$ minikube service nitro-backend --namespace=staging -$ minikube service nitro-web --namespace=staging +$ minikube service nitro-web --namespace=staging ``` diff --git a/services/backend.yml b/services/backend.yml index 1654f884a..39cfca63a 100644 --- a/services/backend.yml +++ b/services/backend.yml @@ -12,5 +12,3 @@ spec: targetPort: 4000 selector: workload.user.cattle.io/workloadselector: deployment-staging-backend - type: LoadBalancer - externalTrafficPolicy: Cluster diff --git a/services/neo4j.yml b/services/neo4j.yml index 65b7785b6..4ff0953a7 100644 --- a/services/neo4j.yml +++ b/services/neo4j.yml @@ -15,5 +15,3 @@ spec: targetPort: 7474 selector: workload.user.cattle.io/workloadselector: deployment-staging-neo4j - type: ClusterIP - From 9b5f88d7ac9d573c9989c2213b9efad41ae24450 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=A4fer?= Date: Fri, 1 Feb 2019 01:25:05 +0100 Subject: [PATCH 10/26] Kubernetes best practices: * put many configurations in one file * expose services before you start pods See: https://kubernetes.io/docs/concepts/configuration/overview/ --- README.md | 13 ++- deployments/db-migration-worker.yml | 172 ++++++++++++++++------------ deployments/neo4j.yml | 144 ++++++++++++++--------- deployments/web.yml | 5 - volumes/ssh-keys.yml | 25 ---- 5 files changed, 193 insertions(+), 166 deletions(-) delete mode 100644 volumes/ssh-keys.yml diff --git a/README.md b/README.md index 1fbabb948..0096071ee 100644 --- a/README.md +++ b/README.md @@ -57,7 +57,13 @@ $ kubectl apply -f secrets.yml ```shell $ kubectl apply -f volumes/ ``` -Wait until the persistent volumes get available. + +## Expose the services + +```shell +$ kubectl apply -f services/ +``` +Wait until persistent volumes and services become available. ## Create deployments ```shell @@ -68,11 +74,6 @@ Sit back and relax and have a look into your kubernetes dashboard. Wait until all pods turn green and they don't show a warning `Waiting: ContainerCreating` anymore. 
-## Expose the services - -```shell -$ kubectl apply -f services/ -``` ## Access the services diff --git a/deployments/db-migration-worker.yml b/deployments/db-migration-worker.yml index 509f98093..685904aba 100644 --- a/deployments/db-migration-worker.yml +++ b/deployments/db-migration-worker.yml @@ -1,75 +1,101 @@ -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: nitro-db-migration-worker - namespace: staging -spec: - replicas: 1 - minReadySeconds: 15 - progressDeadlineSeconds: 60 - selector: - matchLabels: - workload.user.cattle.io/workloadselector: deployment-staging-db-migration-worker - template: - metadata: - labels: +--- + apiVersion: extensions/v1beta1 + kind: Deployment + metadata: + name: nitro-db-migration-worker + namespace: staging + spec: + replicas: 1 + minReadySeconds: 15 + progressDeadlineSeconds: 60 + selector: + matchLabels: workload.user.cattle.io/workloadselector: deployment-staging-db-migration-worker - name: "nitro-db-migration-worker" - spec: - containers: - - env: - - name: SSH_USERNAME - valueFrom: - configMapKeyRef: - name: staging-db-migration-worker - key: SSH_USERNAME - - name: SSH_HOST - valueFrom: - configMapKeyRef: - name: staging-db-migration-worker - key: SSH_HOST - - name: MONGODB_USERNAME - valueFrom: - configMapKeyRef: - name: staging-db-migration-worker - key: MONGODB_USERNAME - - name: MONGODB_AUTH_DB - valueFrom: - configMapKeyRef: - name: staging-db-migration-worker - key: MONGODB_AUTH_DB - - name: MONGODB_DATABASE - valueFrom: - configMapKeyRef: - name: staging-db-migration-worker - key: MONGODB_DATABASE - - name: UPLOADS_DIRECTORY - valueFrom: - configMapKeyRef: - name: staging-db-migration-worker - key: UPLOADS_DIRECTORY - - name: MONGODB_PASSWORD - valueFrom: - secretKeyRef: - name: staging - key: MONGODB_PASSWORD - optional: false - image: humanconnection/db-migration-worker:latest + template: + metadata: + labels: + workload.user.cattle.io/workloadselector: deployment-staging-db-migration-worker name: nitro-db-migration-worker - resources: {} - imagePullPolicy: Always - volumeMounts: - - mountPath: /root/ - name: ssh-keys-directory - - mountPath: /mongo-export/ - name: mongo-export - restartPolicy: Always - volumes: - - name: ssh-keys-directory - persistentVolumeClaim: - claimName: ssh-keys-claim - - name: mongo-export - persistentVolumeClaim: - claimName: mongo-export-claim - terminationGracePeriodSeconds: 30 -status: {} + spec: + containers: + - env: + - name: SSH_USERNAME + valueFrom: + configMapKeyRef: + name: staging-db-migration-worker + key: SSH_USERNAME + - name: SSH_HOST + valueFrom: + configMapKeyRef: + name: staging-db-migration-worker + key: SSH_HOST + - name: MONGODB_USERNAME + valueFrom: + configMapKeyRef: + name: staging-db-migration-worker + key: MONGODB_USERNAME + - name: MONGODB_AUTH_DB + valueFrom: + configMapKeyRef: + name: staging-db-migration-worker + key: MONGODB_AUTH_DB + - name: MONGODB_DATABASE + valueFrom: + configMapKeyRef: + name: staging-db-migration-worker + key: MONGODB_DATABASE + - name: UPLOADS_DIRECTORY + valueFrom: + configMapKeyRef: + name: staging-db-migration-worker + key: UPLOADS_DIRECTORY + - name: MONGODB_PASSWORD + valueFrom: + secretKeyRef: + name: staging + key: MONGODB_PASSWORD + optional: false + image: humanconnection/db-migration-worker:latest + name: nitro-db-migration-worker + resources: {} + imagePullPolicy: Always + volumeMounts: + - mountPath: /root/ + name: ssh-keys-directory + - mountPath: /mongo-export/ + name: mongo-export + restartPolicy: Always + volumes: + - 
name: ssh-keys-directory + persistentVolumeClaim: + claimName: ssh-keys-claim + - name: mongo-export + persistentVolumeClaim: + claimName: mongo-export-claim + terminationGracePeriodSeconds: 30 + status: {} +--- + apiVersion: v1 + kind: PersistentVolume + metadata: + name: ssh-keys-volume + namespace: staging + spec: + accessModes: + - ReadWriteOnce + capacity: + storage: 1Mi + hostPath: + path: /data/pv0001/ +--- + kind: PersistentVolumeClaim + apiVersion: v1 + metadata: + name: ssh-keys-claim + namespace: staging + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Mi diff --git a/deployments/neo4j.yml b/deployments/neo4j.yml index 2b3485c77..7f96cd45a 100644 --- a/deployments/neo4j.yml +++ b/deployments/neo4j.yml @@ -1,59 +1,89 @@ -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: nitro-neo4j - namespace: staging -spec: - replicas: 1 - strategy: {} - selector: - matchLabels: - workload.user.cattle.io/workloadselector: deployment-staging-neo4j - template: - metadata: - labels: +--- + apiVersion: extensions/v1beta1 + kind: Deployment + metadata: + name: nitro-neo4j + namespace: staging + spec: + replicas: 1 + strategy: {} + selector: + matchLabels: workload.user.cattle.io/workloadselector: deployment-staging-neo4j - name: "nitro-neo4j" - spec: - containers: - - env: - - name: NEO4J_apoc_import_file_enabled - value: "true" - - name: NEO4J_dbms_memory_pagecache_size - value: 1G - - name: NEO4J_dbms_memory_heap_max__size - value: 1G - - name: NEO4J_AUTH - value: none - - name: NEO4J_URI - valueFrom: - configMapKeyRef: - name: staging-neo4j - key: NEO4J_URI - - name: NEO4J_USER - valueFrom: - configMapKeyRef: - name: staging-neo4j - key: NEO4J_USER - - name: NEO4J_AUTH - valueFrom: - configMapKeyRef: - name: staging-neo4j - key: NEO4J_AUTH - image: humanconnection/neo4j:latest + template: + metadata: + labels: + workload.user.cattle.io/workloadselector: deployment-staging-neo4j name: nitro-neo4j - ports: - - containerPort: 7687 - - containerPort: 7474 - # - containerPort: 7473 - resources: {} - imagePullPolicy: IfNotPresent - volumeMounts: - - mountPath: /mongo-export/ - name: mongo-export - restartPolicy: Always - volumes: - - name: mongo-export - persistentVolumeClaim: - claimName: mongo-export-claim -status: {} + spec: + containers: + - env: + - name: NEO4J_apoc_import_file_enabled + value: "true" + - name: NEO4J_dbms_memory_pagecache_size + value: 1G + - name: NEO4J_dbms_memory_heap_max__size + value: 1G + - name: NEO4J_AUTH + value: none + - name: NEO4J_URI + valueFrom: + configMapKeyRef: + name: staging-neo4j + key: NEO4J_URI + - name: NEO4J_USER + valueFrom: + configMapKeyRef: + name: staging-neo4j + key: NEO4J_USER + - name: NEO4J_AUTH + valueFrom: + configMapKeyRef: + name: staging-neo4j + key: NEO4J_AUTH + image: humanconnection/neo4j:latest + name: nitro-neo4j + ports: + - containerPort: 7687 + - containerPort: 7474 + resources: {} + imagePullPolicy: IfNotPresent + volumeMounts: + - mountPath: /data/ + name: neo4j-data + - mountPath: /mongo-export/ + name: mongo-export + restartPolicy: Always + volumes: + - name: mongo-export + persistentVolumeClaim: + claimName: mongo-export-claim + - name: neo4j-data + persistentVolumeClaim: + claimName: neo4j-data-claim + status: {} +--- + apiVersion: v1 + kind: PersistentVolume + metadata: + name: neo4j-data-volume + namespace: staging + spec: + accessModes: + - ReadWriteOnce + capacity: + storage: 4Gi + hostPath: + path: /data/neo4j/ +--- + kind: PersistentVolumeClaim + apiVersion: v1 + metadata: + 
name: neo4j-data-claim + namespace: staging + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 4Gi diff --git a/deployments/web.yml b/deployments/web.yml index 5cad7f039..43be04bbe 100644 --- a/deployments/web.yml +++ b/deployments/web.yml @@ -7,11 +7,6 @@ spec: replicas: 2 minReadySeconds: 15 progressDeadlineSeconds: 60 - # strategy: - # rollingUpdate: - # maxSurge: 1 - # maxUnavailable: 0 - # type: RollingUpdate selector: matchLabels: workload.user.cattle.io/workloadselector: deployment-staging-web diff --git a/volumes/ssh-keys.yml b/volumes/ssh-keys.yml deleted file mode 100644 index 4ffd83e80..000000000 --- a/volumes/ssh-keys.yml +++ /dev/null @@ -1,25 +0,0 @@ ---- - apiVersion: v1 - kind: PersistentVolume - metadata: - name: ssh-keys-volume - namespace: staging - spec: - accessModes: - - ReadWriteOnce - capacity: - storage: 1Mi - hostPath: - path: /data/pv0001/ ---- - kind: PersistentVolumeClaim - apiVersion: v1 - metadata: - name: ssh-keys-claim - namespace: staging - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Mi From ad3a97407688778b192b80c9a63af85f4b020e6d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=A4fer?= Date: Fri, 1 Feb 2019 01:53:51 +0100 Subject: [PATCH 11/26] Base64 encode default secrets in template The JWT_SECRET is taken from the current default secret in `Nitro-Backend`, the MONGODB_PASSWORD is just the label encoded. --- secrets.template.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/secrets.template.yml b/secrets.template.yml index f8a4642a3..755cd2d06 100644 --- a/secrets.template.yml +++ b/secrets.template.yml @@ -1,7 +1,8 @@ apiVersion: v1 kind: Secret data: - JWT_SECRET: "aHVtYW5jb25uZWN0aW9uLWRlcGxveW1lbnQ=" + JWT_SECRET: "Yi8mJjdiNzhCRiZmdi9WZA==" + MONGODB_PASSWORD: "TU9OR09EQl9QQVNTV09SRA==" metadata: name: staging namespace: staging From 6fed4797eddafcf7e01c0140a6f3ad49477dee0c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=A4fer?= Date: Fri, 1 Feb 2019 18:50:30 +0100 Subject: [PATCH 12/26] Add commits to configuration to deploy :latest tag The recommended way to update a kubernetes deployment to a new image is to change the image tag. However, our build server is configured to push every commit of the `master` branch to docker hub to the respective repository and tag `:latest`. So adding some configuration that can be changed seems to be a trick to re-deploy the `:latest` image. 
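One way to trigger such a redeployment (an illustrative command, not part of
this patch; it assumes the `COMMIT` variable introduced below and a checkout
of the application repository):

```shell
# changing COMMIT alters the pod template, so kubernetes rolls the
# deployment and pulls the :latest image again (imagePullPolicy: Always)
$ kubectl --namespace=staging set env deployment/nitro-web COMMIT=$(git rev-parse HEAD)
```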
See here: https://stackoverflow.com/a/51835397 --- deployments/backend.yml | 2 ++ deployments/db-migration-worker.yml | 4 +++- deployments/neo4j.yml | 7 +++++-- deployments/web.yml | 2 ++ 4 files changed, 12 insertions(+), 3 deletions(-) diff --git a/deployments/backend.yml b/deployments/backend.yml index f2ebffb9c..31f8ef357 100644 --- a/deployments/backend.yml +++ b/deployments/backend.yml @@ -18,6 +18,8 @@ spec: spec: containers: - env: + - name: COMMIT + value: - name: MOCK value: "false" - name: CLIENT_URI diff --git a/deployments/db-migration-worker.yml b/deployments/db-migration-worker.yml index 685904aba..952cf0121 100644 --- a/deployments/db-migration-worker.yml +++ b/deployments/db-migration-worker.yml @@ -19,6 +19,8 @@ spec: containers: - env: + - name: COMMIT + value: - name: SSH_USERNAME valueFrom: configMapKeyRef: @@ -64,7 +66,6 @@ name: ssh-keys-directory - mountPath: /mongo-export/ name: mongo-export - restartPolicy: Always volumes: - name: ssh-keys-directory persistentVolumeClaim: @@ -72,6 +73,7 @@ - name: mongo-export persistentVolumeClaim: claimName: mongo-export-claim + restartPolicy: Always terminationGracePeriodSeconds: 30 status: {} --- diff --git a/deployments/neo4j.yml b/deployments/neo4j.yml index 7f96cd45a..ef394e36f 100644 --- a/deployments/neo4j.yml +++ b/deployments/neo4j.yml @@ -18,6 +18,8 @@ spec: containers: - env: + - name: COMMIT + value: - name: NEO4J_apoc_import_file_enabled value: "true" - name: NEO4J_dbms_memory_pagecache_size @@ -47,13 +49,12 @@ - containerPort: 7687 - containerPort: 7474 resources: {} - imagePullPolicy: IfNotPresent + imagePullPolicy: Always volumeMounts: - mountPath: /data/ name: neo4j-data - mountPath: /mongo-export/ name: mongo-export - restartPolicy: Always volumes: - name: mongo-export persistentVolumeClaim: @@ -61,6 +62,8 @@ - name: neo4j-data persistentVolumeClaim: claimName: neo4j-data-claim + restartPolicy: Always + terminationGracePeriodSeconds: 30 status: {} --- apiVersion: v1 diff --git a/deployments/web.yml b/deployments/web.yml index 43be04bbe..48e99b9c2 100644 --- a/deployments/web.yml +++ b/deployments/web.yml @@ -18,6 +18,8 @@ spec: spec: containers: - env: + - name: COMMIT + value: - name: HOST value: 0.0.0.0 - name: BACKEND_URL From d8c23c3452267d308f31c587ae896ffabd88d5b1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=A4fer?= Date: Fri, 1 Feb 2019 19:53:09 +0100 Subject: [PATCH 13/26] Reproduced the whole setup locally with success --- README.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/README.md b/README.md index 0096071ee..d43687b4a 100644 --- a/README.md +++ b/README.md @@ -8,9 +8,8 @@ - [ ] configure Let's Encrypt cert-manager from yml - [ ] configure ingress form yml - [ ] configure persistent & shared storage between nodes -- [ ] reproduce setup locally +- [x] reproduce setup locally -> The dummy directory has some lb configurations that did not work properly on Digital Ocean but could be used as a starting point for getting it right ## Install Minikube, kubectl There are many Kubernetes distributions, but if you're just getting started, Minikube is a tool that you can use to get your feet wet. 
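The empty `COMMIT` variable introduced above only triggers a re-deploy if something actually changes its value. A minimal sketch of how a build step could bump it to roll out a fresh `:latest` image (assuming `kubectl set env` is available and the command runs from a checkout of the application repository):

```shell
# Changing COMMIT alters the pod template, so kubernetes replaces the pods
# and, with imagePullPolicy: Always, pulls the :latest image again.
$ kubectl --namespace=staging set env deployment/nitro-backend COMMIT=$(git rev-parse HEAD)
```

The same command works for `nitro-neo4j` and `nitro-web`, since all four deployments now carry the placeholder.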
From af15ec6393b127123371d29915225081605ddcbc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=A4fer?= Date: Fri, 1 Feb 2019 23:34:19 +0100 Subject: [PATCH 14/26] Fix certain configuration for Digital Ocean --- config/neo4j.yml | 2 +- deployments/db-migration-worker.yml | 17 +++-------------- services/neo4j.yml | 2 +- volumes/mongo-export.yml | 16 ++-------------- volumes/uploads.yml | 16 ++-------------- 5 files changed, 9 insertions(+), 44 deletions(-) diff --git a/config/neo4j.yml b/config/neo4j.yml index 78d1ba3cd..0165338db 100644 --- a/config/neo4j.yml +++ b/config/neo4j.yml @@ -1,7 +1,7 @@ apiVersion: v1 kind: ConfigMap data: - NEO4J_URI: "bolt://neo4j.staging:7687" + NEO4J_URI: "bolt://nitro-neo4j.staging:7687" NEO4J_USER: "neo4j" NEO4J_AUTH: none metadata: diff --git a/deployments/db-migration-worker.yml b/deployments/db-migration-worker.yml index 952cf0121..f4d427096 100644 --- a/deployments/db-migration-worker.yml +++ b/deployments/db-migration-worker.yml @@ -76,19 +76,6 @@ restartPolicy: Always terminationGracePeriodSeconds: 30 status: {} ---- - apiVersion: v1 - kind: PersistentVolume - metadata: - name: ssh-keys-volume - namespace: staging - spec: - accessModes: - - ReadWriteOnce - capacity: - storage: 1Mi - hostPath: - path: /data/pv0001/ --- kind: PersistentVolumeClaim apiVersion: v1 @@ -100,4 +87,6 @@ - ReadWriteOnce resources: requests: - storage: 1Mi + # waaay too much + # unfortunately Digital Oceans volumes start at 1Gi + storage: 1Gi diff --git a/services/neo4j.yml b/services/neo4j.yml index 4ff0953a7..d6c7a95b4 100644 --- a/services/neo4j.yml +++ b/services/neo4j.yml @@ -1,7 +1,7 @@ apiVersion: v1 kind: Service metadata: - name: neo4j + name: nitro-neo4j namespace: staging labels: workload.user.cattle.io/workloadselector: deployment-staging-neo4j diff --git a/volumes/mongo-export.yml b/volumes/mongo-export.yml index a5ef064cc..1fb537e5c 100644 --- a/volumes/mongo-export.yml +++ b/volumes/mongo-export.yml @@ -1,16 +1,3 @@ ---- - kind: PersistentVolume - apiVersion: v1 - metadata: - name: mongo-export-volume - namespace: staging - spec: - accessModes: - - ReadWriteMany - capacity: - storage: 1Gi - hostPath: - path: /data/shared/mongo-exports/ --- kind: PersistentVolumeClaim apiVersion: v1 @@ -19,7 +6,8 @@ namespace: staging spec: accessModes: - - ReadWriteMany + - ReadWriteOnce resources: requests: storage: 1Gi + storageClassName: do-block-storage diff --git a/volumes/uploads.yml b/volumes/uploads.yml index 34b600aab..3a9dfcdad 100644 --- a/volumes/uploads.yml +++ b/volumes/uploads.yml @@ -1,16 +1,3 @@ ---- - apiVersion: v1 - kind: PersistentVolume - metadata: - name: uploads-volume - namespace: staging - spec: - accessModes: - - ReadWriteMany - capacity: - storage: 8Gi - hostPath: - path: /data/shared/uploads/ --- kind: PersistentVolumeClaim apiVersion: v1 @@ -19,7 +6,8 @@ namespace: staging spec: accessModes: - - ReadWriteMany + - ReadWriteOnce resources: requests: storage: 8Gi + storageClassName: do-block-storage From 671826e060032596ab2e112786eb0913f3031034 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=A4fer?= Date: Sat, 2 Feb 2019 12:40:29 +0100 Subject: [PATCH 15/26] Bundle all configuration in one folder staging/ --- .gitignore | 2 +- {config => staging/config}/.gitignore | 0 {config => staging/config}/backend.yml | 0 {config => staging/config}/neo4j.yml | 0 {config => staging/config}/web.yml | 0 {deployments => staging/deployments}/backend.yml | 0 {deployments => staging/deployments}/db-migration-worker.yml | 0 {deployments => 
staging/deployments}/neo4j.yml | 0 {deployments => staging/deployments}/web.yml | 0 namespace-staging.yml => staging/namespace-staging.yml | 0 {services => staging/services}/backend.yml | 0 {services => staging/services}/neo4j.yml | 0 {services => staging/services}/web.yml | 0 {volumes => staging/volumes}/mongo-export.yml | 1 - {volumes => staging/volumes}/uploads.yml | 1 - 15 files changed, 1 insertion(+), 3 deletions(-) rename {config => staging/config}/.gitignore (100%) rename {config => staging/config}/backend.yml (100%) rename {config => staging/config}/neo4j.yml (100%) rename {config => staging/config}/web.yml (100%) rename {deployments => staging/deployments}/backend.yml (100%) rename {deployments => staging/deployments}/db-migration-worker.yml (100%) rename {deployments => staging/deployments}/neo4j.yml (100%) rename {deployments => staging/deployments}/web.yml (100%) rename namespace-staging.yml => staging/namespace-staging.yml (100%) rename {services => staging/services}/backend.yml (100%) rename {services => staging/services}/neo4j.yml (100%) rename {services => staging/services}/web.yml (100%) rename {volumes => staging/volumes}/mongo-export.yml (84%) rename {volumes => staging/volumes}/uploads.yml (84%) diff --git a/.gitignore b/.gitignore index 32cfb3b9e..8a42d3602 100644 --- a/.gitignore +++ b/.gitignore @@ -1 +1 @@ -*secrets*.yml +*secrets.yml diff --git a/config/.gitignore b/staging/config/.gitignore similarity index 100% rename from config/.gitignore rename to staging/config/.gitignore diff --git a/config/backend.yml b/staging/config/backend.yml similarity index 100% rename from config/backend.yml rename to staging/config/backend.yml diff --git a/config/neo4j.yml b/staging/config/neo4j.yml similarity index 100% rename from config/neo4j.yml rename to staging/config/neo4j.yml diff --git a/config/web.yml b/staging/config/web.yml similarity index 100% rename from config/web.yml rename to staging/config/web.yml diff --git a/deployments/backend.yml b/staging/deployments/backend.yml similarity index 100% rename from deployments/backend.yml rename to staging/deployments/backend.yml diff --git a/deployments/db-migration-worker.yml b/staging/deployments/db-migration-worker.yml similarity index 100% rename from deployments/db-migration-worker.yml rename to staging/deployments/db-migration-worker.yml diff --git a/deployments/neo4j.yml b/staging/deployments/neo4j.yml similarity index 100% rename from deployments/neo4j.yml rename to staging/deployments/neo4j.yml diff --git a/deployments/web.yml b/staging/deployments/web.yml similarity index 100% rename from deployments/web.yml rename to staging/deployments/web.yml diff --git a/namespace-staging.yml b/staging/namespace-staging.yml similarity index 100% rename from namespace-staging.yml rename to staging/namespace-staging.yml diff --git a/services/backend.yml b/staging/services/backend.yml similarity index 100% rename from services/backend.yml rename to staging/services/backend.yml diff --git a/services/neo4j.yml b/staging/services/neo4j.yml similarity index 100% rename from services/neo4j.yml rename to staging/services/neo4j.yml diff --git a/services/web.yml b/staging/services/web.yml similarity index 100% rename from services/web.yml rename to staging/services/web.yml diff --git a/volumes/mongo-export.yml b/staging/volumes/mongo-export.yml similarity index 84% rename from volumes/mongo-export.yml rename to staging/volumes/mongo-export.yml index 1fb537e5c..563a9cfe6 100644 --- a/volumes/mongo-export.yml +++ 
b/staging/volumes/mongo-export.yml @@ -10,4 +10,3 @@ resources: requests: storage: 1Gi - storageClassName: do-block-storage diff --git a/volumes/uploads.yml b/staging/volumes/uploads.yml similarity index 84% rename from volumes/uploads.yml rename to staging/volumes/uploads.yml index 3a9dfcdad..a48d28ddc 100644 --- a/volumes/uploads.yml +++ b/staging/volumes/uploads.yml @@ -10,4 +10,3 @@ resources: requests: storage: 8Gi - storageClassName: do-block-storage From 15f391539440fe7755b1e60cce85571c7747e208 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=A4fer?= Date: Sat, 2 Feb 2019 13:08:07 +0100 Subject: [PATCH 16/26] Adding dashboard deployment for digital ocean --- README.md | 110 ++++++++++++------ dashboard/admin-user.yml | 5 + dashboard/role-binding.yml | 12 ++ ...space-staging.yml => namespace-staging.yml | 0 4 files changed, 89 insertions(+), 38 deletions(-) create mode 100644 dashboard/admin-user.yml create mode 100644 dashboard/role-binding.yml rename staging/namespace-staging.yml => namespace-staging.yml (100%) diff --git a/README.md b/README.md index d43687b4a..3fb1a983b 100644 --- a/README.md +++ b/README.md @@ -10,13 +10,12 @@ - [ ] configure persistent & shared storage between nodes - [x] reproduce setup locally - -## Install Minikube, kubectl +## Minikube There are many Kubernetes distributions, but if you're just getting started, Minikube is a tool that you can use to get your feet wet. [Install Minikube](https://kubernetes.io/docs/tasks/tools/install-minikube/) -# Open minikube dashboard +### Open minikube dashboard ``` $ minikube dashboard ``` @@ -25,63 +24,98 @@ Some of the steps below need some timing to make ressources available to other dependent deployments. Keeping an eye on the dashboard is a great way to check that. -## Create a namespace locally -```shell -$ kubectl create -f namespace-staging.yml -``` -Switch to the namespace `staging` in your kubernetes dashboard. +### Access exposed services -## Setup config maps -```shell -$ cp db-migration-worker.template.yml config/db-migration-worker.yml -# edit all variables according to the setup of the remote legacy server +Follow the installation instruction below. 
Just at the end, expose the +`nitro-web` service on your host system with: -$ kubectl apply -f config/ +```shell +$ minikube service nitro-web --namespace=staging ``` -## Setup secrets and deploy themn +## Digital Ocean + +Install the kubernetes dashboard first: +```sh +$ kubectl apply -f dashboard/ +``` +Proxy localhost to the remote kubernetes dashboard: +```sh +kubectl proxy +``` +Get your token on the command line: +```sh +$ kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}') +``` +It should print something like: +``` +Name: admin-user-token-6gl6l +Namespace: kube-system +Labels: +Annotations: kubernetes.io/service-account.name=admin-user + kubernetes.io/service-account.uid=b16afba9-dfec-11e7-bbb9-901b0e532516 + +Type: kubernetes.io/service-account-token + +Data +==== +ca.crt: 1025 bytes +namespace: 11 bytes +token: eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLTZnbDZsIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiJiMTZhZmJhOS1kZmVjLTExZTctYmJiOS05MDFiMGU1MzI1MTYiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWRtaW4tdXNlciJ9.M70CU3lbu3PP4OjhFms8PVL5pQKj-jj4RNSLA4YmQfTXpPUuxqXjiTf094_Rzr0fgN_IVX6gC4fiNUL5ynx9KU-lkPfk0HnX8scxfJNzypL039mpGt0bbe1IXKSIRaq_9VW59Xz-yBUhycYcKPO9RM2Qa1Ax29nqNVko4vLn1_1wPqJ6XSq3GYI8anTzV8Fku4jasUwjrws6Cn6_sPEGmL54sq5R4Z5afUtv-mItTmqZZdxnkRqcJLlg2Y8WbCPogErbsaCDJoABQ7ppaqHetwfM_0yMun6ABOQbIwwl8pspJhpplKwyo700OSpvTT9zlBsu-b35lzXGBRHzv5g_RA + +``` +Grab the token and paste it into the login screen at [http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/](http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/) + + +## Installation with kubernetes (minikube + Digital Ocean) + +You have to do some prerequisites and change some secrets according to your own setup. + +#### Setup config maps +```shell +$ cp db-migration-worker.template.yml staging/config/db-migration-worker.yml +``` +Edit all variables according to the setup of the remote legacy server. + +#### Setup secrets and deploy themn + +```sh +$ cp secrets.yml.template staging/secrets.yml +``` +Change all secrets as needed. + If you want to edit secrets, you have to `base64` encode them. See [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/secret/#creating-a-secret-manually). ```shell # example how to base64 a string: $ echo -n 'admin' | base64 YWRtaW4= +``` +Those secrets get `base64` decoded in a kubernetes pod. -$ cp secrets.yml.template secrets.yml -# change all variables as needed and deploy them +#### Create a namespace locally +```shell +$ kubectl create -f namespace-staging.yml +``` +Switch to the namespace `staging` in your kubernetes dashboard. + +### Run the configuration +```shell +$ cd staging/ $ kubectl apply -f secrets.yml -``` - -## Create volumes -```shell +$ kubectl apply -f config/ $ kubectl apply -f volumes/ -``` - -## Expose the services - -```shell $ kubectl apply -f services/ -``` -Wait until persistent volumes and services become available. - -## Create deployments -```shell $ kubectl apply -f deployments/ ``` + This can take a while because kubernetes will download the docker images. 
Sit back and relax and have a look into your kubernetes dashboard. Wait until all pods turn green and they don't show a warning `Waiting: ContainerCreating` anymore. -## Access the services - -```shell -$ minikube service nitro-web --namespace=staging -``` - - -## Provision db-migration-worker +### Provision db-migration-worker Copy your private ssh key and the `.known-hosts` file of your remote legacy server. ```shell diff --git a/dashboard/admin-user.yml b/dashboard/admin-user.yml new file mode 100644 index 000000000..27b6bb802 --- /dev/null +++ b/dashboard/admin-user.yml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: admin-user + namespace: kube-system diff --git a/dashboard/role-binding.yml b/dashboard/role-binding.yml new file mode 100644 index 000000000..faa8927a2 --- /dev/null +++ b/dashboard/role-binding.yml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: admin-user +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: +- kind: ServiceAccount + name: admin-user + namespace: kube-system diff --git a/staging/namespace-staging.yml b/namespace-staging.yml similarity index 100% rename from staging/namespace-staging.yml rename to namespace-staging.yml From 0b075830bc497dde1145aa6a9617687d3f46bc2c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=A4fer?= Date: Sat, 2 Feb 2019 13:33:42 +0100 Subject: [PATCH 17/26] Put many configuration files in one folder --- README.md | 47 +++++++++---------- ...onfigmap-db-migration-worker.template.yaml | 0 ...pace-staging.yml => namespace-staging.yaml | 0 secrets.template.yml => secrets.template.yaml | 0 staging/.gitignore | 2 + staging/config/.gitignore | 1 - staging/config/backend.yml | 9 ---- staging/config/neo4j.yml | 9 ---- staging/config/web.yml | 8 ---- staging/configmaps.yaml | 29 ++++++++++++ .../backend.yml => deployment-backend.yaml} | 0 ...ml => deployment-db-migration-worker.yaml} | 0 .../neo4j.yml => deployment-neo4j.yaml} | 0 .../web.yml => deployment-web.yaml} | 0 .../backend.yml => service-backend.yaml} | 0 .../neo4j.yml => service-neo4j.yaml} | 0 .../{services/web.yml => service-web.yaml} | 0 ...rt.yml => volume-claim-mongo-exports.yaml} | 0 .../uploads.yml => volume-claim-uploads.yaml} | 0 19 files changed, 52 insertions(+), 53 deletions(-) rename db-migration-worker.template.yml => configmap-db-migration-worker.template.yaml (100%) rename namespace-staging.yml => namespace-staging.yaml (100%) rename secrets.template.yml => secrets.template.yaml (100%) create mode 100644 staging/.gitignore delete mode 100644 staging/config/.gitignore delete mode 100644 staging/config/backend.yml delete mode 100644 staging/config/neo4j.yml delete mode 100644 staging/config/web.yml create mode 100644 staging/configmaps.yaml rename staging/{deployments/backend.yml => deployment-backend.yaml} (100%) rename staging/{deployments/db-migration-worker.yml => deployment-db-migration-worker.yaml} (100%) rename staging/{deployments/neo4j.yml => deployment-neo4j.yaml} (100%) rename staging/{deployments/web.yml => deployment-web.yaml} (100%) rename staging/{services/backend.yml => service-backend.yaml} (100%) rename staging/{services/neo4j.yml => service-neo4j.yaml} (100%) rename staging/{services/web.yml => service-web.yaml} (100%) rename staging/{volumes/mongo-export.yml => volume-claim-mongo-exports.yaml} (100%) rename staging/{volumes/uploads.yml => volume-claim-uploads.yaml} (100%) diff --git a/README.md b/README.md index 
3fb1a983b..0225f0aa9 100644
--- a/README.md
+++ b/README.md
@@ -1,21 +1,20 @@
 # Human-Connection Nitro | Deployment Configuration
 
-> Currently the deployment is not primetime ready as you still have to do some manual work. That we need to change, the following list gives some glimpse of the missing steps.
-
-## Todo`s
-- [ ] check labels and selectors if they all are correct
-- [ ] configure NGINX from yml
+Todos:
+- [x] check labels and selectors if they all are correct
+- [x] configure NGINX from yml
 - [ ] configure Let's Encrypt cert-manager from yml
-- [ ] configure ingress form yml
-- [ ] configure persistent & shared storage between nodes
+- [x] configure ingress from yml
+- [x] configure persistent & shared storage between nodes
 - [x] reproduce setup locally
 
 ## Minikube
-There are many Kubernetes distributions, but if you're just getting started, Minikube is a tool that you can use to get your feet wet.
+There are many Kubernetes distributions, but if you're just getting started,
+Minikube is a tool that you can use to get your feet wet.
 
 [Install Minikube](https://kubernetes.io/docs/tasks/tools/install-minikube/)
 
-### Open minikube dashboard
+Open minikube dashboard:
 ```
 $ minikube dashboard
 ```
@@ -24,10 +23,9 @@ Some of the steps below need some timing to make resources
 available to other dependent deployments. Keeping an eye on the dashboard is a
 great way to check that.
 
-### Access exposed services
-
-Follow the installation instruction below. Just at the end, expose the
-`nitro-web` service on your host system with:
+Follow the [installation instruction](#installation-with-kubernetes) below.
+If all the pods and services have settled and everything looks green in your
+minikube dashboard, expose the `nitro-web` service on your host system with:
 
 ```shell
 $ minikube service nitro-web --namespace=staging
@@ -35,7 +33,7 @@ $ minikube service nitro-web --namespace=staging
 
 ## Digital Ocean
 
-Install the kubernetes dashboard first:
+First, install the kubernetes dashboard:
 ```sh
 $ kubectl apply -f dashboard/
 ```
@@ -67,20 +65,21 @@ token: eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZ
 Grab the token and paste it into the login screen at [http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/](http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/)
 
-## Installation with kubernetes (minikube + Digital Ocean)
+## Installation with kubernetes
 
-You have to do some prerequisites and change some secrets according to your own setup.
+You have to do some prerequisites, e.g. change some secrets according to your
+own setup.
 
 #### Setup config maps
 ```shell
-$ cp db-migration-worker.template.yml staging/config/db-migration-worker.yml
+$ cp configmap-db-migration-worker.template.yaml staging/configmap-db-migration-worker.yaml
 ```
 Edit all variables according to the setup of the remote legacy server.
 
 #### Setup secrets and deploy them
 
 ```sh
-$ cp secrets.yml.template staging/secrets.yml
+$ cp secrets.template.yaml staging/secrets.yaml
 ```
 Change all secrets as needed.
 
@@ -95,18 +94,13 @@ Those secrets get `base64` decoded in a kubernetes pod.
 
 #### Create a namespace locally
 ```shell
-$ kubectl create -f namespace-staging.yml
+$ kubectl create -f namespace-staging.yaml
 ```
 Switch to the namespace `staging` in your kubernetes dashboard.
 
### Run the configuration ```shell -$ cd staging/ -$ kubectl apply -f secrets.yml -$ kubectl apply -f config/ -$ kubectl apply -f volumes/ -$ kubectl apply -f services/ -$ kubectl apply -f deployments/ +$ kubectl apply -f staging/ ``` This can take a while because kubernetes will download the docker images. @@ -116,7 +110,8 @@ Wait until all pods turn green and they don't show a warning ### Provision db-migration-worker -Copy your private ssh key and the `.known-hosts` file of your remote legacy server. +Copy your private ssh key and the `.known-hosts` file of your remote legacy +server. ```shell # check the corresponding db-migration-worker pod diff --git a/db-migration-worker.template.yml b/configmap-db-migration-worker.template.yaml similarity index 100% rename from db-migration-worker.template.yml rename to configmap-db-migration-worker.template.yaml diff --git a/namespace-staging.yml b/namespace-staging.yaml similarity index 100% rename from namespace-staging.yml rename to namespace-staging.yaml diff --git a/secrets.template.yml b/secrets.template.yaml similarity index 100% rename from secrets.template.yml rename to secrets.template.yaml diff --git a/staging/.gitignore b/staging/.gitignore new file mode 100644 index 000000000..599426dbb --- /dev/null +++ b/staging/.gitignore @@ -0,0 +1,2 @@ +configmap-db-migration-worker.yaml +secrets.yaml diff --git a/staging/config/.gitignore b/staging/config/.gitignore deleted file mode 100644 index 6fe22561d..000000000 --- a/staging/config/.gitignore +++ /dev/null @@ -1 +0,0 @@ -db-migration-worker.yml diff --git a/staging/config/backend.yml b/staging/config/backend.yml deleted file mode 100644 index cfb19b538..000000000 --- a/staging/config/backend.yml +++ /dev/null @@ -1,9 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -data: - GRAPHQL_PORT: "4000" - GRAPHQL_URI: "http://nitro-backend.staging:4000" - MOCK: "false" -metadata: - name: staging-backend - namespace: staging diff --git a/staging/config/neo4j.yml b/staging/config/neo4j.yml deleted file mode 100644 index 0165338db..000000000 --- a/staging/config/neo4j.yml +++ /dev/null @@ -1,9 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -data: - NEO4J_URI: "bolt://nitro-neo4j.staging:7687" - NEO4J_USER: "neo4j" - NEO4J_AUTH: none -metadata: - name: staging-neo4j - namespace: staging diff --git a/staging/config/web.yml b/staging/config/web.yml deleted file mode 100644 index 1dbf5e25e..000000000 --- a/staging/config/web.yml +++ /dev/null @@ -1,8 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -data: - CLIENT_URI: "https://nitro-staging.human-connection.org" - MAPBOX_TOKEN: pk.eyJ1IjoiaHVtYW4tY29ubmVjdGlvbiIsImEiOiJjajl0cnBubGoweTVlM3VwZ2lzNTNud3ZtIn0.KZ8KK9l70omjXbEkkbHGsQ -metadata: - name: staging-web - namespace: staging diff --git a/staging/configmaps.yaml b/staging/configmaps.yaml new file mode 100644 index 000000000..c07353141 --- /dev/null +++ b/staging/configmaps.yaml @@ -0,0 +1,29 @@ +--- + apiVersion: v1 + kind: ConfigMap + data: + GRAPHQL_PORT: "4000" + GRAPHQL_URI: "http://nitro-backend.staging:4000" + MOCK: "false" + metadata: + name: staging-backend + namespace: staging +--- + apiVersion: v1 + kind: ConfigMap + data: + NEO4J_URI: "bolt://nitro-neo4j.staging:7687" + NEO4J_USER: "neo4j" + NEO4J_AUTH: none + metadata: + name: staging-neo4j + namespace: staging +--- + apiVersion: v1 + kind: ConfigMap + data: + CLIENT_URI: "https://nitro-staging.human-connection.org" + MAPBOX_TOKEN: pk.eyJ1IjoiaHVtYW4tY29ubmVjdGlvbiIsImEiOiJjajl0cnBubGoweTVlM3VwZ2lzNTNud3ZtIn0.KZ8KK9l70omjXbEkkbHGsQ + metadata: + name: 
staging-web + namespace: staging diff --git a/staging/deployments/backend.yml b/staging/deployment-backend.yaml similarity index 100% rename from staging/deployments/backend.yml rename to staging/deployment-backend.yaml diff --git a/staging/deployments/db-migration-worker.yml b/staging/deployment-db-migration-worker.yaml similarity index 100% rename from staging/deployments/db-migration-worker.yml rename to staging/deployment-db-migration-worker.yaml diff --git a/staging/deployments/neo4j.yml b/staging/deployment-neo4j.yaml similarity index 100% rename from staging/deployments/neo4j.yml rename to staging/deployment-neo4j.yaml diff --git a/staging/deployments/web.yml b/staging/deployment-web.yaml similarity index 100% rename from staging/deployments/web.yml rename to staging/deployment-web.yaml diff --git a/staging/services/backend.yml b/staging/service-backend.yaml similarity index 100% rename from staging/services/backend.yml rename to staging/service-backend.yaml diff --git a/staging/services/neo4j.yml b/staging/service-neo4j.yaml similarity index 100% rename from staging/services/neo4j.yml rename to staging/service-neo4j.yaml diff --git a/staging/services/web.yml b/staging/service-web.yaml similarity index 100% rename from staging/services/web.yml rename to staging/service-web.yaml diff --git a/staging/volumes/mongo-export.yml b/staging/volume-claim-mongo-exports.yaml similarity index 100% rename from staging/volumes/mongo-export.yml rename to staging/volume-claim-mongo-exports.yaml diff --git a/staging/volumes/uploads.yml b/staging/volume-claim-uploads.yaml similarity index 100% rename from staging/volumes/uploads.yml rename to staging/volume-claim-uploads.yaml From abf623bd51d539c9ec94efce251b037073fc12ea Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=A4fer?= Date: Sat, 2 Feb 2019 18:44:21 +0100 Subject: [PATCH 18/26] Rename .yml to .yaml files for consistency See: https://stackoverflow.com/a/21059164 @appinteractive --- .gitignore | 1 - README.md | 2 +- dashboard/{admin-user.yml => admin-user.yaml} | 0 dashboard/{role-binding.yml => role-binding.yaml} | 0 4 files changed, 1 insertion(+), 2 deletions(-) delete mode 100644 .gitignore rename dashboard/{admin-user.yml => admin-user.yaml} (100%) rename dashboard/{role-binding.yml => role-binding.yaml} (100%) diff --git a/.gitignore b/.gitignore deleted file mode 100644 index 8a42d3602..000000000 --- a/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*secrets.yml diff --git a/README.md b/README.md index 0225f0aa9..7f998fcf2 100644 --- a/README.md +++ b/README.md @@ -39,7 +39,7 @@ $ kubectl apply -f dashboard/ ``` Proxy localhost to the remote kubernetes dashboard: ```sh -kubectl proxy +$ kubectl proxy ``` Get your token on the command line: ```sh diff --git a/dashboard/admin-user.yml b/dashboard/admin-user.yaml similarity index 100% rename from dashboard/admin-user.yml rename to dashboard/admin-user.yaml diff --git a/dashboard/role-binding.yml b/dashboard/role-binding.yaml similarity index 100% rename from dashboard/role-binding.yml rename to dashboard/role-binding.yaml From 246a46c2e8807e5ef46aab9b80fba77c4059330f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=A4fer?= Date: Sat, 2 Feb 2019 20:47:26 +0100 Subject: [PATCH 19/26] Remove obsolete volume in deployment --- staging/deployment-neo4j.yaml | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/staging/deployment-neo4j.yaml b/staging/deployment-neo4j.yaml index ef394e36f..58ca7f24b 100644 --- a/staging/deployment-neo4j.yaml +++ b/staging/deployment-neo4j.yaml @@ 
-65,19 +65,6 @@ restartPolicy: Always terminationGracePeriodSeconds: 30 status: {} ---- - apiVersion: v1 - kind: PersistentVolume - metadata: - name: neo4j-data-volume - namespace: staging - spec: - accessModes: - - ReadWriteOnce - capacity: - storage: 4Gi - hostPath: - path: /data/neo4j/ --- kind: PersistentVolumeClaim apiVersion: v1 From fb929da2cd1dc7d5b3436ff3e19283faaa69c3a2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=A4fer?= Date: Mon, 4 Feb 2019 01:34:17 +0100 Subject: [PATCH 20/26] Refactor db-migration-worker deployment Instead of creating a deployment with a replica set, we just create the pod once. Also the pod should have everything in the future to run the database migration. Ie. have `cypher-shell` to write directly to the database in the current network. All required configuration is passed manually to the `db-migration-worker`-pod directly. SSH-keys are copied through a secrets file. This altogether made many configuration files obsolete. --- .gitignore | 2 + README.md | 40 ++++++--- configmap-db-migration-worker.template.yaml | 12 --- db-migration-worker.yaml | 39 +++++++++ staging/.gitignore | 2 - staging/deployment-db-migration-worker.yaml | 92 --------------------- staging/volume-claim-mongo-exports.yaml | 12 --- staging/volume-claim-uploads.yaml | 12 --- 8 files changed, 70 insertions(+), 141 deletions(-) create mode 100644 .gitignore delete mode 100644 configmap-db-migration-worker.template.yaml create mode 100644 db-migration-worker.yaml delete mode 100644 staging/.gitignore delete mode 100644 staging/deployment-db-migration-worker.yaml delete mode 100644 staging/volume-claim-mongo-exports.yaml delete mode 100644 staging/volume-claim-uploads.yaml diff --git a/.gitignore b/.gitignore new file mode 100644 index 000000000..18b453e6b --- /dev/null +++ b/.gitignore @@ -0,0 +1,2 @@ +secrets.yaml +*/secrets.yaml diff --git a/README.md b/README.md index 7f998fcf2..6c8227f59 100644 --- a/README.md +++ b/README.md @@ -109,20 +109,38 @@ Wait until all pods turn green and they don't show a warning `Waiting: ContainerCreating` anymore. -### Provision db-migration-worker -Copy your private ssh key and the `.known-hosts` file of your remote legacy -server. -```shell - -# check the corresponding db-migration-worker pod -$ kubectl --namespace=staging get pods -# change below -$ kubectl cp path/to/your/ssh/keys/.ssh staging/nitro-db-migration-worker-:/root/ +### Migrate database of Human Connection legacy server +Create a configmap with the specific connection data of your legacy server: +```sh +$ kubectl create configmap db-migration-worker \ + --namespace=staging \ + --from-literal=SSH_USERNAME=someuser \ + --from-literal=SSH_HOST=yourhost \ + --from-literal=MONGODB_USERNAME=hc-api \ + --from-literal=MONGODB_PASSWORD=secretpassword \ + --from-literal=MONGODB_AUTH_DB=hc_api \ + --from-literal=MONGODB_DATABASE=hc_api \ + --from-literal=UPLOADS_DIRECTORY=/var/www/api/uploads ``` +Create a secret with your public and private ssh keys: +```sh +$ kubectl create secret generic ssh-keys \ + --namespace=staging \ + --from-file=id_rsa=/path/to/.ssh/id_rsa \ + --from-file=id_rsa.pub=/path/to/.ssh/id_rsa.pub \ + --from-file=known_hosts=/path/to/.ssh/known_hosts +``` +As the [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/secret/#use-case-pod-with-ssh-keys) +points out, you should be careful with your ssh keys. Anyone with access to your +cluster will have access to your ssh keys. 
Better create a new pair with +`ssh-keygen` and copy the public key to your legacy server with `ssh-copy-id`. +Create the pod and the required volume: +```sh +$ kubectl apply -f db-migration-worker.yaml +``` Run the migration: ```shell # change below -$ kubectl --namespace=staging exec -it nitro-db-migration-worker- ./import.sh -$ kubectl --namespace=staging exec -it nitro-neo4j- ./import/import.sh +$ kubectl --namespace=staging exec -it nitro-db-migration-worker ./import.sh ``` diff --git a/configmap-db-migration-worker.template.yaml b/configmap-db-migration-worker.template.yaml deleted file mode 100644 index e00077577..000000000 --- a/configmap-db-migration-worker.template.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -data: - SSH_USERNAME: "" - SSH_HOST: "" - MONGODB_USERNAME: "hc-api" - MONGODB_AUTH_DB: "hc_api" - MONGODB_DATABASE: "hc_api" - UPLOADS_DIRECTORY: "/var/www/api/uploads" -metadata: - name: staging-db-migration-worker - namespace: staging diff --git a/db-migration-worker.yaml b/db-migration-worker.yaml new file mode 100644 index 000000000..e0b520e58 --- /dev/null +++ b/db-migration-worker.yaml @@ -0,0 +1,39 @@ +--- + kind: Pod + apiVersion: v1 + metadata: + name: nitro-db-migration-worker + namespace: staging + spec: + volumes: + - name: secret-volume + secret: + secretName: ssh-keys + defaultMode: 0400 + - name: mongo-export + persistentVolumeClaim: + claimName: mongo-export-claim + containers: + - name: nitro-db-migration-worker + image: humanconnection/db-migration-worker:latest + envFrom: + - configMapRef: + name: db-migration-worker + volumeMounts: + - name: secret-volume + readOnly: false + mountPath: /root/.ssh + - name: mongo-export + mountPath: /mongo-export/ +--- + kind: PersistentVolumeClaim + apiVersion: v1 + metadata: + name: mongo-export-claim + namespace: staging + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi diff --git a/staging/.gitignore b/staging/.gitignore deleted file mode 100644 index 599426dbb..000000000 --- a/staging/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -configmap-db-migration-worker.yaml -secrets.yaml diff --git a/staging/deployment-db-migration-worker.yaml b/staging/deployment-db-migration-worker.yaml deleted file mode 100644 index f4d427096..000000000 --- a/staging/deployment-db-migration-worker.yaml +++ /dev/null @@ -1,92 +0,0 @@ ---- - apiVersion: extensions/v1beta1 - kind: Deployment - metadata: - name: nitro-db-migration-worker - namespace: staging - spec: - replicas: 1 - minReadySeconds: 15 - progressDeadlineSeconds: 60 - selector: - matchLabels: - workload.user.cattle.io/workloadselector: deployment-staging-db-migration-worker - template: - metadata: - labels: - workload.user.cattle.io/workloadselector: deployment-staging-db-migration-worker - name: nitro-db-migration-worker - spec: - containers: - - env: - - name: COMMIT - value: - - name: SSH_USERNAME - valueFrom: - configMapKeyRef: - name: staging-db-migration-worker - key: SSH_USERNAME - - name: SSH_HOST - valueFrom: - configMapKeyRef: - name: staging-db-migration-worker - key: SSH_HOST - - name: MONGODB_USERNAME - valueFrom: - configMapKeyRef: - name: staging-db-migration-worker - key: MONGODB_USERNAME - - name: MONGODB_AUTH_DB - valueFrom: - configMapKeyRef: - name: staging-db-migration-worker - key: MONGODB_AUTH_DB - - name: MONGODB_DATABASE - valueFrom: - configMapKeyRef: - name: staging-db-migration-worker - key: MONGODB_DATABASE - - name: UPLOADS_DIRECTORY - valueFrom: - configMapKeyRef: - name: 
staging-db-migration-worker - key: UPLOADS_DIRECTORY - - name: MONGODB_PASSWORD - valueFrom: - secretKeyRef: - name: staging - key: MONGODB_PASSWORD - optional: false - image: humanconnection/db-migration-worker:latest - name: nitro-db-migration-worker - resources: {} - imagePullPolicy: Always - volumeMounts: - - mountPath: /root/ - name: ssh-keys-directory - - mountPath: /mongo-export/ - name: mongo-export - volumes: - - name: ssh-keys-directory - persistentVolumeClaim: - claimName: ssh-keys-claim - - name: mongo-export - persistentVolumeClaim: - claimName: mongo-export-claim - restartPolicy: Always - terminationGracePeriodSeconds: 30 - status: {} ---- - kind: PersistentVolumeClaim - apiVersion: v1 - metadata: - name: ssh-keys-claim - namespace: staging - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - # waaay too much - # unfortunately Digital Oceans volumes start at 1Gi - storage: 1Gi diff --git a/staging/volume-claim-mongo-exports.yaml b/staging/volume-claim-mongo-exports.yaml deleted file mode 100644 index 563a9cfe6..000000000 --- a/staging/volume-claim-mongo-exports.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- - kind: PersistentVolumeClaim - apiVersion: v1 - metadata: - name: mongo-export-claim - namespace: staging - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi diff --git a/staging/volume-claim-uploads.yaml b/staging/volume-claim-uploads.yaml deleted file mode 100644 index a48d28ddc..000000000 --- a/staging/volume-claim-uploads.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- - kind: PersistentVolumeClaim - apiVersion: v1 - metadata: - name: uploads-claim - namespace: staging - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 8Gi From 81ae557be1da93b5fcc045090553cdf4815e13e7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=A4fer?= Date: Mon, 4 Feb 2019 13:00:00 +0100 Subject: [PATCH 21/26] Create a persistent volume claim for uploads --- staging/deployment-backend.yaml | 146 ++++++++++++++++++-------------- 1 file changed, 83 insertions(+), 63 deletions(-) diff --git a/staging/deployment-backend.yaml b/staging/deployment-backend.yaml index 31f8ef357..b3f329ba8 100644 --- a/staging/deployment-backend.yaml +++ b/staging/deployment-backend.yaml @@ -1,64 +1,84 @@ -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: nitro-backend - namespace: staging -spec: - replicas: 2 - minReadySeconds: 15 - progressDeadlineSeconds: 60 - selector: - matchLabels: - workload.user.cattle.io/workloadselector: deployment-staging-backend - template: - metadata: - labels: +--- + apiVersion: extensions/v1beta1 + kind: Deployment + metadata: + name: nitro-backend + namespace: staging + spec: + replicas: 2 + minReadySeconds: 15 + progressDeadlineSeconds: 60 + selector: + matchLabels: workload.user.cattle.io/workloadselector: deployment-staging-backend - name: "nitro-backend" - spec: - containers: - - env: - - name: COMMIT - value: - - name: MOCK - value: "false" - - name: CLIENT_URI - valueFrom: - configMapKeyRef: - name: staging-web - key: CLIENT_URI - - name: GRAPHQL_PORT - valueFrom: - configMapKeyRef: - name: staging-backend - key: GRAPHQL_PORT - - name: GRAPHQL_URI - valueFrom: - configMapKeyRef: - name: staging-backend - key: GRAPHQL_URI - - name: MAPBOX_TOKEN - valueFrom: - configMapKeyRef: - name: staging-web - key: MAPBOX_TOKEN - - name: JWT_SECRET - valueFrom: - secretKeyRef: - name: staging - key: JWT_SECRET - optional: false - - name: NEO4J_URI - valueFrom: - configMapKeyRef: - name: staging-neo4j - key: 
NEO4J_URI - image: humanconnection/nitro-backend:latest - name: nitro-backend - ports: - - containerPort: 4000 - resources: {} - imagePullPolicy: Always - restartPolicy: Always - terminationGracePeriodSeconds: 30 -status: {} + template: + metadata: + labels: + workload.user.cattle.io/workloadselector: deployment-staging-backend + name: "nitro-backend" + spec: + containers: + - env: + - name: COMMIT + value: + - name: MOCK + value: "false" + - name: CLIENT_URI + valueFrom: + configMapKeyRef: + name: staging-web + key: CLIENT_URI + - name: GRAPHQL_PORT + valueFrom: + configMapKeyRef: + name: staging-backend + key: GRAPHQL_PORT + - name: GRAPHQL_URI + valueFrom: + configMapKeyRef: + name: staging-backend + key: GRAPHQL_URI + - name: MAPBOX_TOKEN + valueFrom: + configMapKeyRef: + name: staging-web + key: MAPBOX_TOKEN + - name: JWT_SECRET + valueFrom: + secretKeyRef: + name: staging + key: JWT_SECRET + optional: false + - name: NEO4J_URI + valueFrom: + configMapKeyRef: + name: staging-neo4j + key: NEO4J_URI + image: humanconnection/nitro-backend:latest + name: nitro-backend + ports: + - containerPort: 4000 + resources: {} + imagePullPolicy: Always + volumeMounts: + - mountPath: /nitro-backend/public/uploads + name: uploads + volumes: + - name: uploads + persistentVolumeClaim: + claimName: uploads-claim + restartPolicy: Always + terminationGracePeriodSeconds: 30 + status: {} +--- + kind: PersistentVolumeClaim + apiVersion: v1 + metadata: + name: uploads-claim + namespace: staging + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 10Gi From d876a3f442b12118dce20a974bb36f8cfbe17993 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=A4fer?= Date: Mon, 4 Feb 2019 20:44:07 +0100 Subject: [PATCH 22/26] Multiple container per pod setup --- staging/deployment-backend.yaml | 29 +++++++++++++++++------ staging/deployment-neo4j.yaml | 41 ++++++++++++++++++++++++++++----- 2 files changed, 57 insertions(+), 13 deletions(-) diff --git a/staging/deployment-backend.yaml b/staging/deployment-backend.yaml index b3f329ba8..da3c4f148 100644 --- a/staging/deployment-backend.yaml +++ b/staging/deployment-backend.yaml @@ -18,7 +18,24 @@ name: "nitro-backend" spec: containers: - - env: + - name: nitro-db-migration-worker + image: humanconnection/db-migration-worker:latest + imagePullPolicy: Always + envFrom: + - configMapRef: + name: db-migration-worker + volumeMounts: + - name: secret-volume + readOnly: false + mountPath: /root/.ssh + - name: uploads + mountPath: /uploads/ + - name: nitro-backend + image: humanconnection/nitro-backend:latest + imagePullPolicy: Always + ports: + - containerPort: 4000 + env: - name: COMMIT value: - name: MOCK @@ -54,16 +71,14 @@ configMapKeyRef: name: staging-neo4j key: NEO4J_URI - image: humanconnection/nitro-backend:latest - name: nitro-backend - ports: - - containerPort: 4000 - resources: {} - imagePullPolicy: Always volumeMounts: - mountPath: /nitro-backend/public/uploads name: uploads volumes: + - name: secret-volume + secret: + secretName: ssh-keys + defaultMode: 0400 - name: uploads persistentVolumeClaim: claimName: uploads-claim diff --git a/staging/deployment-neo4j.yaml b/staging/deployment-neo4j.yaml index 58ca7f24b..957696341 100644 --- a/staging/deployment-neo4j.yaml +++ b/staging/deployment-neo4j.yaml @@ -17,7 +17,25 @@ name: nitro-neo4j spec: containers: - - env: + - name: nitro-db-migration-worker + image: humanconnection/db-migration-worker:latest + imagePullPolicy: Always + envFrom: + - configMapRef: + name: db-migration-worker + env: + - 
name: COMMIT + value: + volumeMounts: + - name: secret-volume + readOnly: false + mountPath: /root/.ssh + - name: mongo-export + mountPath: /mongo-export/ + - name: nitro-neo4j + image: humanconnection/neo4j:latest + imagePullPolicy: Always + env: - name: COMMIT value: - name: NEO4J_apoc_import_file_enabled @@ -43,19 +61,19 @@ configMapKeyRef: name: staging-neo4j key: NEO4J_AUTH - image: humanconnection/neo4j:latest - name: nitro-neo4j ports: - containerPort: 7687 - containerPort: 7474 - resources: {} - imagePullPolicy: Always volumeMounts: - mountPath: /data/ name: neo4j-data - mountPath: /mongo-export/ name: mongo-export volumes: + - name: secret-volume + secret: + secretName: ssh-keys + defaultMode: 0400 - name: mongo-export persistentVolumeClaim: claimName: mongo-export-claim @@ -64,7 +82,6 @@ claimName: neo4j-data-claim restartPolicy: Always terminationGracePeriodSeconds: 30 - status: {} --- kind: PersistentVolumeClaim apiVersion: v1 @@ -77,3 +94,15 @@ resources: requests: storage: 4Gi +--- + kind: PersistentVolumeClaim + apiVersion: v1 + metadata: + name: mongo-export-claim + namespace: staging + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi From 1fbec5f52538316bf6b6247ea4812b2b0e94ed83 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=A4fer?= Date: Mon, 4 Feb 2019 22:00:53 +0100 Subject: [PATCH 23/26] Set NEO4J_URI to localhost Apparently, if you have a mult-container setup, the same pod is accessible via localhost. --- staging/deployment-neo4j.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/staging/deployment-neo4j.yaml b/staging/deployment-neo4j.yaml index 957696341..720246568 100644 --- a/staging/deployment-neo4j.yaml +++ b/staging/deployment-neo4j.yaml @@ -26,6 +26,8 @@ env: - name: COMMIT value: + - name: NEO4J_URI + value: bolt://localhost:7687 volumeMounts: - name: secret-volume readOnly: false From 8c6bc72bd2232685a1e746dff4e7474ab8c68888 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=A4fer?= Date: Mon, 4 Feb 2019 22:05:24 +0100 Subject: [PATCH 24/26] Update README --- README.md | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/README.md b/README.md index 6c8227f59..a61074503 100644 --- a/README.md +++ b/README.md @@ -98,18 +98,7 @@ $ kubectl create -f namespace-staging.yaml ``` Switch to the namespace `staging` in your kubernetes dashboard. -### Run the configuration -```shell -$ kubectl apply -f staging/ -``` - -This can take a while because kubernetes will download the docker images. -Sit back and relax and have a look into your kubernetes dashboard. -Wait until all pods turn green and they don't show a warning -`Waiting: ContainerCreating` anymore. - - -### Migrate database of Human Connection legacy server +### Prepare migration of Human Connection legacy server Create a configmap with the specific connection data of your legacy server: ```sh $ kubectl create configmap db-migration-worker \ @@ -120,7 +109,9 @@ $ kubectl create configmap db-migration-worker \ --from-literal=MONGODB_PASSWORD=secretpassword \ --from-literal=MONGODB_AUTH_DB=hc_api \ --from-literal=MONGODB_DATABASE=hc_api \ - --from-literal=UPLOADS_DIRECTORY=/var/www/api/uploads + --from-literal=UPLOADS_DIRECTORY=/var/www/api/uploads \ + --from-literal=NEO4J_URI= \ + ``` Create a secret with your public and private ssh keys: ```sh @@ -135,12 +126,21 @@ points out, you should be careful with your ssh keys. Anyone with access to your cluster will have access to your ssh keys. 
Better create a new pair with `ssh-keygen` and copy the public key to your legacy server with `ssh-copy-id`. -Create the pod and the required volume: -```sh -$ kubectl apply -f db-migration-worker.yaml +### Run the configuration +```shell +$ kubectl apply -f staging/ ``` + +This can take a while because kubernetes will download the docker images. +Sit back and relax and have a look into your kubernetes dashboard. +Wait until all pods turn green and they don't show a warning +`Waiting: ContainerCreating` anymore. + +### Migrate legacy database Run the migration: ```shell +$ kubectl --namespace=staging get pods # change below -$ kubectl --namespace=staging exec -it nitro-db-migration-worker ./import.sh +$ kubectl --namespace=staging exec -it nitro-neo4j-65bbdb597c-nc2lv migrate +$ kubectl --namespace=staging exec -it nitro-backend-c6cc5ff69-8h96z sync_uploads ``` From 99262a0d4dd70199a098d20205eee91ae2e4f9c4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=A4fer?= Date: Tue, 5 Feb 2019 17:27:59 +0100 Subject: [PATCH 25/26] Fix README --- README.md | 18 +++++++++--------- staging/deployment-web.yaml | 4 ++-- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/README.md b/README.md index a61074503..201724265 100644 --- a/README.md +++ b/README.md @@ -101,16 +101,16 @@ Switch to the namespace `staging` in your kubernetes dashboard. ### Prepare migration of Human Connection legacy server Create a configmap with the specific connection data of your legacy server: ```sh -$ kubectl create configmap db-migration-worker \ - --namespace=staging \ - --from-literal=SSH_USERNAME=someuser \ - --from-literal=SSH_HOST=yourhost \ - --from-literal=MONGODB_USERNAME=hc-api \ - --from-literal=MONGODB_PASSWORD=secretpassword \ - --from-literal=MONGODB_AUTH_DB=hc_api \ - --from-literal=MONGODB_DATABASE=hc_api \ +$ kubectl create configmap db-migration-worker \ + --namespace=staging \ + --from-literal=SSH_USERNAME=someuser \ + --from-literal=SSH_HOST=yourhost \ + --from-literal=MONGODB_USERNAME=hc-api \ + --from-literal=MONGODB_PASSWORD=secretpassword \ + --from-literal=MONGODB_AUTH_DB=hc_api \ + --from-literal=MONGODB_DATABASE=hc_api \ --from-literal=UPLOADS_DIRECTORY=/var/www/api/uploads \ - --from-literal=NEO4J_URI= \ + --from-literal=NEO4J_URI=bolt://neo4j:7687 ``` Create a secret with your public and private ssh keys: diff --git a/staging/deployment-web.yaml b/staging/deployment-web.yaml index 48e99b9c2..de9651528 100644 --- a/staging/deployment-web.yaml +++ b/staging/deployment-web.yaml @@ -17,7 +17,8 @@ spec: name: nitro-web spec: containers: - - env: + - name: web + env: - name: COMMIT value: - name: HOST @@ -39,7 +40,6 @@ spec: key: JWT_SECRET optional: false image: humanconnection/nitro-web:latest - name: web ports: - containerPort: 3000 resources: {} From e1e457abaed92659b52e376d634d5e7edd36c9c1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=A4fer?= Date: Tue, 5 Feb 2019 17:44:30 +0100 Subject: [PATCH 26/26] Cleanly separate legacy migration part Use `kubectl patch -f ...` --- README.md | 36 ++++++++++++----- legacy-migration/deployment-backend.yaml | 27 +++++++++++++ legacy-migration/deployment-neo4j.yaml | 39 +++++++++++++++++++ .../volume-claim-mongo-export.yaml | 12 ++++++ staging/deployment-backend.yaml | 16 -------- staging/deployment-neo4j.yaml | 38 ------------------ 6 files changed, 104 insertions(+), 64 deletions(-) create mode 100644 legacy-migration/deployment-backend.yaml create mode 100644 legacy-migration/deployment-neo4j.yaml create mode 100644 
legacy-migration/volume-claim-mongo-export.yaml diff --git a/README.md b/README.md index 201724265..6ab975a07 100644 --- a/README.md +++ b/README.md @@ -98,7 +98,24 @@ $ kubectl create -f namespace-staging.yaml ``` Switch to the namespace `staging` in your kubernetes dashboard. -### Prepare migration of Human Connection legacy server + +### Run the configuration +```shell +$ kubectl apply -f staging/ +``` + +This can take a while because kubernetes will download the docker images. +Sit back and relax and have a look into your kubernetes dashboard. +Wait until all pods turn green and they don't show a warning +`Waiting: ContainerCreating` anymore. + +#### Legacy data migration + +This setup is completely optional and only required if you have data on a server +which is running our legacy code and you want to import that data. It will +import the uploads folder and migrate a dump of mongodb into neo4j. + +##### Prepare migration of Human Connection legacy server Create a configmap with the specific connection data of your legacy server: ```sh $ kubectl create configmap db-migration-worker \ @@ -126,17 +143,16 @@ points out, you should be careful with your ssh keys. Anyone with access to your cluster will have access to your ssh keys. Better create a new pair with `ssh-keygen` and copy the public key to your legacy server with `ssh-copy-id`. -### Run the configuration -```shell -$ kubectl apply -f staging/ +##### Migrate legacy database +Patch the existing deployments to use a multi-container setup: +```bash +cd legacy-migration +kubectl apply -f volume-claim-mongo-export.yaml +kubectl patch --namespace=staging deployment nitro-backend --patch "$(cat deployment-backend.yaml)" +kubectl patch --namespace=staging deployment nitro-neo4j --patch "$(cat deployment-neo4j.yaml)" +cd .. ``` -This can take a while because kubernetes will download the docker images. -Sit back and relax and have a look into your kubernetes dashboard. -Wait until all pods turn green and they don't show a warning -`Waiting: ContainerCreating` anymore. 
- -### Migrate legacy database Run the migration: ```shell $ kubectl --namespace=staging get pods diff --git a/legacy-migration/deployment-backend.yaml b/legacy-migration/deployment-backend.yaml new file mode 100644 index 000000000..e29730cae --- /dev/null +++ b/legacy-migration/deployment-backend.yaml @@ -0,0 +1,27 @@ +--- + apiVersion: extensions/v1beta1 + kind: Deployment + metadata: + name: nitro-backend + namespace: staging + spec: + template: + spec: + containers: + - name: nitro-db-migration-worker + image: humanconnection/db-migration-worker:latest + imagePullPolicy: Always + envFrom: + - configMapRef: + name: db-migration-worker + volumeMounts: + - name: secret-volume + readOnly: false + mountPath: /root/.ssh + - name: uploads + mountPath: /uploads/ + volumes: + - name: secret-volume + secret: + secretName: ssh-keys + defaultMode: 0400 diff --git a/legacy-migration/deployment-neo4j.yaml b/legacy-migration/deployment-neo4j.yaml new file mode 100644 index 000000000..887c02f3a --- /dev/null +++ b/legacy-migration/deployment-neo4j.yaml @@ -0,0 +1,39 @@ +--- + apiVersion: extensions/v1beta1 + kind: Deployment + metadata: + name: nitro-neo4j + namespace: staging + spec: + template: + spec: + containers: + - name: nitro-db-migration-worker + image: humanconnection/db-migration-worker:latest + imagePullPolicy: Always + envFrom: + - configMapRef: + name: db-migration-worker + env: + - name: COMMIT + value: + - name: NEO4J_URI + value: bolt://localhost:7687 + volumeMounts: + - name: secret-volume + readOnly: false + mountPath: /root/.ssh + - name: mongo-export + mountPath: /mongo-export/ + - name: nitro-neo4j + volumeMounts: + - mountPath: /mongo-export/ + name: mongo-export + volumes: + - name: secret-volume + secret: + secretName: ssh-keys + defaultMode: 0400 + - name: mongo-export + persistentVolumeClaim: + claimName: mongo-export-claim diff --git a/legacy-migration/volume-claim-mongo-export.yaml b/legacy-migration/volume-claim-mongo-export.yaml new file mode 100644 index 000000000..563a9cfe6 --- /dev/null +++ b/legacy-migration/volume-claim-mongo-export.yaml @@ -0,0 +1,12 @@ +--- + kind: PersistentVolumeClaim + apiVersion: v1 + metadata: + name: mongo-export-claim + namespace: staging + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi diff --git a/staging/deployment-backend.yaml b/staging/deployment-backend.yaml index da3c4f148..4c2832a71 100644 --- a/staging/deployment-backend.yaml +++ b/staging/deployment-backend.yaml @@ -18,18 +18,6 @@ name: "nitro-backend" spec: containers: - - name: nitro-db-migration-worker - image: humanconnection/db-migration-worker:latest - imagePullPolicy: Always - envFrom: - - configMapRef: - name: db-migration-worker - volumeMounts: - - name: secret-volume - readOnly: false - mountPath: /root/.ssh - - name: uploads - mountPath: /uploads/ - name: nitro-backend image: humanconnection/nitro-backend:latest imagePullPolicy: Always @@ -75,10 +63,6 @@ - mountPath: /nitro-backend/public/uploads name: uploads volumes: - - name: secret-volume - secret: - secretName: ssh-keys - defaultMode: 0400 - name: uploads persistentVolumeClaim: claimName: uploads-claim diff --git a/staging/deployment-neo4j.yaml b/staging/deployment-neo4j.yaml index 720246568..d9aeab542 100644 --- a/staging/deployment-neo4j.yaml +++ b/staging/deployment-neo4j.yaml @@ -17,23 +17,6 @@ name: nitro-neo4j spec: containers: - - name: nitro-db-migration-worker - image: humanconnection/db-migration-worker:latest - imagePullPolicy: Always - envFrom: - - configMapRef: - name: 
db-migration-worker - env: - - name: COMMIT - value: - - name: NEO4J_URI - value: bolt://localhost:7687 - volumeMounts: - - name: secret-volume - readOnly: false - mountPath: /root/.ssh - - name: mongo-export - mountPath: /mongo-export/ - name: nitro-neo4j image: humanconnection/neo4j:latest imagePullPolicy: Always @@ -69,16 +52,7 @@ volumeMounts: - mountPath: /data/ name: neo4j-data - - mountPath: /mongo-export/ - name: mongo-export volumes: - - name: secret-volume - secret: - secretName: ssh-keys - defaultMode: 0400 - - name: mongo-export - persistentVolumeClaim: - claimName: mongo-export-claim - name: neo4j-data persistentVolumeClaim: claimName: neo4j-data-claim @@ -96,15 +70,3 @@ resources: requests: storage: 4Gi ---- - kind: PersistentVolumeClaim - apiVersion: v1 - metadata: - name: mongo-export-claim - namespace: staging - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi
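A closing note on why the partial manifests in `legacy-migration/` are sufficient: `kubectl patch` defaults to a strategic merge patch, in which the `containers` and `volumes` lists are merged by `name`, so the patch files only need to describe the additions. One way to check that the patch landed (the label selector comes from the manifests above; the jsonpath query is merely illustrative):

```shell
# Each patched neo4j pod should now list both container names,
# nitro-db-migration-worker and nitro-neo4j:
$ kubectl --namespace=staging get pods \
    -l workload.user.cattle.io/workloadselector=deployment-staging-neo4j \
    -o jsonpath='{.items[*].spec.containers[*].name}'
```

After the migration has run, `kubectl replace -f staging/deployment-neo4j.yaml` (and likewise for the backend) swaps the plain single-container spec back in, because `replace` overwrites the whole object instead of merging it.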