From 81e211f6fb91cfa58b5d20b11624e8db74ba574a Mon Sep 17 00:00:00 2001
From: Grzegorz Leoniec
Date: Thu, 17 Jan 2019 14:10:38 +0100
Subject: [PATCH 01/55] Initial commit

---
 .gitignore | 1 +
 README.md | 35 +++++
 dummies/backend-service.yaml | 13 ++
 dummies/do-loadbalancer.yaml | 12 ++
 dummies/ingress-backend.yaml | 15 ++
 dummies/nginx.yaml | 22 +++
 dummies/web-service.yaml | 13 ++
 staging/backend-configmap.yaml | 9 ++
 staging/backend-deployment.yaml | 62 ++++++++
 staging/deployment.yaml | 260 ++++++++++++++++++++++++++++++++
 staging/neo4j-configmap.yaml | 9 ++
 staging/neo4j-deployment.yaml | 50 ++++++
 staging/neo4j-service.yaml | 22 +++
 staging/secrets.yaml.template | 7 +
 staging/web-configmap.yaml | 7 +
 staging/web-deployment.yaml | 47 ++++++
 16 files changed, 584 insertions(+)
 create mode 100644 .gitignore
 create mode 100644 README.md
 create mode 100644 dummies/backend-service.yaml
 create mode 100644 dummies/do-loadbalancer.yaml
 create mode 100644 dummies/ingress-backend.yaml
 create mode 100644 dummies/nginx.yaml
 create mode 100644 dummies/web-service.yaml
 create mode 100644 staging/backend-configmap.yaml
 create mode 100644 staging/backend-deployment.yaml
 create mode 100644 staging/deployment.yaml
 create mode 100644 staging/neo4j-configmap.yaml
 create mode 100644 staging/neo4j-deployment.yaml
 create mode 100644 staging/neo4j-service.yaml
 create mode 100644 staging/secrets.yaml.template
 create mode 100644 staging/web-configmap.yaml
 create mode 100644 staging/web-deployment.yaml

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 000000000..da61c76ef
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1 @@
+*secrets*.yaml
diff --git a/README.md b/README.md
new file mode 100644
index 000000000..fe28119cf
--- /dev/null
+++ b/README.md
@@ -0,0 +1,35 @@
+# Human-Connection Nitro | Deployment Configuration
+
+> Currently the deployment is not prime-time ready, as you still have to do some manual work. We need to change that; the following list gives a glimpse of the missing steps.
+
+## Todos
+- [ ] check labels and selectors if they all are correct
+- [ ] configure NGINX from yaml
+- [ ] configure Let's Encrypt cert-manager from yaml
+- [ ] configure ingress from yaml
+- [ ] configure persistent & shared storage between nodes
+- [ ] reproduce setup locally
+
+> The dummy directory has some load-balancer configurations that did not work properly on Digital Ocean, but they could be used as a starting point for getting it right.
+
+
+## Apply the config map to staging namespace
+```shell
+cd ./staging
+kubectl apply -f configmap-neo4j.yaml -f configmap-backend.yaml -f configmap-web.yaml
+```
+
+
+## Set up secrets and deploy them
+```shell
+cd ./staging
+cp secrets.yaml.template secrets.yaml
+# change all vars as needed and deploy afterwards
+kubectl apply -f secrets.yaml
+```
+
+## Deploy the app
+```shell
+cd ./staging
+kubectl apply -f deployment-neo4j.yaml -f deployment-backend.yaml -f deployment-web.yaml
+```
diff --git a/dummies/backend-service.yaml b/dummies/backend-service.yaml
new file mode 100644
index 000000000..48fffbc24
--- /dev/null
+++ b/dummies/backend-service.yaml
@@ -0,0 +1,13 @@
+kind: Service
+apiVersion: v1
+metadata:
+  labels:
+    k8s-app: nitro-backend
+  name: nitro-backend
+  namespace: staging
+spec:
+  ports:
+    - port: 4000
+      targetPort: 4000
+  selector:
+    k8s-app: nitro-backend
diff --git a/dummies/do-loadbalancer.yaml b/dummies/do-loadbalancer.yaml
new file mode 100644
index 000000000..9c700e082
--- /dev/null
+++ b/dummies/do-loadbalancer.yaml
@@ -0,0 +1,12 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: sample-load-balancer
+  namespace: staging
+spec:
+  type: LoadBalancer
+  ports:
+    - protocol: TCP
+      port: 80
+      targetPort: 80
+      name: http
diff --git a/dummies/ingress-backend.yaml b/dummies/ingress-backend.yaml
new file mode 100644
index 000000000..0640b49fd
--- /dev/null
+++ b/dummies/ingress-backend.yaml
@@ -0,0 +1,15 @@
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+  name: backend-ingress
+  namespace: staging
+  annotations:
+    nginx.ingress.kubernetes.io/rewrite-target: /
+spec:
+  rules:
+    - http:
+        paths:
+          - path: /
+            backend:
+              serviceName: backend
+              servicePort: 4000
diff --git a/dummies/nginx.yaml b/dummies/nginx.yaml
new file mode 100644
index 000000000..1f5136b4b
--- /dev/null
+++ b/dummies/nginx.yaml
@@ -0,0 +1,22 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: ingress-nginx
+  namespace: staging
+  labels:
+    app.kubernetes.io/name: ingress-nginx
+    app.kubernetes.io/part-of: ingress-nginx
+spec:
+  type: NodePort
+  ports:
+    - name: http
+      port: 80
+      targetPort: 80
+      protocol: TCP
+    - name: https
+      port: 443
+      targetPort: 443
+      protocol: TCP
+  selector:
+    app.kubernetes.io/name: ingress-nginx
+    app.kubernetes.io/part-of: ingress-nginx
diff --git a/dummies/web-service.yaml b/dummies/web-service.yaml
new file mode 100644
index 000000000..847ba3c05
--- /dev/null
+++ b/dummies/web-service.yaml
@@ -0,0 +1,13 @@
+kind: Service
+apiVersion: v1
+metadata:
+  labels:
+    k8s-app: nitro-web
+  name: nitro-web
+  namespace: staging
+spec:
+  ports:
+    - port: 3000
+      targetPort: 3000
+  selector:
+    k8s-app: nitro-web
diff --git a/staging/backend-configmap.yaml b/staging/backend-configmap.yaml
new file mode 100644
index 000000000..ba7c819bc
--- /dev/null
+++ b/staging/backend-configmap.yaml
@@ -0,0 +1,9 @@
+apiVersion: v1
+kind: ConfigMap
+data:
+  GRAPHQL_PORT: "4000"
+  GRAPHQL_URI: "https://api-nitro-staging.human-connection.org"
+  MOCK: "false"
+metadata:
+  name: staging-backend
+  namespace: staging
diff --git a/staging/backend-deployment.yaml
b/staging/backend-deployment.yaml new file mode 100644 index 000000000..ee0dfcef6 --- /dev/null +++ b/staging/backend-deployment.yaml @@ -0,0 +1,62 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: nitro-backend + namespace: staging +spec: + replicas: 2 + minReadySeconds: 15 + progressDeadlineSeconds: 60 + # strategy: + # rollingUpdate: + # maxSurge: 1 + # maxUnavailable: 0 + # type: RollingUpdate + selector: + matchLabels: + workload.user.cattle.io/workloadselector: deployment-staging-backend + template: + metadata: + labels: + workload.user.cattle.io/workloadselector: deployment-staging-backend + name: "nitro-backend" + spec: + containers: + - env: + - name: MOCK + value: "false" + - name: CLIENT_URI + valueFrom: + configMapKeyRef: + name: staging-web + key: CLIENT_URI + - name: GRAPHQL_PORT + valueFrom: + configMapKeyRef: + name: staging-backend + key: GRAPHQL_PORT + - name: GRAPHQL_URI + valueFrom: + configMapKeyRef: + name: staging-backend + key: GRAPHQL_URI + - name: JWT_SECRET + valueFrom: + secretKeyRef: + name: staging + key: JWT_SECRET + optional: false + - name: NEO4J_URI + valueFrom: + configMapKeyRef: + name: staging-neo4j + key: NEO4J_URI + image: humanconnection/nitro-backend:latest + name: nitro-backend + ports: + - containerPort: 4000 + resources: {} + imagePullPolicy: Always + restartPolicy: Always + terminationGracePeriodSeconds: 30 +status: {} diff --git a/staging/deployment.yaml b/staging/deployment.yaml new file mode 100644 index 000000000..9d68db535 --- /dev/null +++ b/staging/deployment.yaml @@ -0,0 +1,260 @@ +apiVersion: v1 +items: +- apiVersion: extensions/v1beta1 + kind: Deployment + metadata: + name: backend + namespace: staging + spec: + minReadySeconds: 15 + progressDeadlineSeconds: 60 + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + cattle.io/creator: norman + workload.user.cattle.io/workloadselector: deployment-staging-backend + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + type: RollingUpdate + template: + spec: + containers: + - env: + - name: MOCK + valueFrom: + configMapKeyRef: + key: MOCK + name: staging-backend + optional: false + - name: NEO4J_URI + valueFrom: + configMapKeyRef: + key: NEO4J_URI + name: staging-neo4j + optional: false + - name: JWT_SECRET + valueFrom: + secretKeyRef: + key: JWT_SECRET + name: staging + optional: false + - name: NEO4J_AUTH + valueFrom: + configMapKeyRef: + key: NEO4J_AUTH + name: staging-neo4j + optional: false + - name: CLIENT_URI + valueFrom: + configMapKeyRef: + key: CLIENT_URI + name: staging-web + optional: false + - name: GRAPHQL_PORT + valueFrom: + configMapKeyRef: + key: GRAPHQL_PORT + name: staging-backend + optional: false + - name: GRAPHQL_URI + valueFrom: + configMapKeyRef: + key: GRAPHQL_URI + name: staging-backend + optional: false + image: humanconnection/nitro-backend:latest + imagePullPolicy: Always + name: backend + resources: {} + tty: true + restartPolicy: Always + terminationGracePeriodSeconds: 30 +#- apiVersion: extensions/v1beta1 +# kind: Deployment +# metadata: +# annotations: +# deployment.kubernetes.io/revision: "2" +# field.cattle.io/creatorId: user-x8jr4 +# field.cattle.io/publicEndpoints: 
'[{"nodeName":"c-2kbhr:m-bmgq4","addresses":["104.248.30.130"],"port":7687,"protocol":"TCP","podName":"staging:neo4j-2-6589cbc4d5-q4bxl","allNodes":false},{"nodeName":"c-2kbhr:m-bmgq4","addresses":["104.248.30.130"],"port":7474,"protocol":"TCP","podName":"staging:neo4j-2-6589cbc4d5-q4bxl","allNodes":false},{"nodeName":"c-2kbhr:m-bmgq4","addresses":["104.248.30.130"],"port":7473,"protocol":"TCP","podName":"staging:neo4j-2-6589cbc4d5-q4bxl","allNodes":false}]' +# creationTimestamp: 2018-12-10T19:07:58Z +# generation: 8 +# labels: +# cattle.io/creator: norman +# workload.user.cattle.io/workloadselector: deployment-staging-neo4j-2 +# name: neo4j-2 +# namespace: staging +# resourceVersion: "2380945" +# selfLink: /apis/extensions/v1beta1/namespaces/staging/deployments/neo4j-2 +# uid: e80460f6-fcae-11e8-943a-c6c288d5f6fa +# spec: +# progressDeadlineSeconds: 600 +# replicas: 1 +# revisionHistoryLimit: 10 +# selector: +# matchLabels: +# workload.user.cattle.io/workloadselector: deployment-staging-neo4j-2 +# strategy: +# rollingUpdate: +# maxSurge: 1 +# maxUnavailable: 0 +# type: RollingUpdate +# template: +# metadata: +# annotations: +# cattle.io/timestamp: 2018-12-11T11:11:09Z +# field.cattle.io/ports: '[[{"containerPort":7687,"dnsName":"neo4j-2-hostport","hostPort":7687,"kind":"HostPort","name":"7687tcp76870","protocol":"TCP","sourcePort":7687},{"containerPort":7474,"dnsName":"neo4j-2-hostport","hostPort":7474,"kind":"HostPort","name":"7474tcp74740","protocol":"TCP","sourcePort":7474},{"containerPort":7473,"dnsName":"neo4j-2-hostport","hostPort":7473,"kind":"HostPort","name":"7473tcp74730","protocol":"TCP","sourcePort":7473}]]' +# creationTimestamp: null +# labels: +# workload.user.cattle.io/workloadselector: deployment-staging-neo4j-2 +# spec: +# containers: +# - env: +# - name: NEO4J_AUTH +# value: none +# image: humanconnection/neo4j:latest +# imagePullPolicy: IfNotPresent +# name: neo4j-2 +# ports: +# - containerPort: 7687 +# hostPort: 7687 +# name: 7687tcp76870 +# protocol: TCP +# - containerPort: 7474 +# hostPort: 7474 +# name: 7474tcp74740 +# protocol: TCP +# - containerPort: 7473 +# hostPort: 7473 +# name: 7473tcp74730 +# protocol: TCP +# resources: {} +# securityContext: +# allowPrivilegeEscalation: false +# capabilities: {} +# privileged: false +# readOnlyRootFilesystem: false +# runAsNonRoot: false +# stdin: true +# terminationMessagePath: /dev/termination-log +# terminationMessagePolicy: File +# tty: true +# dnsPolicy: ClusterFirst +# restartPolicy: Always +# schedulerName: default-scheduler +# securityContext: {} +# terminationGracePeriodSeconds: 30 +# status: +# availableReplicas: 1 +# conditions: +# - lastTransitionTime: 2018-12-10T19:07:58Z +# lastUpdateTime: 2018-12-11T11:11:18Z +# message: ReplicaSet "neo4j-2-6589cbc4d5" has successfully progressed. +# reason: NewReplicaSetAvailable +# status: "True" +# type: Progressing +# - lastTransitionTime: 2018-12-11T12:12:41Z +# lastUpdateTime: 2018-12-11T12:12:41Z +# message: Deployment has minimum availability. 
+# reason: MinimumReplicasAvailable +# status: "True" +# type: Available +# observedGeneration: 8 +# readyReplicas: 1 +# replicas: 1 +# updatedReplicas: 1 +##- apiVersion: extensions/v1beta1 +# kind: Deployment +# metadata: +# annotations: +# deployment.kubernetes.io/revision: "15" +# field.cattle.io/creatorId: user-x8jr4 +# field.cattle.io/publicEndpoints: '[{"addresses":["68.183.211.116"],"port":31726,"protocol":"TCP","serviceName":"staging:web-nodeport","allNodes":true},{"addresses":["104.248.25.205"],"port":80,"protocol":"HTTP","serviceName":"staging:ingress-ef72b2ceebfff95d50b0537c0e9e98d8","ingressName":"staging:web","hostname":"web.staging.104.248.25.205.xip.io","allNodes":true}]' +# creationTimestamp: 2018-11-30T13:56:41Z +# generation: 56 +# labels: +# cattle.io/creator: norman +# workload.user.cattle.io/workloadselector: deployment-staging-web +# name: web +# namespace: staging +# resourceVersion: "2401610" +# selfLink: /apis/extensions/v1beta1/namespaces/staging/deployments/web +# uid: c3870196-f4a7-11e8-943a-c6c288d5f6fa +# spec: +# progressDeadlineSeconds: 600 +# replicas: 1 +# revisionHistoryLimit: 10 +# selector: +# matchLabels: +# workload.user.cattle.io/workloadselector: deployment-staging-web +# strategy: +# rollingUpdate: +# maxSurge: 1 +# maxUnavailable: 0 +# type: RollingUpdate +# template: +# metadata: +# labels: +# workload.user.cattle.io/workloadselector: deployment-staging-web +# spec: +# containers: +# - env: +# - name: HOST +# value: 0.0.0.0 +# - name: JWT_SECRET +# valueFrom: +# secretKeyRef: +# key: JWT_SECRET +# name: jwt-secret +# optional: false +# - name: BACKEND_URL +# valueFrom: +# configMapKeyRef: +# key: GRAPHQL_URI +# name: staging-configs +# optional: false +# image: humanconnection/nitro-web:latest +# imagePullPolicy: Always +# name: web +# ports: +# - containerPort: 3000 +# name: 3000tcp01 +# protocol: TCP +# resources: {} +# securityContext: +# allowPrivilegeEscalation: false +# capabilities: {} +# privileged: false +# readOnlyRootFilesystem: false +# runAsNonRoot: false +# stdin: true +# terminationMessagePath: /dev/termination-log +# terminationMessagePolicy: File +# tty: true +# dnsPolicy: ClusterFirst +# restartPolicy: Always +# schedulerName: default-scheduler +# securityContext: {} +# terminationGracePeriodSeconds: 30 +# status: +# availableReplicas: 1 +# conditions: +# - lastTransitionTime: 2018-11-30T14:53:36Z +# lastUpdateTime: 2018-12-11T11:17:34Z +# message: ReplicaSet "web-5864d6db9c" has successfully progressed. +# reason: NewReplicaSetAvailable +# status: "True" +# type: Progressing +# - lastTransitionTime: 2018-12-11T11:23:17Z +# lastUpdateTime: 2018-12-11T11:23:17Z +# message: Deployment has minimum availability. 
+# reason: MinimumReplicasAvailable +# status: "True" +# type: Available +# observedGeneration: 56 +# readyReplicas: 1 +# replicas: 1 +# updatedReplicas: 1 +kind: List diff --git a/staging/neo4j-configmap.yaml b/staging/neo4j-configmap.yaml new file mode 100644 index 000000000..2f5ece848 --- /dev/null +++ b/staging/neo4j-configmap.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: ConfigMap +data: + NEO4J_URI: "bolt://neo4j:7687" + NEO4J_USER: "neo4j" + NEO4J_AUTH: none +metadata: + name: staging-neo4j + namespace: staging diff --git a/staging/neo4j-deployment.yaml b/staging/neo4j-deployment.yaml new file mode 100644 index 000000000..66cf5a966 --- /dev/null +++ b/staging/neo4j-deployment.yaml @@ -0,0 +1,50 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: nitro-neo4j + namespace: staging +spec: + replicas: 1 + strategy: {} + selector: + matchLabels: + workload.user.cattle.io/workloadselector: deployment-staging-neo4j + template: + metadata: + labels: + workload.user.cattle.io/workloadselector: deployment-staging-neo4j + name: "nitro-neo4j" + spec: + containers: + - env: + - name: NEO4J_dbms_memory_pagecache_size + value: 1G + - name: NEO4J_dbms_memory_heap_max__size + value: 1G + - name: NEO4J_AUTH + value: none + - name: NEO4J_URI + valueFrom: + configMapKeyRef: + name: staging-neo4j + key: NEO4J_URI + - name: NEO4J_USER + valueFrom: + configMapKeyRef: + name: staging-neo4j + key: NEO4J_USER + - name: NEO4J_AUTH + valueFrom: + configMapKeyRef: + name: staging-neo4j + key: NEO4J_AUTH + image: humanconnection/neo4j:latest + name: nitro-neo4j + ports: + - containerPort: 7687 + - containerPort: 7474 + # - containerPort: 7473 + resources: {} + imagePullPolicy: IfNotPresent + restartPolicy: Always +status: {} diff --git a/staging/neo4j-service.yaml b/staging/neo4j-service.yaml new file mode 100644 index 000000000..0f66d7474 --- /dev/null +++ b/staging/neo4j-service.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: + field.cattle.io/ipAddresses: "null" + field.cattle.io/targetDnsRecordIds: "null" + field.cattle.io/targetWorkloadIds: '["deployment:staging:nitro-neo4j"]' + labels: + cattle.io/creator: norman + name: neo4j + namespace: staging +spec: + clusterIP: None + ports: + - name: default + port: 42 + protocol: TCP + targetPort: 42 + selector: + workloadID_neo4j: "true" + sessionAffinity: None + type: ClusterIP diff --git a/staging/secrets.yaml.template b/staging/secrets.yaml.template new file mode 100644 index 000000000..33b8e472b --- /dev/null +++ b/staging/secrets.yaml.template @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: Secret +data: + JWT_SECRET: "HgfiztF679FvIZtvv3" +metadata: + name: staging + namespace: staging diff --git a/staging/web-configmap.yaml b/staging/web-configmap.yaml new file mode 100644 index 000000000..a05140fb8 --- /dev/null +++ b/staging/web-configmap.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: ConfigMap +data: + CLIENT_URI: "https://nitro-staging.human-connection.org" +metadata: + name: staging-web + namespace: staging diff --git a/staging/web-deployment.yaml b/staging/web-deployment.yaml new file mode 100644 index 000000000..97b53a7bf --- /dev/null +++ b/staging/web-deployment.yaml @@ -0,0 +1,47 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: nitro-web + namespace: staging +spec: + replicas: 2 + minReadySeconds: 15 + progressDeadlineSeconds: 60 + # strategy: + # rollingUpdate: + # maxSurge: 1 + # maxUnavailable: 0 + # type: RollingUpdate + selector: + matchLabels: + 
workload.user.cattle.io/workloadselector: deployment-staging-web + template: + metadata: + labels: + workload.user.cattle.io/workloadselector: deployment-staging-web + name: nitro-web + spec: + containers: + - env: + - name: HOST + value: 0.0.0.0 + - name: BACKEND_URL + valueFrom: + configMapKeyRef: + name: staging-backend + key: GRAPHQL_URI + - name: JWT_SECRET + valueFrom: + secretKeyRef: + name: staging + key: JWT_SECRET + optional: false + image: humanconnection/nitro-web:latest + name: web + ports: + - containerPort: 3000 + resources: {} + imagePullPolicy: Always + restartPolicy: Always + terminationGracePeriodSeconds: 30 +status: {} From 1dc8e13dfa224c6f8a22ced71e90757034399dc8 Mon Sep 17 00:00:00 2001 From: Matt Rider Date: Tue, 22 Jan 2019 10:43:26 -0200 Subject: [PATCH 02/55] Add instructions to docs, fix kubectl commands --- README.md | 15 ++++++++++++--- namespace-staging.json | 10 ++++++++++ staging/secrets.yaml.template | 2 +- 3 files changed, 23 insertions(+), 4 deletions(-) create mode 100644 namespace-staging.json diff --git a/README.md b/README.md index fe28119cf..13f4e5ec5 100644 --- a/README.md +++ b/README.md @@ -12,14 +12,22 @@ > The dummy directory has some lb configurations that did not work properly on Digital Ocean but could be used as a starting point for getting it right +## Install Minikube, kubectl +There are many Kubernetes distributions, but if you're just getting started, Minikube is a tool that you can use to get your feet wet. + +[Install Minikube](https://kubernetes.io/docs/tasks/tools/install-minikube/) + +## Create a namespace locally +```shell +kubectl create -f namespace-staging.json +``` ## Apply the config map to staging namespace ```shell cd ./staging -kubectl apply -f configmap-neo4j.yaml -f configmap-backend.yaml -f configmap-web.yaml +kubectl apply -f neo4j-configmap.yaml -f backend-configmap.yaml -f web-configmap.yaml ``` - ## Setup secrets and deploy themn ```shell cd ./staging @@ -31,5 +39,6 @@ kubectl apply -f secrets.yaml ## Deploy the app ```shell cd ./staging -kubectl apply -f deployment-neo4j.yaml -f deployment-backend.yaml -f deployment-web.yaml +kubectl apply -f neo4j-deployment.yaml -f backend-deployment.yaml -f web-deployment.yaml ``` + \ No newline at end of file diff --git a/namespace-staging.json b/namespace-staging.json new file mode 100644 index 000000000..6b71bc772 --- /dev/null +++ b/namespace-staging.json @@ -0,0 +1,10 @@ +{ + "kind": "Namespace", + "apiVersion": "v1", + "metadata": { + "name": "staging", + "labels": { + "name": "staging" + } + } +} \ No newline at end of file diff --git a/staging/secrets.yaml.template b/staging/secrets.yaml.template index 33b8e472b..f8a4642a3 100644 --- a/staging/secrets.yaml.template +++ b/staging/secrets.yaml.template @@ -1,7 +1,7 @@ apiVersion: v1 kind: Secret data: - JWT_SECRET: "HgfiztF679FvIZtvv3" + JWT_SECRET: "aHVtYW5jb25uZWN0aW9uLWRlcGxveW1lbnQ=" metadata: name: staging namespace: staging From 687192d5213943431a25149a85ee255657da012d Mon Sep 17 00:00:00 2001 From: Grzegorz Leoniec Date: Fri, 25 Jan 2019 15:23:32 +0100 Subject: [PATCH 03/55] Added Mapbox token --- staging/backend-deployment.yaml | 5 +++++ staging/web-configmap.yaml | 1 + staging/web-deployment.yaml | 5 +++++ 3 files changed, 11 insertions(+) diff --git a/staging/backend-deployment.yaml b/staging/backend-deployment.yaml index ee0dfcef6..b5f379ddb 100644 --- a/staging/backend-deployment.yaml +++ b/staging/backend-deployment.yaml @@ -40,6 +40,11 @@ spec: configMapKeyRef: name: staging-backend key: GRAPHQL_URI + - name: 
MAPBOX_TOKEN + valueFrom: + configMapKeyRef: + name: staging-web + key: MAPBOX_TOKEN - name: JWT_SECRET valueFrom: secretKeyRef: diff --git a/staging/web-configmap.yaml b/staging/web-configmap.yaml index a05140fb8..1dbf5e25e 100644 --- a/staging/web-configmap.yaml +++ b/staging/web-configmap.yaml @@ -2,6 +2,7 @@ apiVersion: v1 kind: ConfigMap data: CLIENT_URI: "https://nitro-staging.human-connection.org" + MAPBOX_TOKEN: pk.eyJ1IjoiaHVtYW4tY29ubmVjdGlvbiIsImEiOiJjajl0cnBubGoweTVlM3VwZ2lzNTNud3ZtIn0.KZ8KK9l70omjXbEkkbHGsQ metadata: name: staging-web namespace: staging diff --git a/staging/web-deployment.yaml b/staging/web-deployment.yaml index 97b53a7bf..5cad7f039 100644 --- a/staging/web-deployment.yaml +++ b/staging/web-deployment.yaml @@ -30,6 +30,11 @@ spec: configMapKeyRef: name: staging-backend key: GRAPHQL_URI + - name: MAPBOX_TOKEN + valueFrom: + configMapKeyRef: + name: staging-web + key: MAPBOX_TOKEN - name: JWT_SECRET valueFrom: secretKeyRef: From 4f9f26a1da195c3da4fc8c29e6cd5539de96f8c5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=A4fer?= Date: Fri, 25 Jan 2019 18:26:22 +0100 Subject: [PATCH 04/55] Add instructions to expose/access the services --- README.md | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 13f4e5ec5..db0007d92 100644 --- a/README.md +++ b/README.md @@ -41,4 +41,23 @@ kubectl apply -f secrets.yaml cd ./staging kubectl apply -f neo4j-deployment.yaml -f backend-deployment.yaml -f web-deployment.yaml ``` - \ No newline at end of file +This can take a while. +Sit back and relax and have a look into your minikube dashboard: +``` +minikube dashboard +``` +Wait until all pods turn green and they don't show a warning `Waiting: ContainerCreating` anymore. 
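+
+If you prefer the shell over the dashboard, a rough equivalent (assuming the `staging` namespace used throughout this guide) is:
+```shell
+kubectl --namespace=staging get pods --watch
+```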
+
+## Expose the services
+
+```shell
+kubectl expose deployment nitro-backend --namespace=staging --type=LoadBalancer --port=4000
+kubectl expose deployment nitro-web --namespace=staging --type=LoadBalancer --port=3000
+```
+
+## Access the service
+
+```shell
+minikube service nitro-backend --namespace=staging
+minikube service nitro-web --namespace=staging
+```

From 7d7e34b1323801c6955b86c440da76ec1e64f490 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Robert=20Sch=C3=A4fer?=
Date: Mon, 28 Jan 2019 18:33:21 +0100
Subject: [PATCH 05/55] Add deployment for db-migration-worker

---
 README.md | 11 +++-
 staging/.gitignore | 1 +
 ...b-migration-worker-configmap.template.yaml | 12 ++++
 staging/db-migration-worker-deployment.yaml | 63 +++++++++++++++++++
 4 files changed, 85 insertions(+), 2 deletions(-)
 create mode 100644 staging/.gitignore
 create mode 100644 staging/db-migration-worker-configmap.template.yaml
 create mode 100644 staging/db-migration-worker-deployment.yaml

diff --git a/README.md b/README.md
index db0007d92..17fbe5780 100644
--- a/README.md
+++ b/README.md
@@ -22,10 +22,17 @@ There are many Kubernetes distributions, but if you're just getting started, Min
 kubectl create -f namespace-staging.json
 ```
 
+## Change config maps according to your needs
+```shell
+cd ./staging
+cp db-migration-worker-configmap.template.yaml db-migration-worker-configmap.yaml
+# edit all variables according to the setup of the remote legacy server
+```
+
 ## Apply the config map to staging namespace
 ```shell
 cd ./staging
-kubectl apply -f neo4j-configmap.yaml -f backend-configmap.yaml -f web-configmap.yaml
+kubectl apply -f neo4j-configmap.yaml -f backend-configmap.yaml -f web-configmap.yaml -f db-migration-worker-configmap.yaml
 ```
 
 ## Set up secrets and deploy them
@@ -39,7 +46,7 @@ kubectl apply -f secrets.yaml
 ## Deploy the app
 ```shell
 cd ./staging
-kubectl apply -f neo4j-deployment.yaml -f backend-deployment.yaml -f web-deployment.yaml
+kubectl apply -f neo4j-deployment.yaml -f backend-deployment.yaml -f web-deployment.yaml -f db-migration-worker-deployment.yaml
 ```
 This can take a while.
Sit back and relax and have a look into your minikube dashboard: diff --git a/staging/.gitignore b/staging/.gitignore new file mode 100644 index 000000000..d9c5e61bf --- /dev/null +++ b/staging/.gitignore @@ -0,0 +1 @@ +db-migration-worker-configmap.yaml diff --git a/staging/db-migration-worker-configmap.template.yaml b/staging/db-migration-worker-configmap.template.yaml new file mode 100644 index 000000000..e00077577 --- /dev/null +++ b/staging/db-migration-worker-configmap.template.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: ConfigMap +data: + SSH_USERNAME: "" + SSH_HOST: "" + MONGODB_USERNAME: "hc-api" + MONGODB_AUTH_DB: "hc_api" + MONGODB_DATABASE: "hc_api" + UPLOADS_DIRECTORY: "/var/www/api/uploads" +metadata: + name: staging-db-migration-worker + namespace: staging diff --git a/staging/db-migration-worker-deployment.yaml b/staging/db-migration-worker-deployment.yaml new file mode 100644 index 000000000..7b06dcb6d --- /dev/null +++ b/staging/db-migration-worker-deployment.yaml @@ -0,0 +1,63 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: nitro-db-migration-worker + namespace: staging +spec: + replicas: 1 + minReadySeconds: 15 + progressDeadlineSeconds: 60 + selector: + matchLabels: + workload.user.cattle.io/workloadselector: deployment-staging-db-migration-worker + template: + metadata: + labels: + workload.user.cattle.io/workloadselector: deployment-staging-db-migration-worker + name: "nitro-db-migration-worker" + spec: + containers: + - env: + - name: SSH_USERNAME + valueFrom: + configMapKeyRef: + name: staging-db-migration-worker + key: SSH_USERNAME + - name: SSH_HOST + valueFrom: + configMapKeyRef: + name: staging-db-migration-worker + key: SSH_HOST + - name: MONGODB_USERNAME + valueFrom: + configMapKeyRef: + name: staging-db-migration-worker + key: MONGODB_USERNAME + - name: MONGODB_AUTH_DB + valueFrom: + configMapKeyRef: + name: staging-db-migration-worker + key: MONGODB_AUTH_DB + - name: MONGODB_DATABASE + valueFrom: + configMapKeyRef: + name: staging-db-migration-worker + key: MONGODB_DATABASE + - name: UPLOADS_DIRECTORY + valueFrom: + configMapKeyRef: + name: staging-db-migration-worker + key: UPLOADS_DIRECTORY + - name: MONGODB_PASSWORD + valueFrom: + secretKeyRef: + name: staging + key: MONGODB_PASSWORD + optional: false + image: humanconnection/db-migration-worker:latest + name: nitro-db-migration-worker + resources: {} + imagePullPolicy: Always + restartPolicy: Always + terminationGracePeriodSeconds: 30 +status: {} From ad55cc03e521fc728a8174b53da86a2f7996683d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=A4fer?= Date: Mon, 28 Jan 2019 18:46:52 +0100 Subject: [PATCH 06/55] Remove obsolete deployment files --- staging/deployment.yaml | 260 ---------------------------------------- 1 file changed, 260 deletions(-) delete mode 100644 staging/deployment.yaml diff --git a/staging/deployment.yaml b/staging/deployment.yaml deleted file mode 100644 index 9d68db535..000000000 --- a/staging/deployment.yaml +++ /dev/null @@ -1,260 +0,0 @@ -apiVersion: v1 -items: -- apiVersion: extensions/v1beta1 - kind: Deployment - metadata: - name: backend - namespace: staging - spec: - minReadySeconds: 15 - progressDeadlineSeconds: 60 - replicas: 1 - revisionHistoryLimit: 10 - selector: - matchLabels: - cattle.io/creator: norman - workload.user.cattle.io/workloadselector: deployment-staging-backend - strategy: - rollingUpdate: - maxSurge: 1 - maxUnavailable: 0 - type: RollingUpdate - template: - spec: - containers: - - env: - - name: MOCK - valueFrom: - 
configMapKeyRef: - key: MOCK - name: staging-backend - optional: false - - name: NEO4J_URI - valueFrom: - configMapKeyRef: - key: NEO4J_URI - name: staging-neo4j - optional: false - - name: JWT_SECRET - valueFrom: - secretKeyRef: - key: JWT_SECRET - name: staging - optional: false - - name: NEO4J_AUTH - valueFrom: - configMapKeyRef: - key: NEO4J_AUTH - name: staging-neo4j - optional: false - - name: CLIENT_URI - valueFrom: - configMapKeyRef: - key: CLIENT_URI - name: staging-web - optional: false - - name: GRAPHQL_PORT - valueFrom: - configMapKeyRef: - key: GRAPHQL_PORT - name: staging-backend - optional: false - - name: GRAPHQL_URI - valueFrom: - configMapKeyRef: - key: GRAPHQL_URI - name: staging-backend - optional: false - image: humanconnection/nitro-backend:latest - imagePullPolicy: Always - name: backend - resources: {} - tty: true - restartPolicy: Always - terminationGracePeriodSeconds: 30 -#- apiVersion: extensions/v1beta1 -# kind: Deployment -# metadata: -# annotations: -# deployment.kubernetes.io/revision: "2" -# field.cattle.io/creatorId: user-x8jr4 -# field.cattle.io/publicEndpoints: '[{"nodeName":"c-2kbhr:m-bmgq4","addresses":["104.248.30.130"],"port":7687,"protocol":"TCP","podName":"staging:neo4j-2-6589cbc4d5-q4bxl","allNodes":false},{"nodeName":"c-2kbhr:m-bmgq4","addresses":["104.248.30.130"],"port":7474,"protocol":"TCP","podName":"staging:neo4j-2-6589cbc4d5-q4bxl","allNodes":false},{"nodeName":"c-2kbhr:m-bmgq4","addresses":["104.248.30.130"],"port":7473,"protocol":"TCP","podName":"staging:neo4j-2-6589cbc4d5-q4bxl","allNodes":false}]' -# creationTimestamp: 2018-12-10T19:07:58Z -# generation: 8 -# labels: -# cattle.io/creator: norman -# workload.user.cattle.io/workloadselector: deployment-staging-neo4j-2 -# name: neo4j-2 -# namespace: staging -# resourceVersion: "2380945" -# selfLink: /apis/extensions/v1beta1/namespaces/staging/deployments/neo4j-2 -# uid: e80460f6-fcae-11e8-943a-c6c288d5f6fa -# spec: -# progressDeadlineSeconds: 600 -# replicas: 1 -# revisionHistoryLimit: 10 -# selector: -# matchLabels: -# workload.user.cattle.io/workloadselector: deployment-staging-neo4j-2 -# strategy: -# rollingUpdate: -# maxSurge: 1 -# maxUnavailable: 0 -# type: RollingUpdate -# template: -# metadata: -# annotations: -# cattle.io/timestamp: 2018-12-11T11:11:09Z -# field.cattle.io/ports: '[[{"containerPort":7687,"dnsName":"neo4j-2-hostport","hostPort":7687,"kind":"HostPort","name":"7687tcp76870","protocol":"TCP","sourcePort":7687},{"containerPort":7474,"dnsName":"neo4j-2-hostport","hostPort":7474,"kind":"HostPort","name":"7474tcp74740","protocol":"TCP","sourcePort":7474},{"containerPort":7473,"dnsName":"neo4j-2-hostport","hostPort":7473,"kind":"HostPort","name":"7473tcp74730","protocol":"TCP","sourcePort":7473}]]' -# creationTimestamp: null -# labels: -# workload.user.cattle.io/workloadselector: deployment-staging-neo4j-2 -# spec: -# containers: -# - env: -# - name: NEO4J_AUTH -# value: none -# image: humanconnection/neo4j:latest -# imagePullPolicy: IfNotPresent -# name: neo4j-2 -# ports: -# - containerPort: 7687 -# hostPort: 7687 -# name: 7687tcp76870 -# protocol: TCP -# - containerPort: 7474 -# hostPort: 7474 -# name: 7474tcp74740 -# protocol: TCP -# - containerPort: 7473 -# hostPort: 7473 -# name: 7473tcp74730 -# protocol: TCP -# resources: {} -# securityContext: -# allowPrivilegeEscalation: false -# capabilities: {} -# privileged: false -# readOnlyRootFilesystem: false -# runAsNonRoot: false -# stdin: true -# terminationMessagePath: /dev/termination-log -# terminationMessagePolicy: File 
-# tty: true -# dnsPolicy: ClusterFirst -# restartPolicy: Always -# schedulerName: default-scheduler -# securityContext: {} -# terminationGracePeriodSeconds: 30 -# status: -# availableReplicas: 1 -# conditions: -# - lastTransitionTime: 2018-12-10T19:07:58Z -# lastUpdateTime: 2018-12-11T11:11:18Z -# message: ReplicaSet "neo4j-2-6589cbc4d5" has successfully progressed. -# reason: NewReplicaSetAvailable -# status: "True" -# type: Progressing -# - lastTransitionTime: 2018-12-11T12:12:41Z -# lastUpdateTime: 2018-12-11T12:12:41Z -# message: Deployment has minimum availability. -# reason: MinimumReplicasAvailable -# status: "True" -# type: Available -# observedGeneration: 8 -# readyReplicas: 1 -# replicas: 1 -# updatedReplicas: 1 -##- apiVersion: extensions/v1beta1 -# kind: Deployment -# metadata: -# annotations: -# deployment.kubernetes.io/revision: "15" -# field.cattle.io/creatorId: user-x8jr4 -# field.cattle.io/publicEndpoints: '[{"addresses":["68.183.211.116"],"port":31726,"protocol":"TCP","serviceName":"staging:web-nodeport","allNodes":true},{"addresses":["104.248.25.205"],"port":80,"protocol":"HTTP","serviceName":"staging:ingress-ef72b2ceebfff95d50b0537c0e9e98d8","ingressName":"staging:web","hostname":"web.staging.104.248.25.205.xip.io","allNodes":true}]' -# creationTimestamp: 2018-11-30T13:56:41Z -# generation: 56 -# labels: -# cattle.io/creator: norman -# workload.user.cattle.io/workloadselector: deployment-staging-web -# name: web -# namespace: staging -# resourceVersion: "2401610" -# selfLink: /apis/extensions/v1beta1/namespaces/staging/deployments/web -# uid: c3870196-f4a7-11e8-943a-c6c288d5f6fa -# spec: -# progressDeadlineSeconds: 600 -# replicas: 1 -# revisionHistoryLimit: 10 -# selector: -# matchLabels: -# workload.user.cattle.io/workloadselector: deployment-staging-web -# strategy: -# rollingUpdate: -# maxSurge: 1 -# maxUnavailable: 0 -# type: RollingUpdate -# template: -# metadata: -# labels: -# workload.user.cattle.io/workloadselector: deployment-staging-web -# spec: -# containers: -# - env: -# - name: HOST -# value: 0.0.0.0 -# - name: JWT_SECRET -# valueFrom: -# secretKeyRef: -# key: JWT_SECRET -# name: jwt-secret -# optional: false -# - name: BACKEND_URL -# valueFrom: -# configMapKeyRef: -# key: GRAPHQL_URI -# name: staging-configs -# optional: false -# image: humanconnection/nitro-web:latest -# imagePullPolicy: Always -# name: web -# ports: -# - containerPort: 3000 -# name: 3000tcp01 -# protocol: TCP -# resources: {} -# securityContext: -# allowPrivilegeEscalation: false -# capabilities: {} -# privileged: false -# readOnlyRootFilesystem: false -# runAsNonRoot: false -# stdin: true -# terminationMessagePath: /dev/termination-log -# terminationMessagePolicy: File -# tty: true -# dnsPolicy: ClusterFirst -# restartPolicy: Always -# schedulerName: default-scheduler -# securityContext: {} -# terminationGracePeriodSeconds: 30 -# status: -# availableReplicas: 1 -# conditions: -# - lastTransitionTime: 2018-11-30T14:53:36Z -# lastUpdateTime: 2018-12-11T11:17:34Z -# message: ReplicaSet "web-5864d6db9c" has successfully progressed. -# reason: NewReplicaSetAvailable -# status: "True" -# type: Progressing -# - lastTransitionTime: 2018-12-11T11:23:17Z -# lastUpdateTime: 2018-12-11T11:23:17Z -# message: Deployment has minimum availability. 
-# reason: MinimumReplicasAvailable
-# status: "True"
-# type: Available
-# observedGeneration: 56
-# readyReplicas: 1
-# replicas: 1
-# updatedReplicas: 1
-kind: List

From 8a74f452a9a71ad2a30e5f4117aa1e71f5fc3c4f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Robert=20Sch=C3=A4fer?=
Date: Wed, 30 Jan 2019 19:18:23 +0100
Subject: [PATCH 07/55] Create persistent volumes for minikube

Running the import works in minikube!
---
 README.md | 18 ++++++++++++++++++
 staging/db-migration-worker-deployment.yaml | 12 ++++++++++++
 staging/neo4j-deployment.yaml | 7 +++++++
 staging/volumes/mongo-export-claim.yaml | 11 +++++++++++
 staging/volumes/mongo-export-volume.yaml | 12 ++++++++++++
 staging/volumes/ssh-keys-volume-claim.yml | 11 +++++++++++
 staging/volumes/ssh-keys-volume.yaml | 12 ++++++++++++
 staging/volumes/uploads-claim.yaml | 11 +++++++++++
 staging/volumes/uploads-volume.yaml | 12 ++++++++++++
 9 files changed, 106 insertions(+)
 create mode 100644 staging/volumes/mongo-export-claim.yaml
 create mode 100644 staging/volumes/mongo-export-volume.yaml
 create mode 100644 staging/volumes/ssh-keys-volume-claim.yml
 create mode 100644 staging/volumes/ssh-keys-volume.yaml
 create mode 100644 staging/volumes/uploads-claim.yaml
 create mode 100644 staging/volumes/uploads-volume.yaml

diff --git a/README.md b/README.md
index 17fbe5780..29faf84d5 100644
--- a/README.md
+++ b/README.md
@@ -46,6 +46,7 @@ kubectl apply -f secrets.yaml
 ## Deploy the app
 ```shell
 cd ./staging
+kubectl apply -f ./volumes
 kubectl apply -f neo4j-deployment.yaml -f backend-deployment.yaml -f web-deployment.yaml -f db-migration-worker-deployment.yaml
 ```
 This can take a while.
@@ -68,3 +69,20 @@ kubectl expose deployment nitro-web --namespace=staging --type=LoadBalancer
 minikube service nitro-backend --namespace=staging
 minikube service nitro-web --namespace=staging
 ```
+
+
+## Provisioning db-migration-worker
+Copy your private ssh key and the `known_hosts` file for your remote legacy server.
+```shell + +# check the corresponding db-migration-worker pod +kubectl --namespace=staging get pods +# change below +kubectl cp path/to/your/ssh/keys/folder staging/nitro-db-migration-worker-:/root/ +``` + +Run the migration: +```shell +# change below +kubectl --namespace=staging exec -it nitro-db-migration-worker- ./import.sh +``` diff --git a/staging/db-migration-worker-deployment.yaml b/staging/db-migration-worker-deployment.yaml index 7b06dcb6d..509f98093 100644 --- a/staging/db-migration-worker-deployment.yaml +++ b/staging/db-migration-worker-deployment.yaml @@ -58,6 +58,18 @@ spec: name: nitro-db-migration-worker resources: {} imagePullPolicy: Always + volumeMounts: + - mountPath: /root/ + name: ssh-keys-directory + - mountPath: /mongo-export/ + name: mongo-export restartPolicy: Always + volumes: + - name: ssh-keys-directory + persistentVolumeClaim: + claimName: ssh-keys-claim + - name: mongo-export + persistentVolumeClaim: + claimName: mongo-export-claim terminationGracePeriodSeconds: 30 status: {} diff --git a/staging/neo4j-deployment.yaml b/staging/neo4j-deployment.yaml index 66cf5a966..e8268b047 100644 --- a/staging/neo4j-deployment.yaml +++ b/staging/neo4j-deployment.yaml @@ -46,5 +46,12 @@ spec: # - containerPort: 7473 resources: {} imagePullPolicy: IfNotPresent + volumeMounts: + - mountPath: /mongo-export/ + name: mongo-export restartPolicy: Always + volumes: + - name: mongo-export + persistentVolumeClaim: + claimName: mongo-export-claim status: {} diff --git a/staging/volumes/mongo-export-claim.yaml b/staging/volumes/mongo-export-claim.yaml new file mode 100644 index 000000000..1c91996db --- /dev/null +++ b/staging/volumes/mongo-export-claim.yaml @@ -0,0 +1,11 @@ +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: mongo-export-claim + namespace: staging +spec: + accessModes: + - ReadWriteMany + resources: + requests: + storage: 1Gi diff --git a/staging/volumes/mongo-export-volume.yaml b/staging/volumes/mongo-export-volume.yaml new file mode 100644 index 000000000..945c28765 --- /dev/null +++ b/staging/volumes/mongo-export-volume.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: mongo-export-volume + namespace: staging +spec: + accessModes: + - ReadWriteMany + capacity: + storage: 1Gi + hostPath: + path: /data/shared/mongo-exports/ diff --git a/staging/volumes/ssh-keys-volume-claim.yml b/staging/volumes/ssh-keys-volume-claim.yml new file mode 100644 index 000000000..30191a500 --- /dev/null +++ b/staging/volumes/ssh-keys-volume-claim.yml @@ -0,0 +1,11 @@ +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: ssh-keys-claim + namespace: staging +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Mi diff --git a/staging/volumes/ssh-keys-volume.yaml b/staging/volumes/ssh-keys-volume.yaml new file mode 100644 index 000000000..9c0353b74 --- /dev/null +++ b/staging/volumes/ssh-keys-volume.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: ssh-keys-volume + namespace: staging +spec: + accessModes: + - ReadWriteOnce + capacity: + storage: 1Mi + hostPath: + path: /data/pv0001/ diff --git a/staging/volumes/uploads-claim.yaml b/staging/volumes/uploads-claim.yaml new file mode 100644 index 000000000..c1b11ed4e --- /dev/null +++ b/staging/volumes/uploads-claim.yaml @@ -0,0 +1,11 @@ +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: uploads-claim + namespace: staging +spec: + accessModes: + - ReadWriteMany + resources: + requests: + storage: 8Gi diff --git 
a/staging/volumes/uploads-volume.yaml b/staging/volumes/uploads-volume.yaml new file mode 100644 index 000000000..4600a76a4 --- /dev/null +++ b/staging/volumes/uploads-volume.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: uploads-volume + namespace: staging +spec: + accessModes: + - ReadWriteMany + capacity: + storage: 8Gi + hostPath: + path: /data/shared/uploads/ From 330fd9a8e9b0d95e833e91aa62cfdc887ed6c354 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=A4fer?= Date: Wed, 30 Jan 2019 20:15:40 +0100 Subject: [PATCH 08/55] Enable apoc to run neo4j import --- staging/neo4j-deployment.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/staging/neo4j-deployment.yaml b/staging/neo4j-deployment.yaml index e8268b047..2b3485c77 100644 --- a/staging/neo4j-deployment.yaml +++ b/staging/neo4j-deployment.yaml @@ -17,6 +17,8 @@ spec: spec: containers: - env: + - name: NEO4J_apoc_import_file_enabled + value: "true" - name: NEO4J_dbms_memory_pagecache_size value: 1G - name: NEO4J_dbms_memory_heap_max__size From 5cd0485117061624070e1758435e1816f8d022e9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=A4fer?= Date: Thu, 31 Jan 2019 18:19:19 +0100 Subject: [PATCH 09/55] Point the backend to the local neo4j service --- README.md | 3 +-- services/backend.yml | 22 ++++++++++++++++++++++ services/neo4j.yml | 23 +++++++++++++++++++++++ services/web.yml | 21 +++++++++++++++++++++ services/webapp.yml | 21 +++++++++++++++++++++ staging/backend-deployment.yaml | 5 ----- staging/neo4j-configmap.yaml | 2 +- staging/neo4j-service.yaml | 22 ---------------------- 8 files changed, 89 insertions(+), 30 deletions(-) create mode 100644 services/backend.yml create mode 100644 services/neo4j.yml create mode 100644 services/web.yml create mode 100644 services/webapp.yml delete mode 100644 staging/neo4j-service.yaml diff --git a/README.md b/README.md index 29faf84d5..00ddea2d4 100644 --- a/README.md +++ b/README.md @@ -59,8 +59,7 @@ Wait until all pods turn green and they don't show a warning `Waiting: Container ## Expose the services ```shell -kubectl expose deployment nitro-backend --namespace=staging --type=LoadBalancer --port=4000 -kubectl expose deployment nitro-web --namespace=staging --type=LoadBalancer --port=3000 +kubectl create -f services/ ``` ## Access the service diff --git a/services/backend.yml b/services/backend.yml new file mode 100644 index 000000000..0d4246275 --- /dev/null +++ b/services/backend.yml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: Service +metadata: + name: nitro-backend + namespace: staging + labels: + workload.user.cattle.io/workloadselector: deployment-staging-backend +spec: + ports: + - name: web + protocol: TCP + port: 4000 + targetPort: 4000 + nodePort: 32612 + selector: + workload.user.cattle.io/workloadselector: deployment-staging-backend + type: LoadBalancer + sessionAffinity: None + externalTrafficPolicy: Cluster +status: + loadBalancer: {} + diff --git a/services/neo4j.yml b/services/neo4j.yml new file mode 100644 index 000000000..681ba2e40 --- /dev/null +++ b/services/neo4j.yml @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: Service +metadata: + name: neo4j + namespace: staging + labels: + workload.user.cattle.io/workloadselector: deployment-staging-neo4j +spec: + selector: + workload.user.cattle.io/workloadselector: deployment-staging-neo4j + ports: + - name: bolt + protocol: TCP + port: 7687 + targetPort: 7687 + - name: web + protocol: TCP + port: 7474 + targetPort: 7474 + type: LoadBalancer + sessionAffinity: None + type: ClusterIP + 
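+# NOTE: `type` is declared twice above (LoadBalancer, then ClusterIP); YAML
+# parsers that tolerate duplicate keys keep only the last value, so this
+# service is effectively ClusterIP.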
diff --git a/services/web.yml b/services/web.yml new file mode 100644 index 000000000..a46e27dea --- /dev/null +++ b/services/web.yml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Service +metadata: + name: nitro-web + namespace: staging + labels: + workload.user.cattle.io/workloadselector: deployment-staging-web +spec: + ports: + - name: web + protocol: "TCP" + port: 3000 + targetPort: 3000 + selector: + workload.user.cattle.io/workloadselector: deployment-staging-web + type: LoadBalancer + sessionAffinity: None + externalTrafficPolicy: Cluster +status: + loadBalancer: {} + diff --git a/services/webapp.yml b/services/webapp.yml new file mode 100644 index 000000000..a46e27dea --- /dev/null +++ b/services/webapp.yml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Service +metadata: + name: nitro-web + namespace: staging + labels: + workload.user.cattle.io/workloadselector: deployment-staging-web +spec: + ports: + - name: web + protocol: "TCP" + port: 3000 + targetPort: 3000 + selector: + workload.user.cattle.io/workloadselector: deployment-staging-web + type: LoadBalancer + sessionAffinity: None + externalTrafficPolicy: Cluster +status: + loadBalancer: {} + diff --git a/staging/backend-deployment.yaml b/staging/backend-deployment.yaml index b5f379ddb..f2ebffb9c 100644 --- a/staging/backend-deployment.yaml +++ b/staging/backend-deployment.yaml @@ -7,11 +7,6 @@ spec: replicas: 2 minReadySeconds: 15 progressDeadlineSeconds: 60 - # strategy: - # rollingUpdate: - # maxSurge: 1 - # maxUnavailable: 0 - # type: RollingUpdate selector: matchLabels: workload.user.cattle.io/workloadselector: deployment-staging-backend diff --git a/staging/neo4j-configmap.yaml b/staging/neo4j-configmap.yaml index 2f5ece848..78d1ba3cd 100644 --- a/staging/neo4j-configmap.yaml +++ b/staging/neo4j-configmap.yaml @@ -1,7 +1,7 @@ apiVersion: v1 kind: ConfigMap data: - NEO4J_URI: "bolt://neo4j:7687" + NEO4J_URI: "bolt://neo4j.staging:7687" NEO4J_USER: "neo4j" NEO4J_AUTH: none metadata: diff --git a/staging/neo4j-service.yaml b/staging/neo4j-service.yaml deleted file mode 100644 index 0f66d7474..000000000 --- a/staging/neo4j-service.yaml +++ /dev/null @@ -1,22 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - annotations: - field.cattle.io/ipAddresses: "null" - field.cattle.io/targetDnsRecordIds: "null" - field.cattle.io/targetWorkloadIds: '["deployment:staging:nitro-neo4j"]' - labels: - cattle.io/creator: norman - name: neo4j - namespace: staging -spec: - clusterIP: None - ports: - - name: default - port: 42 - protocol: TCP - targetPort: 42 - selector: - workloadID_neo4j: "true" - sessionAffinity: None - type: ClusterIP From bbfe39e0766ce01f35aad45ce7c869b10d0979a5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=A4fer?= Date: Thu, 31 Jan 2019 23:24:20 +0100 Subject: [PATCH 10/55] Use folders for groups of YAML files This keeps our configuration DRY and helps us to save keystrokes. 
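
For example, each group of resources can now be applied in one go (directory layout as introduced by this commit):

    kubectl apply -f config/ -f volumes/ -f deployments/ -f services/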
--- README.md | 23 ++++++++--------- config/.gitignore | 1 + .../backend.yml | 0 .../neo4j-configmap.yaml => config/neo4j.yml | 0 staging/web-configmap.yaml => config/web.yml | 0 ...e.yaml => db-migration-worker.template.yml | 0 .../backend.yml | 0 .../db-migration-worker.yml | 0 .../neo4j.yml | 0 .../web.yml | 0 dummies/backend-service.yaml | 13 ---------- dummies/do-loadbalancer.yaml | 12 --------- dummies/ingress-backend.yaml | 15 ----------- dummies/nginx.yaml | 22 ---------------- dummies/web-service.yaml | 13 ---------- namespace-staging.json | 10 -------- namespace-staging.yml | 6 +++++ ...rets.yaml.template => secrets.template.yml | 0 services/backend.yml | 2 -- services/neo4j.yml | 2 -- services/web.yml | 1 - services/webapp.yml | 21 ---------------- staging/.gitignore | 1 - staging/volumes/mongo-export-claim.yaml | 11 -------- staging/volumes/mongo-export-volume.yaml | 12 --------- staging/volumes/ssh-keys-volume-claim.yml | 11 -------- staging/volumes/ssh-keys-volume.yaml | 12 --------- staging/volumes/uploads-claim.yaml | 11 -------- staging/volumes/uploads-volume.yaml | 12 --------- volumes/mongo-export.yml | 25 +++++++++++++++++++ volumes/ssh-keys.yml | 25 +++++++++++++++++++ volumes/uploads.yml | 25 +++++++++++++++++++ 32 files changed, 93 insertions(+), 193 deletions(-) create mode 100644 config/.gitignore rename staging/backend-configmap.yaml => config/backend.yml (100%) rename staging/neo4j-configmap.yaml => config/neo4j.yml (100%) rename staging/web-configmap.yaml => config/web.yml (100%) rename staging/db-migration-worker-configmap.template.yaml => db-migration-worker.template.yml (100%) rename staging/backend-deployment.yaml => deployments/backend.yml (100%) rename staging/db-migration-worker-deployment.yaml => deployments/db-migration-worker.yml (100%) rename staging/neo4j-deployment.yaml => deployments/neo4j.yml (100%) rename staging/web-deployment.yaml => deployments/web.yml (100%) delete mode 100644 dummies/backend-service.yaml delete mode 100644 dummies/do-loadbalancer.yaml delete mode 100644 dummies/ingress-backend.yaml delete mode 100644 dummies/nginx.yaml delete mode 100644 dummies/web-service.yaml delete mode 100644 namespace-staging.json create mode 100644 namespace-staging.yml rename staging/secrets.yaml.template => secrets.template.yml (100%) delete mode 100644 services/webapp.yml delete mode 100644 staging/.gitignore delete mode 100644 staging/volumes/mongo-export-claim.yaml delete mode 100644 staging/volumes/mongo-export-volume.yaml delete mode 100644 staging/volumes/ssh-keys-volume-claim.yml delete mode 100644 staging/volumes/ssh-keys-volume.yaml delete mode 100644 staging/volumes/uploads-claim.yaml delete mode 100644 staging/volumes/uploads-volume.yaml create mode 100644 volumes/mongo-export.yml create mode 100644 volumes/ssh-keys.yml create mode 100644 volumes/uploads.yml diff --git a/README.md b/README.md index 00ddea2d4..48d644622 100644 --- a/README.md +++ b/README.md @@ -19,35 +19,33 @@ There are many Kubernetes distributions, but if you're just getting started, Min ## Create a namespace locally ```shell -kubectl create -f namespace-staging.json +kubectl create -f namespace-staging.yml ``` ## Change config maps according to your needs ```shell -cd ./staging +cd config/ cp db-migration-worker-configmap.template.yaml db-migration-worker-configmap.yaml # edit all variables according to the setup of the remote legacy server +cd .. 
``` ## Apply the config map to staging namespace ```shell -cd ./staging -kubectl apply -f neo4j-configmap.yaml -f backend-configmap.yaml -f web-configmap.yaml -f db-migration-worker-configmap.yaml +kubectl apply -f config/ ``` ## Setup secrets and deploy themn ```shell -cd ./staging cp secrets.yaml.template secrets.yaml # change all vars as needed and deploy it afterwards kubectl apply -f secrets.yaml ``` -## Deploy the app +## Create volumes and deployments ```shell -cd ./staging -kubectl apply -f ./volumes -kubectl apply -f neo4j-deployment.yaml -f backend-deployment.yaml -f web-deployment.yaml -f db-migration-worker-deployment.yaml +kubectl apply -f volumes/ +kubectl apply -f deployments/ ``` This can take a while. Sit back and relax and have a look into your minikube dashboard: @@ -59,7 +57,7 @@ Wait until all pods turn green and they don't show a warning `Waiting: Container ## Expose the services ```shell -kubectl create -f services/ +kubectl apply -f services/ ``` ## Access the service @@ -77,11 +75,12 @@ Copy your private ssh key and the `.known-hosts` file of your remote legacy serv # check the corresponding db-migration-worker pod kubectl --namespace=staging get pods # change below -kubectl cp path/to/your/ssh/keys/folder staging/nitro-db-migration-worker-:/root/ +kubectl cp path/to/your/ssh/keys/.ssh staging/nitro-db-migration-worker-:/root/ ``` Run the migration: ```shell -# change below +# change below kubectl --namespace=staging exec -it nitro-db-migration-worker- ./import.sh +kubectl --namespace=staging exec -it nitro-neo4j- ./import/import.sh ``` diff --git a/config/.gitignore b/config/.gitignore new file mode 100644 index 000000000..6fe22561d --- /dev/null +++ b/config/.gitignore @@ -0,0 +1 @@ +db-migration-worker.yml diff --git a/staging/backend-configmap.yaml b/config/backend.yml similarity index 100% rename from staging/backend-configmap.yaml rename to config/backend.yml diff --git a/staging/neo4j-configmap.yaml b/config/neo4j.yml similarity index 100% rename from staging/neo4j-configmap.yaml rename to config/neo4j.yml diff --git a/staging/web-configmap.yaml b/config/web.yml similarity index 100% rename from staging/web-configmap.yaml rename to config/web.yml diff --git a/staging/db-migration-worker-configmap.template.yaml b/db-migration-worker.template.yml similarity index 100% rename from staging/db-migration-worker-configmap.template.yaml rename to db-migration-worker.template.yml diff --git a/staging/backend-deployment.yaml b/deployments/backend.yml similarity index 100% rename from staging/backend-deployment.yaml rename to deployments/backend.yml diff --git a/staging/db-migration-worker-deployment.yaml b/deployments/db-migration-worker.yml similarity index 100% rename from staging/db-migration-worker-deployment.yaml rename to deployments/db-migration-worker.yml diff --git a/staging/neo4j-deployment.yaml b/deployments/neo4j.yml similarity index 100% rename from staging/neo4j-deployment.yaml rename to deployments/neo4j.yml diff --git a/staging/web-deployment.yaml b/deployments/web.yml similarity index 100% rename from staging/web-deployment.yaml rename to deployments/web.yml diff --git a/dummies/backend-service.yaml b/dummies/backend-service.yaml deleted file mode 100644 index 48fffbc24..000000000 --- a/dummies/backend-service.yaml +++ /dev/null @@ -1,13 +0,0 @@ -kind: Service -apiVersion: v1 -metadata: - labels: - k8s-app: nitro-backend - name: nitro-backend - namespace: staging -spec: - ports: - - port: 4000 - targetPort: 4000 - selector: - k8s-app: nitro-backend 
diff --git a/dummies/do-loadbalancer.yaml b/dummies/do-loadbalancer.yaml deleted file mode 100644 index 9c700e082..000000000 --- a/dummies/do-loadbalancer.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: sample-load-balancer - namespace: staging -spec: - type: LoadBalancer - ports: - - protocol: TCP - port: 80 - targetPort: 80 - name: http diff --git a/dummies/ingress-backend.yaml b/dummies/ingress-backend.yaml deleted file mode 100644 index 0640b49fd..000000000 --- a/dummies/ingress-backend.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: backend-ingress - namespace: staging - annotations: - nginx.ingress.kubernetes.io/rewrite-target: / -spec: - rules: - - http: - paths: - - path: / - backend: - serviceName: backend - servicePort: 4000 diff --git a/dummies/nginx.yaml b/dummies/nginx.yaml deleted file mode 100644 index 1f5136b4b..000000000 --- a/dummies/nginx.yaml +++ /dev/null @@ -1,22 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: ingress-nginx - namespace: staging - labels: - app.kubernetes.io/name: ingress-nginx - app.kubernetes.io/part-of: ingress-nginx -spec: - type: NodePort - ports: - - name: http - port: 80 - targetPort: 80 - protocol: TCP - - name: https - port: 443 - targetPort: 443 - protocol: TCP - selector: - app.kubernetes.io/name: ingress-nginx - app.kubernetes.io/part-of: ingress-nginx diff --git a/dummies/web-service.yaml b/dummies/web-service.yaml deleted file mode 100644 index 847ba3c05..000000000 --- a/dummies/web-service.yaml +++ /dev/null @@ -1,13 +0,0 @@ -kind: Service -apiVersion: v1 -metadata: - labels: - k8s-app: nitro-web - name: nitro-web - namespace: staging -spec: - ports: - - port: 3000 - targetPort: 3000 - selector: - k8s-app: nitro-web diff --git a/namespace-staging.json b/namespace-staging.json deleted file mode 100644 index 6b71bc772..000000000 --- a/namespace-staging.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "kind": "Namespace", - "apiVersion": "v1", - "metadata": { - "name": "staging", - "labels": { - "name": "staging" - } - } -} \ No newline at end of file diff --git a/namespace-staging.yml b/namespace-staging.yml new file mode 100644 index 000000000..d63b4e0f9 --- /dev/null +++ b/namespace-staging.yml @@ -0,0 +1,6 @@ +kind: Namespace +apiVersion: v1 +metadata: + name: staging + labels: + name: staging diff --git a/staging/secrets.yaml.template b/secrets.template.yml similarity index 100% rename from staging/secrets.yaml.template rename to secrets.template.yml diff --git a/services/backend.yml b/services/backend.yml index 0d4246275..d4f01286a 100644 --- a/services/backend.yml +++ b/services/backend.yml @@ -8,10 +8,8 @@ metadata: spec: ports: - name: web - protocol: TCP port: 4000 targetPort: 4000 - nodePort: 32612 selector: workload.user.cattle.io/workloadselector: deployment-staging-backend type: LoadBalancer diff --git a/services/neo4j.yml b/services/neo4j.yml index 681ba2e40..e071f78bb 100644 --- a/services/neo4j.yml +++ b/services/neo4j.yml @@ -10,11 +10,9 @@ spec: workload.user.cattle.io/workloadselector: deployment-staging-neo4j ports: - name: bolt - protocol: TCP port: 7687 targetPort: 7687 - name: web - protocol: TCP port: 7474 targetPort: 7474 type: LoadBalancer diff --git a/services/web.yml b/services/web.yml index a46e27dea..e1bd542f1 100644 --- a/services/web.yml +++ b/services/web.yml @@ -8,7 +8,6 @@ metadata: spec: ports: - name: web - protocol: "TCP" port: 3000 targetPort: 3000 selector: diff --git a/services/webapp.yml 
b/services/webapp.yml deleted file mode 100644 index a46e27dea..000000000 --- a/services/webapp.yml +++ /dev/null @@ -1,21 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: nitro-web - namespace: staging - labels: - workload.user.cattle.io/workloadselector: deployment-staging-web -spec: - ports: - - name: web - protocol: "TCP" - port: 3000 - targetPort: 3000 - selector: - workload.user.cattle.io/workloadselector: deployment-staging-web - type: LoadBalancer - sessionAffinity: None - externalTrafficPolicy: Cluster -status: - loadBalancer: {} - diff --git a/staging/.gitignore b/staging/.gitignore deleted file mode 100644 index d9c5e61bf..000000000 --- a/staging/.gitignore +++ /dev/null @@ -1 +0,0 @@ -db-migration-worker-configmap.yaml diff --git a/staging/volumes/mongo-export-claim.yaml b/staging/volumes/mongo-export-claim.yaml deleted file mode 100644 index 1c91996db..000000000 --- a/staging/volumes/mongo-export-claim.yaml +++ /dev/null @@ -1,11 +0,0 @@ -kind: PersistentVolumeClaim -apiVersion: v1 -metadata: - name: mongo-export-claim - namespace: staging -spec: - accessModes: - - ReadWriteMany - resources: - requests: - storage: 1Gi diff --git a/staging/volumes/mongo-export-volume.yaml b/staging/volumes/mongo-export-volume.yaml deleted file mode 100644 index 945c28765..000000000 --- a/staging/volumes/mongo-export-volume.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: v1 -kind: PersistentVolume -metadata: - name: mongo-export-volume - namespace: staging -spec: - accessModes: - - ReadWriteMany - capacity: - storage: 1Gi - hostPath: - path: /data/shared/mongo-exports/ diff --git a/staging/volumes/ssh-keys-volume-claim.yml b/staging/volumes/ssh-keys-volume-claim.yml deleted file mode 100644 index 30191a500..000000000 --- a/staging/volumes/ssh-keys-volume-claim.yml +++ /dev/null @@ -1,11 +0,0 @@ -kind: PersistentVolumeClaim -apiVersion: v1 -metadata: - name: ssh-keys-claim - namespace: staging -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Mi diff --git a/staging/volumes/ssh-keys-volume.yaml b/staging/volumes/ssh-keys-volume.yaml deleted file mode 100644 index 9c0353b74..000000000 --- a/staging/volumes/ssh-keys-volume.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: v1 -kind: PersistentVolume -metadata: - name: ssh-keys-volume - namespace: staging -spec: - accessModes: - - ReadWriteOnce - capacity: - storage: 1Mi - hostPath: - path: /data/pv0001/ diff --git a/staging/volumes/uploads-claim.yaml b/staging/volumes/uploads-claim.yaml deleted file mode 100644 index c1b11ed4e..000000000 --- a/staging/volumes/uploads-claim.yaml +++ /dev/null @@ -1,11 +0,0 @@ -kind: PersistentVolumeClaim -apiVersion: v1 -metadata: - name: uploads-claim - namespace: staging -spec: - accessModes: - - ReadWriteMany - resources: - requests: - storage: 8Gi diff --git a/staging/volumes/uploads-volume.yaml b/staging/volumes/uploads-volume.yaml deleted file mode 100644 index 4600a76a4..000000000 --- a/staging/volumes/uploads-volume.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: v1 -kind: PersistentVolume -metadata: - name: uploads-volume - namespace: staging -spec: - accessModes: - - ReadWriteMany - capacity: - storage: 8Gi - hostPath: - path: /data/shared/uploads/ diff --git a/volumes/mongo-export.yml b/volumes/mongo-export.yml new file mode 100644 index 000000000..a5ef064cc --- /dev/null +++ b/volumes/mongo-export.yml @@ -0,0 +1,25 @@ +--- + kind: PersistentVolume + apiVersion: v1 + metadata: + name: mongo-export-volume + namespace: staging + spec: + accessModes: + - ReadWriteMany + 
capacity:
+    storage: 1Gi
+   hostPath:
+    path: /data/shared/mongo-exports/
+---
+ kind: PersistentVolumeClaim
+ apiVersion: v1
+ metadata:
+  name: mongo-export-claim
+  namespace: staging
+ spec:
+  accessModes:
+   - ReadWriteMany
+  resources:
+   requests:
+    storage: 1Gi
diff --git a/volumes/ssh-keys.yml b/volumes/ssh-keys.yml
new file mode 100644
index 000000000..4ffd83e80
--- /dev/null
+++ b/volumes/ssh-keys.yml
@@ -0,0 +1,25 @@
+---
+ apiVersion: v1
+ kind: PersistentVolume
+ metadata:
+  name: ssh-keys-volume
+  namespace: staging
+ spec:
+  accessModes:
+   - ReadWriteOnce
+  capacity:
+   storage: 1Mi
+  hostPath:
+   path: /data/pv0001/
+---
+ kind: PersistentVolumeClaim
+ apiVersion: v1
+ metadata:
+  name: ssh-keys-claim
+  namespace: staging
+ spec:
+  accessModes:
+   - ReadWriteOnce
+  resources:
+   requests:
+    storage: 1Mi
diff --git a/volumes/uploads.yml b/volumes/uploads.yml
new file mode 100644
index 000000000..34b600aab
--- /dev/null
+++ b/volumes/uploads.yml
@@ -0,0 +1,25 @@
+---
+ apiVersion: v1
+ kind: PersistentVolume
+ metadata:
+  name: uploads-volume
+  namespace: staging
+ spec:
+  accessModes:
+   - ReadWriteMany
+  capacity:
+   storage: 8Gi
+  hostPath:
+   path: /data/shared/uploads/
+---
+ kind: PersistentVolumeClaim
+ apiVersion: v1
+ metadata:
+  name: uploads-claim
+  namespace: staging
+ spec:
+  accessModes:
+   - ReadWriteMany
+  resources:
+   requests:
+    storage: 8Gi

From c6661def314f80582170664333eca6defeb12614 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Robert=20Sch=C3=A4fer?=
Date: Thu, 31 Jan 2019 23:52:24 +0100
Subject: [PATCH 11/55] Improve README

---
 .gitignore |  2 +-
 README.md  | 78 ++++++++++++++++++++++++++++++++----------------------
 2 files changed, 47 insertions(+), 33 deletions(-)

diff --git a/.gitignore b/.gitignore
index da61c76ef..32cfb3b9e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1 @@
-*secrets*.yaml
+*secrets*.yml
diff --git a/README.md b/README.md
index 48d644622..1c9212c07 100644
--- a/README.md
+++ b/README.md
@@ -4,9 +4,9 @@

 ## Todo`s
 - [ ] check labels and selectors if they all are correct
-- [ ] configure NGINX from yaml
+- [ ] configure NGINX from yml
-- [ ] configure Let's Encrypt cert-manager from yaml
+- [ ] configure Let's Encrypt cert-manager from yml
-- [ ] configure ingress form yaml
+- [ ] configure ingress form yml
 - [ ] configure persistent & shared storage between nodes
 - [ ] reproduce setup locally
@@ -17,70 +17,84 @@ There are many Kubernetes distributions, but if you're just getting started, Min

 [Install Minikube](https://kubernetes.io/docs/tasks/tools/install-minikube/)

+# Open minikube dashboard
+```
+$ minikube dashboard
+```
+This will give you an overview.
+Some of the steps below need some timing to make resources available to other
+dependent deployments. Keeping an eye on the dashboard is a great way to check
+that.
+
 ## Create a namespace locally
 ```shell
-kubectl create -f namespace-staging.yml
+$ kubectl create -f namespace-staging.yml
 ```
+Switch to the namespace `staging` in your kubernetes dashboard.

-## Change config maps according to your needs
+## Setup config maps
 ```shell
-cd config/
-cp db-migration-worker-configmap.template.yaml db-migration-worker-configmap.yaml
+$ cp db-migration-worker.template.yml config/db-migration-worker.yml
 # edit all variables according to the setup of the remote legacy server
-cd ..
-```
-## Apply the config map to staging namespace
-```shell
-kubectl apply -f config/
+$ kubectl apply -f config/
 ```

 ## Setup secrets and deploy them
+If you want to edit secrets, you have to `base64` encode them.
See [kubernetes +documentation](https://kubernetes.io/docs/concepts/configuration/secret/#creating-a-secret-manually). ```shell -cp secrets.yaml.template secrets.yaml -# change all vars as needed and deploy it afterwards -kubectl apply -f secrets.yaml +# example how to base64 a string: +$ echo -n 'admin' | base64 +YWRtaW4= + +$ cp secrets.yml.template secrets.yml +# change all variables as needed and deploy them +$ kubectl apply -f secrets.yml ``` -## Create volumes and deployments +## Create volumes ```shell -kubectl apply -f volumes/ -kubectl apply -f deployments/ +$ kubectl apply -f volumes/ ``` -This can take a while. -Sit back and relax and have a look into your minikube dashboard: +Wait until the persistent volumes get available. + +## Create deployments +```shell +$ kubectl apply -f deployments/ ``` -minikube dashboard -``` -Wait until all pods turn green and they don't show a warning `Waiting: ContainerCreating` anymore. +This can take a while because kubernetes will download the docker images. +Sit back and relax and have a look into your kubernetes dashboard. +Wait until all pods turn green and they don't show a warning +`Waiting: ContainerCreating` anymore. ## Expose the services ```shell -kubectl apply -f services/ +$ kubectl apply -f services/ ``` -## Access the service +## Access the services ```shell -minikube service nitro-backend --namespace=staging -minikube service nitro-web --namespace=staging +$ minikube service nitro-backend --namespace=staging +$ minikube service nitro-web --namespace=staging ``` -## Provisioning db-migration-worker +## Provision db-migration-worker Copy your private ssh key and the `.known-hosts` file of your remote legacy server. ```shell # check the corresponding db-migration-worker pod -kubectl --namespace=staging get pods +$ kubectl --namespace=staging get pods # change below -kubectl cp path/to/your/ssh/keys/.ssh staging/nitro-db-migration-worker-:/root/ +$ kubectl cp path/to/your/ssh/keys/.ssh staging/nitro-db-migration-worker-:/root/ ``` Run the migration: ```shell # change below -kubectl --namespace=staging exec -it nitro-db-migration-worker- ./import.sh -kubectl --namespace=staging exec -it nitro-neo4j- ./import/import.sh +$ kubectl --namespace=staging exec -it nitro-db-migration-worker- ./import.sh +$ kubectl --namespace=staging exec -it nitro-neo4j- ./import/import.sh ``` From 7b981c06d08835fce95a4376c546472e78880af5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=A4fer?= Date: Fri, 1 Feb 2019 00:23:53 +0100 Subject: [PATCH 12/55] Point web service to backend of internal network --- config/backend.yml | 2 +- services/backend.yml | 4 ---- services/neo4j.yml | 6 ++---- services/web.yml | 4 ---- 4 files changed, 3 insertions(+), 13 deletions(-) diff --git a/config/backend.yml b/config/backend.yml index ba7c819bc..cfb19b538 100644 --- a/config/backend.yml +++ b/config/backend.yml @@ -2,7 +2,7 @@ apiVersion: v1 kind: ConfigMap data: GRAPHQL_PORT: "4000" - GRAPHQL_URI: "https://api-nitro-staging.human-connection.org" + GRAPHQL_URI: "http://nitro-backend.staging:4000" MOCK: "false" metadata: name: staging-backend diff --git a/services/backend.yml b/services/backend.yml index d4f01286a..1654f884a 100644 --- a/services/backend.yml +++ b/services/backend.yml @@ -13,8 +13,4 @@ spec: selector: workload.user.cattle.io/workloadselector: deployment-staging-backend type: LoadBalancer - sessionAffinity: None externalTrafficPolicy: Cluster -status: - loadBalancer: {} - diff --git a/services/neo4j.yml b/services/neo4j.yml index e071f78bb..65b7785b6 
100644 --- a/services/neo4j.yml +++ b/services/neo4j.yml @@ -6,8 +6,6 @@ metadata: labels: workload.user.cattle.io/workloadselector: deployment-staging-neo4j spec: - selector: - workload.user.cattle.io/workloadselector: deployment-staging-neo4j ports: - name: bolt port: 7687 @@ -15,7 +13,7 @@ spec: - name: web port: 7474 targetPort: 7474 - type: LoadBalancer - sessionAffinity: None + selector: + workload.user.cattle.io/workloadselector: deployment-staging-neo4j type: ClusterIP diff --git a/services/web.yml b/services/web.yml index e1bd542f1..ad2b9678b 100644 --- a/services/web.yml +++ b/services/web.yml @@ -13,8 +13,4 @@ spec: selector: workload.user.cattle.io/workloadselector: deployment-staging-web type: LoadBalancer - sessionAffinity: None externalTrafficPolicy: Cluster -status: - loadBalancer: {} - From e8b212f5ccdd2b4718755ea3dcffb9fbd75de514 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=A4fer?= Date: Fri, 1 Feb 2019 01:08:19 +0100 Subject: [PATCH 13/55] Backend is accessible only from internal network I think this is better. For development it might be interesting to have access to the backend but I think one should do development with docker-compose not minikube and such. --- README.md | 3 +-- services/backend.yml | 2 -- services/neo4j.yml | 2 -- 3 files changed, 1 insertion(+), 6 deletions(-) diff --git a/README.md b/README.md index 1c9212c07..1fbabb948 100644 --- a/README.md +++ b/README.md @@ -77,8 +77,7 @@ $ kubectl apply -f services/ ## Access the services ```shell -$ minikube service nitro-backend --namespace=staging -$ minikube service nitro-web --namespace=staging +$ minikube service nitro-web --namespace=staging ``` diff --git a/services/backend.yml b/services/backend.yml index 1654f884a..39cfca63a 100644 --- a/services/backend.yml +++ b/services/backend.yml @@ -12,5 +12,3 @@ spec: targetPort: 4000 selector: workload.user.cattle.io/workloadselector: deployment-staging-backend - type: LoadBalancer - externalTrafficPolicy: Cluster diff --git a/services/neo4j.yml b/services/neo4j.yml index 65b7785b6..4ff0953a7 100644 --- a/services/neo4j.yml +++ b/services/neo4j.yml @@ -15,5 +15,3 @@ spec: targetPort: 7474 selector: workload.user.cattle.io/workloadselector: deployment-staging-neo4j - type: ClusterIP - From 9b5f88d7ac9d573c9989c2213b9efad41ae24450 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=A4fer?= Date: Fri, 1 Feb 2019 01:25:05 +0100 Subject: [PATCH 14/55] Kubernetes best practices: * put many configurations in one file * expose services before you start pods See: https://kubernetes.io/docs/concepts/configuration/overview/ --- README.md | 13 ++- deployments/db-migration-worker.yml | 172 ++++++++++++++++------------ deployments/neo4j.yml | 144 ++++++++++++++--------- deployments/web.yml | 5 - volumes/ssh-keys.yml | 25 ---- 5 files changed, 193 insertions(+), 166 deletions(-) delete mode 100644 volumes/ssh-keys.yml diff --git a/README.md b/README.md index 1fbabb948..0096071ee 100644 --- a/README.md +++ b/README.md @@ -57,7 +57,13 @@ $ kubectl apply -f secrets.yml ```shell $ kubectl apply -f volumes/ ``` -Wait until the persistent volumes get available. + +## Expose the services + +```shell +$ kubectl apply -f services/ +``` +Wait until persistent volumes and services become available. ## Create deployments ```shell @@ -68,11 +74,6 @@ Sit back and relax and have a look into your kubernetes dashboard. Wait until all pods turn green and they don't show a warning `Waiting: ContainerCreating` anymore. 
-## Expose the services - -```shell -$ kubectl apply -f services/ -``` ## Access the services diff --git a/deployments/db-migration-worker.yml b/deployments/db-migration-worker.yml index 509f98093..685904aba 100644 --- a/deployments/db-migration-worker.yml +++ b/deployments/db-migration-worker.yml @@ -1,75 +1,101 @@ -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: nitro-db-migration-worker - namespace: staging -spec: - replicas: 1 - minReadySeconds: 15 - progressDeadlineSeconds: 60 - selector: - matchLabels: - workload.user.cattle.io/workloadselector: deployment-staging-db-migration-worker - template: - metadata: - labels: +--- + apiVersion: extensions/v1beta1 + kind: Deployment + metadata: + name: nitro-db-migration-worker + namespace: staging + spec: + replicas: 1 + minReadySeconds: 15 + progressDeadlineSeconds: 60 + selector: + matchLabels: workload.user.cattle.io/workloadselector: deployment-staging-db-migration-worker - name: "nitro-db-migration-worker" - spec: - containers: - - env: - - name: SSH_USERNAME - valueFrom: - configMapKeyRef: - name: staging-db-migration-worker - key: SSH_USERNAME - - name: SSH_HOST - valueFrom: - configMapKeyRef: - name: staging-db-migration-worker - key: SSH_HOST - - name: MONGODB_USERNAME - valueFrom: - configMapKeyRef: - name: staging-db-migration-worker - key: MONGODB_USERNAME - - name: MONGODB_AUTH_DB - valueFrom: - configMapKeyRef: - name: staging-db-migration-worker - key: MONGODB_AUTH_DB - - name: MONGODB_DATABASE - valueFrom: - configMapKeyRef: - name: staging-db-migration-worker - key: MONGODB_DATABASE - - name: UPLOADS_DIRECTORY - valueFrom: - configMapKeyRef: - name: staging-db-migration-worker - key: UPLOADS_DIRECTORY - - name: MONGODB_PASSWORD - valueFrom: - secretKeyRef: - name: staging - key: MONGODB_PASSWORD - optional: false - image: humanconnection/db-migration-worker:latest + template: + metadata: + labels: + workload.user.cattle.io/workloadselector: deployment-staging-db-migration-worker name: nitro-db-migration-worker - resources: {} - imagePullPolicy: Always - volumeMounts: - - mountPath: /root/ - name: ssh-keys-directory - - mountPath: /mongo-export/ - name: mongo-export - restartPolicy: Always - volumes: - - name: ssh-keys-directory - persistentVolumeClaim: - claimName: ssh-keys-claim - - name: mongo-export - persistentVolumeClaim: - claimName: mongo-export-claim - terminationGracePeriodSeconds: 30 -status: {} + spec: + containers: + - env: + - name: SSH_USERNAME + valueFrom: + configMapKeyRef: + name: staging-db-migration-worker + key: SSH_USERNAME + - name: SSH_HOST + valueFrom: + configMapKeyRef: + name: staging-db-migration-worker + key: SSH_HOST + - name: MONGODB_USERNAME + valueFrom: + configMapKeyRef: + name: staging-db-migration-worker + key: MONGODB_USERNAME + - name: MONGODB_AUTH_DB + valueFrom: + configMapKeyRef: + name: staging-db-migration-worker + key: MONGODB_AUTH_DB + - name: MONGODB_DATABASE + valueFrom: + configMapKeyRef: + name: staging-db-migration-worker + key: MONGODB_DATABASE + - name: UPLOADS_DIRECTORY + valueFrom: + configMapKeyRef: + name: staging-db-migration-worker + key: UPLOADS_DIRECTORY + - name: MONGODB_PASSWORD + valueFrom: + secretKeyRef: + name: staging + key: MONGODB_PASSWORD + optional: false + image: humanconnection/db-migration-worker:latest + name: nitro-db-migration-worker + resources: {} + imagePullPolicy: Always + volumeMounts: + - mountPath: /root/ + name: ssh-keys-directory + - mountPath: /mongo-export/ + name: mongo-export + restartPolicy: Always + volumes: + - 
name: ssh-keys-directory + persistentVolumeClaim: + claimName: ssh-keys-claim + - name: mongo-export + persistentVolumeClaim: + claimName: mongo-export-claim + terminationGracePeriodSeconds: 30 + status: {} +--- + apiVersion: v1 + kind: PersistentVolume + metadata: + name: ssh-keys-volume + namespace: staging + spec: + accessModes: + - ReadWriteOnce + capacity: + storage: 1Mi + hostPath: + path: /data/pv0001/ +--- + kind: PersistentVolumeClaim + apiVersion: v1 + metadata: + name: ssh-keys-claim + namespace: staging + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Mi diff --git a/deployments/neo4j.yml b/deployments/neo4j.yml index 2b3485c77..7f96cd45a 100644 --- a/deployments/neo4j.yml +++ b/deployments/neo4j.yml @@ -1,59 +1,89 @@ -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: nitro-neo4j - namespace: staging -spec: - replicas: 1 - strategy: {} - selector: - matchLabels: - workload.user.cattle.io/workloadselector: deployment-staging-neo4j - template: - metadata: - labels: +--- + apiVersion: extensions/v1beta1 + kind: Deployment + metadata: + name: nitro-neo4j + namespace: staging + spec: + replicas: 1 + strategy: {} + selector: + matchLabels: workload.user.cattle.io/workloadselector: deployment-staging-neo4j - name: "nitro-neo4j" - spec: - containers: - - env: - - name: NEO4J_apoc_import_file_enabled - value: "true" - - name: NEO4J_dbms_memory_pagecache_size - value: 1G - - name: NEO4J_dbms_memory_heap_max__size - value: 1G - - name: NEO4J_AUTH - value: none - - name: NEO4J_URI - valueFrom: - configMapKeyRef: - name: staging-neo4j - key: NEO4J_URI - - name: NEO4J_USER - valueFrom: - configMapKeyRef: - name: staging-neo4j - key: NEO4J_USER - - name: NEO4J_AUTH - valueFrom: - configMapKeyRef: - name: staging-neo4j - key: NEO4J_AUTH - image: humanconnection/neo4j:latest + template: + metadata: + labels: + workload.user.cattle.io/workloadselector: deployment-staging-neo4j name: nitro-neo4j - ports: - - containerPort: 7687 - - containerPort: 7474 - # - containerPort: 7473 - resources: {} - imagePullPolicy: IfNotPresent - volumeMounts: - - mountPath: /mongo-export/ - name: mongo-export - restartPolicy: Always - volumes: - - name: mongo-export - persistentVolumeClaim: - claimName: mongo-export-claim -status: {} + spec: + containers: + - env: + - name: NEO4J_apoc_import_file_enabled + value: "true" + - name: NEO4J_dbms_memory_pagecache_size + value: 1G + - name: NEO4J_dbms_memory_heap_max__size + value: 1G + - name: NEO4J_AUTH + value: none + - name: NEO4J_URI + valueFrom: + configMapKeyRef: + name: staging-neo4j + key: NEO4J_URI + - name: NEO4J_USER + valueFrom: + configMapKeyRef: + name: staging-neo4j + key: NEO4J_USER + - name: NEO4J_AUTH + valueFrom: + configMapKeyRef: + name: staging-neo4j + key: NEO4J_AUTH + image: humanconnection/neo4j:latest + name: nitro-neo4j + ports: + - containerPort: 7687 + - containerPort: 7474 + resources: {} + imagePullPolicy: IfNotPresent + volumeMounts: + - mountPath: /data/ + name: neo4j-data + - mountPath: /mongo-export/ + name: mongo-export + restartPolicy: Always + volumes: + - name: mongo-export + persistentVolumeClaim: + claimName: mongo-export-claim + - name: neo4j-data + persistentVolumeClaim: + claimName: neo4j-data-claim + status: {} +--- + apiVersion: v1 + kind: PersistentVolume + metadata: + name: neo4j-data-volume + namespace: staging + spec: + accessModes: + - ReadWriteOnce + capacity: + storage: 4Gi + hostPath: + path: /data/neo4j/ +--- + kind: PersistentVolumeClaim + apiVersion: v1 + metadata: + 
name: neo4j-data-claim + namespace: staging + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 4Gi diff --git a/deployments/web.yml b/deployments/web.yml index 5cad7f039..43be04bbe 100644 --- a/deployments/web.yml +++ b/deployments/web.yml @@ -7,11 +7,6 @@ spec: replicas: 2 minReadySeconds: 15 progressDeadlineSeconds: 60 - # strategy: - # rollingUpdate: - # maxSurge: 1 - # maxUnavailable: 0 - # type: RollingUpdate selector: matchLabels: workload.user.cattle.io/workloadselector: deployment-staging-web diff --git a/volumes/ssh-keys.yml b/volumes/ssh-keys.yml deleted file mode 100644 index 4ffd83e80..000000000 --- a/volumes/ssh-keys.yml +++ /dev/null @@ -1,25 +0,0 @@ ---- - apiVersion: v1 - kind: PersistentVolume - metadata: - name: ssh-keys-volume - namespace: staging - spec: - accessModes: - - ReadWriteOnce - capacity: - storage: 1Mi - hostPath: - path: /data/pv0001/ ---- - kind: PersistentVolumeClaim - apiVersion: v1 - metadata: - name: ssh-keys-claim - namespace: staging - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Mi From ad3a97407688778b192b80c9a63af85f4b020e6d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=A4fer?= Date: Fri, 1 Feb 2019 01:53:51 +0100 Subject: [PATCH 15/55] Base64 encode default secrets in template The JWT_SECRET is taken from the current default secret in `Nitro-Backend`, the MONGODB_PASSWORD is just the label encoded. --- secrets.template.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/secrets.template.yml b/secrets.template.yml index f8a4642a3..755cd2d06 100644 --- a/secrets.template.yml +++ b/secrets.template.yml @@ -1,7 +1,8 @@ apiVersion: v1 kind: Secret data: - JWT_SECRET: "aHVtYW5jb25uZWN0aW9uLWRlcGxveW1lbnQ=" + JWT_SECRET: "Yi8mJjdiNzhCRiZmdi9WZA==" + MONGODB_PASSWORD: "TU9OR09EQl9QQVNTV09SRA==" metadata: name: staging namespace: staging From 6fed4797eddafcf7e01c0140a6f3ad49477dee0c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=A4fer?= Date: Fri, 1 Feb 2019 18:50:30 +0100 Subject: [PATCH 16/55] Add commits to configuration to deploy :latest tag The recommended way to update a kubernetes deployment to a new image is to change the image tag. However, our build server is configured to push every commit of the `master` branch to docker hub to the respective repository and tag `:latest`. So adding some configuration that can be changed seems to be a trick to re-deploy the `:latest` image. 
See here: https://stackoverflow.com/a/51835397 --- deployments/backend.yml | 2 ++ deployments/db-migration-worker.yml | 4 +++- deployments/neo4j.yml | 7 +++++-- deployments/web.yml | 2 ++ 4 files changed, 12 insertions(+), 3 deletions(-) diff --git a/deployments/backend.yml b/deployments/backend.yml index f2ebffb9c..31f8ef357 100644 --- a/deployments/backend.yml +++ b/deployments/backend.yml @@ -18,6 +18,8 @@ spec: spec: containers: - env: + - name: COMMIT + value: - name: MOCK value: "false" - name: CLIENT_URI diff --git a/deployments/db-migration-worker.yml b/deployments/db-migration-worker.yml index 685904aba..952cf0121 100644 --- a/deployments/db-migration-worker.yml +++ b/deployments/db-migration-worker.yml @@ -19,6 +19,8 @@ spec: containers: - env: + - name: COMMIT + value: - name: SSH_USERNAME valueFrom: configMapKeyRef: @@ -64,7 +66,6 @@ name: ssh-keys-directory - mountPath: /mongo-export/ name: mongo-export - restartPolicy: Always volumes: - name: ssh-keys-directory persistentVolumeClaim: @@ -72,6 +73,7 @@ - name: mongo-export persistentVolumeClaim: claimName: mongo-export-claim + restartPolicy: Always terminationGracePeriodSeconds: 30 status: {} --- diff --git a/deployments/neo4j.yml b/deployments/neo4j.yml index 7f96cd45a..ef394e36f 100644 --- a/deployments/neo4j.yml +++ b/deployments/neo4j.yml @@ -18,6 +18,8 @@ spec: containers: - env: + - name: COMMIT + value: - name: NEO4J_apoc_import_file_enabled value: "true" - name: NEO4J_dbms_memory_pagecache_size @@ -47,13 +49,12 @@ - containerPort: 7687 - containerPort: 7474 resources: {} - imagePullPolicy: IfNotPresent + imagePullPolicy: Always volumeMounts: - mountPath: /data/ name: neo4j-data - mountPath: /mongo-export/ name: mongo-export - restartPolicy: Always volumes: - name: mongo-export persistentVolumeClaim: @@ -61,6 +62,8 @@ - name: neo4j-data persistentVolumeClaim: claimName: neo4j-data-claim + restartPolicy: Always + terminationGracePeriodSeconds: 30 status: {} --- apiVersion: v1 diff --git a/deployments/web.yml b/deployments/web.yml index 43be04bbe..48e99b9c2 100644 --- a/deployments/web.yml +++ b/deployments/web.yml @@ -18,6 +18,8 @@ spec: spec: containers: - env: + - name: COMMIT + value: - name: HOST value: 0.0.0.0 - name: BACKEND_URL From d8c23c3452267d308f31c587ae896ffabd88d5b1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=A4fer?= Date: Fri, 1 Feb 2019 19:53:09 +0100 Subject: [PATCH 17/55] Reproduced the whole setup locally with success --- README.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/README.md b/README.md index 0096071ee..d43687b4a 100644 --- a/README.md +++ b/README.md @@ -8,9 +8,8 @@ - [ ] configure Let's Encrypt cert-manager from yml - [ ] configure ingress form yml - [ ] configure persistent & shared storage between nodes -- [ ] reproduce setup locally +- [x] reproduce setup locally -> The dummy directory has some lb configurations that did not work properly on Digital Ocean but could be used as a starting point for getting it right ## Install Minikube, kubectl There are many Kubernetes distributions, but if you're just getting started, Minikube is a tool that you can use to get your feet wet. 
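The `COMMIT` variable that patch 16 adds to every deployment only triggers a re-deploy if something changes its value. A sketch of how that could be done by hand; the use of `kubectl set env` here is an assumption for illustration, not a command from these patches:

```shell
# hypothetical redeploy: changing the COMMIT env var alters the pod template,
# so kubernetes rolls the deployment and, with imagePullPolicy: Always,
# pulls the :latest image again
$ kubectl --namespace=staging set env deployment/nitro-web COMMIT="$(git rev-parse HEAD)"

# follow the rollout until the new pods are ready
$ kubectl --namespace=staging rollout status deployment/nitro-web
```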
From af15ec6393b127123371d29915225081605ddcbc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=A4fer?= Date: Fri, 1 Feb 2019 23:34:19 +0100 Subject: [PATCH 18/55] Fix certain configuration for Digital Ocean --- config/neo4j.yml | 2 +- deployments/db-migration-worker.yml | 17 +++-------------- services/neo4j.yml | 2 +- volumes/mongo-export.yml | 16 ++-------------- volumes/uploads.yml | 16 ++-------------- 5 files changed, 9 insertions(+), 44 deletions(-) diff --git a/config/neo4j.yml b/config/neo4j.yml index 78d1ba3cd..0165338db 100644 --- a/config/neo4j.yml +++ b/config/neo4j.yml @@ -1,7 +1,7 @@ apiVersion: v1 kind: ConfigMap data: - NEO4J_URI: "bolt://neo4j.staging:7687" + NEO4J_URI: "bolt://nitro-neo4j.staging:7687" NEO4J_USER: "neo4j" NEO4J_AUTH: none metadata: diff --git a/deployments/db-migration-worker.yml b/deployments/db-migration-worker.yml index 952cf0121..f4d427096 100644 --- a/deployments/db-migration-worker.yml +++ b/deployments/db-migration-worker.yml @@ -76,19 +76,6 @@ restartPolicy: Always terminationGracePeriodSeconds: 30 status: {} ---- - apiVersion: v1 - kind: PersistentVolume - metadata: - name: ssh-keys-volume - namespace: staging - spec: - accessModes: - - ReadWriteOnce - capacity: - storage: 1Mi - hostPath: - path: /data/pv0001/ --- kind: PersistentVolumeClaim apiVersion: v1 @@ -100,4 +87,6 @@ - ReadWriteOnce resources: requests: - storage: 1Mi + # waaay too much + # unfortunately Digital Oceans volumes start at 1Gi + storage: 1Gi diff --git a/services/neo4j.yml b/services/neo4j.yml index 4ff0953a7..d6c7a95b4 100644 --- a/services/neo4j.yml +++ b/services/neo4j.yml @@ -1,7 +1,7 @@ apiVersion: v1 kind: Service metadata: - name: neo4j + name: nitro-neo4j namespace: staging labels: workload.user.cattle.io/workloadselector: deployment-staging-neo4j diff --git a/volumes/mongo-export.yml b/volumes/mongo-export.yml index a5ef064cc..1fb537e5c 100644 --- a/volumes/mongo-export.yml +++ b/volumes/mongo-export.yml @@ -1,16 +1,3 @@ ---- - kind: PersistentVolume - apiVersion: v1 - metadata: - name: mongo-export-volume - namespace: staging - spec: - accessModes: - - ReadWriteMany - capacity: - storage: 1Gi - hostPath: - path: /data/shared/mongo-exports/ --- kind: PersistentVolumeClaim apiVersion: v1 @@ -19,7 +6,8 @@ namespace: staging spec: accessModes: - - ReadWriteMany + - ReadWriteOnce resources: requests: storage: 1Gi + storageClassName: do-block-storage diff --git a/volumes/uploads.yml b/volumes/uploads.yml index 34b600aab..3a9dfcdad 100644 --- a/volumes/uploads.yml +++ b/volumes/uploads.yml @@ -1,16 +1,3 @@ ---- - apiVersion: v1 - kind: PersistentVolume - metadata: - name: uploads-volume - namespace: staging - spec: - accessModes: - - ReadWriteMany - capacity: - storage: 8Gi - hostPath: - path: /data/shared/uploads/ --- kind: PersistentVolumeClaim apiVersion: v1 @@ -19,7 +6,8 @@ namespace: staging spec: accessModes: - - ReadWriteMany + - ReadWriteOnce resources: requests: storage: 8Gi + storageClassName: do-block-storage From 671826e060032596ab2e112786eb0913f3031034 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=A4fer?= Date: Sat, 2 Feb 2019 12:40:29 +0100 Subject: [PATCH 19/55] Bundle all configuration in one folder staging/ --- .gitignore | 2 +- {config => staging/config}/.gitignore | 0 {config => staging/config}/backend.yml | 0 {config => staging/config}/neo4j.yml | 0 {config => staging/config}/web.yml | 0 {deployments => staging/deployments}/backend.yml | 0 {deployments => staging/deployments}/db-migration-worker.yml | 0 {deployments => 
staging/deployments}/neo4j.yml | 0 {deployments => staging/deployments}/web.yml | 0 namespace-staging.yml => staging/namespace-staging.yml | 0 {services => staging/services}/backend.yml | 0 {services => staging/services}/neo4j.yml | 0 {services => staging/services}/web.yml | 0 {volumes => staging/volumes}/mongo-export.yml | 1 - {volumes => staging/volumes}/uploads.yml | 1 - 15 files changed, 1 insertion(+), 3 deletions(-) rename {config => staging/config}/.gitignore (100%) rename {config => staging/config}/backend.yml (100%) rename {config => staging/config}/neo4j.yml (100%) rename {config => staging/config}/web.yml (100%) rename {deployments => staging/deployments}/backend.yml (100%) rename {deployments => staging/deployments}/db-migration-worker.yml (100%) rename {deployments => staging/deployments}/neo4j.yml (100%) rename {deployments => staging/deployments}/web.yml (100%) rename namespace-staging.yml => staging/namespace-staging.yml (100%) rename {services => staging/services}/backend.yml (100%) rename {services => staging/services}/neo4j.yml (100%) rename {services => staging/services}/web.yml (100%) rename {volumes => staging/volumes}/mongo-export.yml (84%) rename {volumes => staging/volumes}/uploads.yml (84%) diff --git a/.gitignore b/.gitignore index 32cfb3b9e..8a42d3602 100644 --- a/.gitignore +++ b/.gitignore @@ -1 +1 @@ -*secrets*.yml +*secrets.yml diff --git a/config/.gitignore b/staging/config/.gitignore similarity index 100% rename from config/.gitignore rename to staging/config/.gitignore diff --git a/config/backend.yml b/staging/config/backend.yml similarity index 100% rename from config/backend.yml rename to staging/config/backend.yml diff --git a/config/neo4j.yml b/staging/config/neo4j.yml similarity index 100% rename from config/neo4j.yml rename to staging/config/neo4j.yml diff --git a/config/web.yml b/staging/config/web.yml similarity index 100% rename from config/web.yml rename to staging/config/web.yml diff --git a/deployments/backend.yml b/staging/deployments/backend.yml similarity index 100% rename from deployments/backend.yml rename to staging/deployments/backend.yml diff --git a/deployments/db-migration-worker.yml b/staging/deployments/db-migration-worker.yml similarity index 100% rename from deployments/db-migration-worker.yml rename to staging/deployments/db-migration-worker.yml diff --git a/deployments/neo4j.yml b/staging/deployments/neo4j.yml similarity index 100% rename from deployments/neo4j.yml rename to staging/deployments/neo4j.yml diff --git a/deployments/web.yml b/staging/deployments/web.yml similarity index 100% rename from deployments/web.yml rename to staging/deployments/web.yml diff --git a/namespace-staging.yml b/staging/namespace-staging.yml similarity index 100% rename from namespace-staging.yml rename to staging/namespace-staging.yml diff --git a/services/backend.yml b/staging/services/backend.yml similarity index 100% rename from services/backend.yml rename to staging/services/backend.yml diff --git a/services/neo4j.yml b/staging/services/neo4j.yml similarity index 100% rename from services/neo4j.yml rename to staging/services/neo4j.yml diff --git a/services/web.yml b/staging/services/web.yml similarity index 100% rename from services/web.yml rename to staging/services/web.yml diff --git a/volumes/mongo-export.yml b/staging/volumes/mongo-export.yml similarity index 84% rename from volumes/mongo-export.yml rename to staging/volumes/mongo-export.yml index 1fb537e5c..563a9cfe6 100644 --- a/volumes/mongo-export.yml +++ 
b/staging/volumes/mongo-export.yml
@@ -10,4 +10,3 @@
   resources:
     requests:
       storage: 1Gi
-  storageClassName: do-block-storage
diff --git a/volumes/uploads.yml b/staging/volumes/uploads.yml
similarity index 84%
rename from volumes/uploads.yml
rename to staging/volumes/uploads.yml
index 3a9dfcdad..a48d28ddc 100644
--- a/volumes/uploads.yml
+++ b/staging/volumes/uploads.yml
@@ -10,4 +10,3 @@
   resources:
     requests:
       storage: 8Gi
-  storageClassName: do-block-storage

From 15f391539440fe7755b1e60cce85571c7747e208 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Robert=20Sch=C3=A4fer?=
Date: Sat, 2 Feb 2019 13:08:07 +0100
Subject: [PATCH 20/55] Adding dashboard deployment for digital ocean

---
 README.md                                     | 110 ++++++++++++------
 dashboard/admin-user.yml                      |   5 +
 dashboard/role-binding.yml                    |  12 ++
 ...space-staging.yml => namespace-staging.yml |   0
 4 files changed, 89 insertions(+), 38 deletions(-)
 create mode 100644 dashboard/admin-user.yml
 create mode 100644 dashboard/role-binding.yml
 rename staging/namespace-staging.yml => namespace-staging.yml (100%)

diff --git a/README.md b/README.md
index d43687b4a..3fb1a983b 100644
--- a/README.md
+++ b/README.md
@@ -10,13 +10,12 @@
 - [ ] configure persistent & shared storage between nodes
 - [x] reproduce setup locally
-
-## Install Minikube, kubectl
+## Minikube
 There are many Kubernetes distributions, but if you're just getting started, Minikube is a tool that you can use to get your feet wet.

 [Install Minikube](https://kubernetes.io/docs/tasks/tools/install-minikube/)

-# Open minikube dashboard
+### Open minikube dashboard
 ```
 $ minikube dashboard
 ```
@@ -25,63 +24,98 @@ Some of the steps below need some timing to make resources available to other
 dependent deployments. Keeping an eye on the dashboard is a great way to check
 that.

-## Create a namespace locally
-```shell
-$ kubectl create -f namespace-staging.yml
-```
-Switch to the namespace `staging` in your kubernetes dashboard.
+### Access exposed services

-## Setup config maps
-```shell
-$ cp db-migration-worker.template.yml config/db-migration-worker.yml
-# edit all variables according to the setup of the remote legacy server
+Follow the installation instruction below. Just at the end, expose the
Just at the end, expose the +`nitro-web` service on your host system with: -$ kubectl apply -f config/ +```shell +$ minikube service nitro-web --namespace=staging ``` -## Setup secrets and deploy themn +## Digital Ocean + +Install the kubernetes dashboard first: +```sh +$ kubectl apply -f dashboard/ +``` +Proxy localhost to the remote kubernetes dashboard: +```sh +kubectl proxy +``` +Get your token on the command line: +```sh +$ kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}') +``` +It should print something like: +``` +Name: admin-user-token-6gl6l +Namespace: kube-system +Labels: +Annotations: kubernetes.io/service-account.name=admin-user + kubernetes.io/service-account.uid=b16afba9-dfec-11e7-bbb9-901b0e532516 + +Type: kubernetes.io/service-account-token + +Data +==== +ca.crt: 1025 bytes +namespace: 11 bytes +token: eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLTZnbDZsIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiJiMTZhZmJhOS1kZmVjLTExZTctYmJiOS05MDFiMGU1MzI1MTYiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWRtaW4tdXNlciJ9.M70CU3lbu3PP4OjhFms8PVL5pQKj-jj4RNSLA4YmQfTXpPUuxqXjiTf094_Rzr0fgN_IVX6gC4fiNUL5ynx9KU-lkPfk0HnX8scxfJNzypL039mpGt0bbe1IXKSIRaq_9VW59Xz-yBUhycYcKPO9RM2Qa1Ax29nqNVko4vLn1_1wPqJ6XSq3GYI8anTzV8Fku4jasUwjrws6Cn6_sPEGmL54sq5R4Z5afUtv-mItTmqZZdxnkRqcJLlg2Y8WbCPogErbsaCDJoABQ7ppaqHetwfM_0yMun6ABOQbIwwl8pspJhpplKwyo700OSpvTT9zlBsu-b35lzXGBRHzv5g_RA + +``` +Grab the token and paste it into the login screen at [http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/](http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/) + + +## Installation with kubernetes (minikube + Digital Ocean) + +You have to do some prerequisites and change some secrets according to your own setup. + +#### Setup config maps +```shell +$ cp db-migration-worker.template.yml staging/config/db-migration-worker.yml +``` +Edit all variables according to the setup of the remote legacy server. + +#### Setup secrets and deploy themn + +```sh +$ cp secrets.yml.template staging/secrets.yml +``` +Change all secrets as needed. + If you want to edit secrets, you have to `base64` encode them. See [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/secret/#creating-a-secret-manually). ```shell # example how to base64 a string: $ echo -n 'admin' | base64 YWRtaW4= +``` +Those secrets get `base64` decoded in a kubernetes pod. -$ cp secrets.yml.template secrets.yml -# change all variables as needed and deploy them +#### Create a namespace locally +```shell +$ kubectl create -f namespace-staging.yml +``` +Switch to the namespace `staging` in your kubernetes dashboard. + +### Run the configuration +```shell +$ cd staging/ $ kubectl apply -f secrets.yml -``` - -## Create volumes -```shell +$ kubectl apply -f config/ $ kubectl apply -f volumes/ -``` - -## Expose the services - -```shell $ kubectl apply -f services/ -``` -Wait until persistent volumes and services become available. - -## Create deployments -```shell $ kubectl apply -f deployments/ ``` + This can take a while because kubernetes will download the docker images. 
Sit back and relax and have a look into your kubernetes dashboard. Wait until all pods turn green and they don't show a warning `Waiting: ContainerCreating` anymore. -## Access the services - -```shell -$ minikube service nitro-web --namespace=staging -``` - - -## Provision db-migration-worker +### Provision db-migration-worker Copy your private ssh key and the `.known-hosts` file of your remote legacy server. ```shell diff --git a/dashboard/admin-user.yml b/dashboard/admin-user.yml new file mode 100644 index 000000000..27b6bb802 --- /dev/null +++ b/dashboard/admin-user.yml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: admin-user + namespace: kube-system diff --git a/dashboard/role-binding.yml b/dashboard/role-binding.yml new file mode 100644 index 000000000..faa8927a2 --- /dev/null +++ b/dashboard/role-binding.yml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: admin-user +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: +- kind: ServiceAccount + name: admin-user + namespace: kube-system diff --git a/staging/namespace-staging.yml b/namespace-staging.yml similarity index 100% rename from staging/namespace-staging.yml rename to namespace-staging.yml From 0b075830bc497dde1145aa6a9617687d3f46bc2c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=A4fer?= Date: Sat, 2 Feb 2019 13:33:42 +0100 Subject: [PATCH 21/55] Put many configuration files in one folder --- README.md | 47 +++++++++---------- ...onfigmap-db-migration-worker.template.yaml | 0 ...pace-staging.yml => namespace-staging.yaml | 0 secrets.template.yml => secrets.template.yaml | 0 staging/.gitignore | 2 + staging/config/.gitignore | 1 - staging/config/backend.yml | 9 ---- staging/config/neo4j.yml | 9 ---- staging/config/web.yml | 8 ---- staging/configmaps.yaml | 29 ++++++++++++ .../backend.yml => deployment-backend.yaml} | 0 ...ml => deployment-db-migration-worker.yaml} | 0 .../neo4j.yml => deployment-neo4j.yaml} | 0 .../web.yml => deployment-web.yaml} | 0 .../backend.yml => service-backend.yaml} | 0 .../neo4j.yml => service-neo4j.yaml} | 0 .../{services/web.yml => service-web.yaml} | 0 ...rt.yml => volume-claim-mongo-exports.yaml} | 0 .../uploads.yml => volume-claim-uploads.yaml} | 0 19 files changed, 52 insertions(+), 53 deletions(-) rename db-migration-worker.template.yml => configmap-db-migration-worker.template.yaml (100%) rename namespace-staging.yml => namespace-staging.yaml (100%) rename secrets.template.yml => secrets.template.yaml (100%) create mode 100644 staging/.gitignore delete mode 100644 staging/config/.gitignore delete mode 100644 staging/config/backend.yml delete mode 100644 staging/config/neo4j.yml delete mode 100644 staging/config/web.yml create mode 100644 staging/configmaps.yaml rename staging/{deployments/backend.yml => deployment-backend.yaml} (100%) rename staging/{deployments/db-migration-worker.yml => deployment-db-migration-worker.yaml} (100%) rename staging/{deployments/neo4j.yml => deployment-neo4j.yaml} (100%) rename staging/{deployments/web.yml => deployment-web.yaml} (100%) rename staging/{services/backend.yml => service-backend.yaml} (100%) rename staging/{services/neo4j.yml => service-neo4j.yaml} (100%) rename staging/{services/web.yml => service-web.yaml} (100%) rename staging/{volumes/mongo-export.yml => volume-claim-mongo-exports.yaml} (100%) rename staging/{volumes/uploads.yml => volume-claim-uploads.yaml} (100%) diff --git a/README.md b/README.md index 
3fb1a983b..0225f0aa9 100644
--- a/README.md
+++ b/README.md
@@ -1,21 +1,20 @@
 # Human-Connection Nitro | Deployment Configuration

-> Currently the deployment is not primetime ready as you still have to do some manual work. That we need to change, the following list gives some glimpse of the missing steps.
-
-## Todo`s
-- [ ] check labels and selectors if they all are correct
-- [ ] configure NGINX from yml
+Todos:
+- [x] check labels and selectors if they all are correct
+- [x] configure NGINX from yml
 - [ ] configure Let's Encrypt cert-manager from yml
-- [ ] configure ingress form yml
-- [ ] configure persistent & shared storage between nodes
+- [x] configure ingress from yml
+- [x] configure persistent & shared storage between nodes
 - [x] reproduce setup locally

 ## Minikube
-There are many Kubernetes distributions, but if you're just getting started, Minikube is a tool that you can use to get your feet wet.
+There are many Kubernetes distributions, but if you're just getting started,
+Minikube is a tool that you can use to get your feet wet.

 [Install Minikube](https://kubernetes.io/docs/tasks/tools/install-minikube/)

-### Open minikube dashboard
+Open minikube dashboard:
 ```
 $ minikube dashboard
 ```
@@ -23,10 +22,9 @@ Some of the steps below need some timing to make resources available to other
 dependent deployments. Keeping an eye on the dashboard is a great way to check
 that.

-### Access exposed services
-
-Follow the installation instruction below. Just at the end, expose the
-`nitro-web` service on your host system with:
+Follow the [installation instruction](#installation-with-kubernetes) below.
+If all the pods and services have settled and everything looks green in your
+minikube dashboard, expose the `nitro-web` service on your host system with:

 ```shell
 $ minikube service nitro-web --namespace=staging
@@ -35,7 +33,7 @@ $ minikube service nitro-web --namespace=staging

 ## Digital Ocean

-Install the kubernetes dashboard first:
+First, install the kubernetes dashboard:
 ```sh
 $ kubectl apply -f dashboard/
 ```
@@ -67,20 +65,21 @@ Grab the token and paste it into the login screen at [http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/](http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/)

-## Installation with kubernetes (minikube + Digital Ocean)
+## Installation with kubernetes

-You have to do some prerequisites and change some secrets according to your own setup.
+You have to do some prerequisites, e.g. change some secrets according to your
+own setup.

 #### Setup config maps
 ```shell
-$ cp db-migration-worker.template.yml staging/config/db-migration-worker.yml
+$ cp configmap-db-migration-worker.template.yaml staging/configmap-db-migration-worker.yaml
 ```
 Edit all variables according to the setup of the remote legacy server.

 #### Setup secrets and deploy them

 ```sh
-$ cp secrets.yml.template staging/secrets.yml
+$ cp secrets.template.yaml staging/secrets.yaml
 ```
 Change all secrets as needed.
@@ -95,18 +94,13 @@ Those secrets get `base64` decoded in a kubernetes pod.

 #### Create a namespace locally
 ```shell
-$ kubectl create -f namespace-staging.yml
+$ kubectl create -f namespace-staging.yaml
 ```
 Switch to the namespace `staging` in your kubernetes dashboard.
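If you prefer the command line over the dashboard for switching namespaces, one possible shortcut — assuming kubectl 1.12 or newer; this is not part of these patches:

```shell
# make staging the default namespace of the current kubectl context
$ kubectl config set-context --current --namespace=staging

# confirm which namespace the current context now targets
$ kubectl config view --minify --output 'jsonpath={..namespace}'
```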
### Run the configuration ```shell -$ cd staging/ -$ kubectl apply -f secrets.yml -$ kubectl apply -f config/ -$ kubectl apply -f volumes/ -$ kubectl apply -f services/ -$ kubectl apply -f deployments/ +$ kubectl apply -f staging/ ``` This can take a while because kubernetes will download the docker images. @@ -116,7 +110,8 @@ Wait until all pods turn green and they don't show a warning ### Provision db-migration-worker -Copy your private ssh key and the `.known-hosts` file of your remote legacy server. +Copy your private ssh key and the `.known-hosts` file of your remote legacy +server. ```shell # check the corresponding db-migration-worker pod diff --git a/db-migration-worker.template.yml b/configmap-db-migration-worker.template.yaml similarity index 100% rename from db-migration-worker.template.yml rename to configmap-db-migration-worker.template.yaml diff --git a/namespace-staging.yml b/namespace-staging.yaml similarity index 100% rename from namespace-staging.yml rename to namespace-staging.yaml diff --git a/secrets.template.yml b/secrets.template.yaml similarity index 100% rename from secrets.template.yml rename to secrets.template.yaml diff --git a/staging/.gitignore b/staging/.gitignore new file mode 100644 index 000000000..599426dbb --- /dev/null +++ b/staging/.gitignore @@ -0,0 +1,2 @@ +configmap-db-migration-worker.yaml +secrets.yaml diff --git a/staging/config/.gitignore b/staging/config/.gitignore deleted file mode 100644 index 6fe22561d..000000000 --- a/staging/config/.gitignore +++ /dev/null @@ -1 +0,0 @@ -db-migration-worker.yml diff --git a/staging/config/backend.yml b/staging/config/backend.yml deleted file mode 100644 index cfb19b538..000000000 --- a/staging/config/backend.yml +++ /dev/null @@ -1,9 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -data: - GRAPHQL_PORT: "4000" - GRAPHQL_URI: "http://nitro-backend.staging:4000" - MOCK: "false" -metadata: - name: staging-backend - namespace: staging diff --git a/staging/config/neo4j.yml b/staging/config/neo4j.yml deleted file mode 100644 index 0165338db..000000000 --- a/staging/config/neo4j.yml +++ /dev/null @@ -1,9 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -data: - NEO4J_URI: "bolt://nitro-neo4j.staging:7687" - NEO4J_USER: "neo4j" - NEO4J_AUTH: none -metadata: - name: staging-neo4j - namespace: staging diff --git a/staging/config/web.yml b/staging/config/web.yml deleted file mode 100644 index 1dbf5e25e..000000000 --- a/staging/config/web.yml +++ /dev/null @@ -1,8 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -data: - CLIENT_URI: "https://nitro-staging.human-connection.org" - MAPBOX_TOKEN: pk.eyJ1IjoiaHVtYW4tY29ubmVjdGlvbiIsImEiOiJjajl0cnBubGoweTVlM3VwZ2lzNTNud3ZtIn0.KZ8KK9l70omjXbEkkbHGsQ -metadata: - name: staging-web - namespace: staging diff --git a/staging/configmaps.yaml b/staging/configmaps.yaml new file mode 100644 index 000000000..c07353141 --- /dev/null +++ b/staging/configmaps.yaml @@ -0,0 +1,29 @@ +--- + apiVersion: v1 + kind: ConfigMap + data: + GRAPHQL_PORT: "4000" + GRAPHQL_URI: "http://nitro-backend.staging:4000" + MOCK: "false" + metadata: + name: staging-backend + namespace: staging +--- + apiVersion: v1 + kind: ConfigMap + data: + NEO4J_URI: "bolt://nitro-neo4j.staging:7687" + NEO4J_USER: "neo4j" + NEO4J_AUTH: none + metadata: + name: staging-neo4j + namespace: staging +--- + apiVersion: v1 + kind: ConfigMap + data: + CLIENT_URI: "https://nitro-staging.human-connection.org" + MAPBOX_TOKEN: pk.eyJ1IjoiaHVtYW4tY29ubmVjdGlvbiIsImEiOiJjajl0cnBubGoweTVlM3VwZ2lzNTNud3ZtIn0.KZ8KK9l70omjXbEkkbHGsQ + metadata: + name: 
staging-web + namespace: staging diff --git a/staging/deployments/backend.yml b/staging/deployment-backend.yaml similarity index 100% rename from staging/deployments/backend.yml rename to staging/deployment-backend.yaml diff --git a/staging/deployments/db-migration-worker.yml b/staging/deployment-db-migration-worker.yaml similarity index 100% rename from staging/deployments/db-migration-worker.yml rename to staging/deployment-db-migration-worker.yaml diff --git a/staging/deployments/neo4j.yml b/staging/deployment-neo4j.yaml similarity index 100% rename from staging/deployments/neo4j.yml rename to staging/deployment-neo4j.yaml diff --git a/staging/deployments/web.yml b/staging/deployment-web.yaml similarity index 100% rename from staging/deployments/web.yml rename to staging/deployment-web.yaml diff --git a/staging/services/backend.yml b/staging/service-backend.yaml similarity index 100% rename from staging/services/backend.yml rename to staging/service-backend.yaml diff --git a/staging/services/neo4j.yml b/staging/service-neo4j.yaml similarity index 100% rename from staging/services/neo4j.yml rename to staging/service-neo4j.yaml diff --git a/staging/services/web.yml b/staging/service-web.yaml similarity index 100% rename from staging/services/web.yml rename to staging/service-web.yaml diff --git a/staging/volumes/mongo-export.yml b/staging/volume-claim-mongo-exports.yaml similarity index 100% rename from staging/volumes/mongo-export.yml rename to staging/volume-claim-mongo-exports.yaml diff --git a/staging/volumes/uploads.yml b/staging/volume-claim-uploads.yaml similarity index 100% rename from staging/volumes/uploads.yml rename to staging/volume-claim-uploads.yaml From abf623bd51d539c9ec94efce251b037073fc12ea Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=A4fer?= Date: Sat, 2 Feb 2019 18:44:21 +0100 Subject: [PATCH 22/55] Rename .yml to .yaml files for consistency See: https://stackoverflow.com/a/21059164 @appinteractive --- .gitignore | 1 - README.md | 2 +- dashboard/{admin-user.yml => admin-user.yaml} | 0 dashboard/{role-binding.yml => role-binding.yaml} | 0 4 files changed, 1 insertion(+), 2 deletions(-) delete mode 100644 .gitignore rename dashboard/{admin-user.yml => admin-user.yaml} (100%) rename dashboard/{role-binding.yml => role-binding.yaml} (100%) diff --git a/.gitignore b/.gitignore deleted file mode 100644 index 8a42d3602..000000000 --- a/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*secrets.yml diff --git a/README.md b/README.md index 0225f0aa9..7f998fcf2 100644 --- a/README.md +++ b/README.md @@ -39,7 +39,7 @@ $ kubectl apply -f dashboard/ ``` Proxy localhost to the remote kubernetes dashboard: ```sh -kubectl proxy +$ kubectl proxy ``` Get your token on the command line: ```sh diff --git a/dashboard/admin-user.yml b/dashboard/admin-user.yaml similarity index 100% rename from dashboard/admin-user.yml rename to dashboard/admin-user.yaml diff --git a/dashboard/role-binding.yml b/dashboard/role-binding.yaml similarity index 100% rename from dashboard/role-binding.yml rename to dashboard/role-binding.yaml From 246a46c2e8807e5ef46aab9b80fba77c4059330f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=A4fer?= Date: Sat, 2 Feb 2019 20:47:26 +0100 Subject: [PATCH 23/55] Remove obsolete volume in deployment --- staging/deployment-neo4j.yaml | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/staging/deployment-neo4j.yaml b/staging/deployment-neo4j.yaml index ef394e36f..58ca7f24b 100644 --- a/staging/deployment-neo4j.yaml +++ b/staging/deployment-neo4j.yaml @@ 
-65,19 +65,6 @@ restartPolicy: Always terminationGracePeriodSeconds: 30 status: {} ---- - apiVersion: v1 - kind: PersistentVolume - metadata: - name: neo4j-data-volume - namespace: staging - spec: - accessModes: - - ReadWriteOnce - capacity: - storage: 4Gi - hostPath: - path: /data/neo4j/ --- kind: PersistentVolumeClaim apiVersion: v1 From fb929da2cd1dc7d5b3436ff3e19283faaa69c3a2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=A4fer?= Date: Mon, 4 Feb 2019 01:34:17 +0100 Subject: [PATCH 24/55] Refactor db-migration-worker deployment Instead of creating a deployment with a replica set, we just create the pod once. Also the pod should have everything in the future to run the database migration. Ie. have `cypher-shell` to write directly to the database in the current network. All required configuration is passed manually to the `db-migration-worker`-pod directly. SSH-keys are copied through a secrets file. This altogether made many configuration files obsolete. --- .gitignore | 2 + README.md | 40 ++++++--- configmap-db-migration-worker.template.yaml | 12 --- db-migration-worker.yaml | 39 +++++++++ staging/.gitignore | 2 - staging/deployment-db-migration-worker.yaml | 92 --------------------- staging/volume-claim-mongo-exports.yaml | 12 --- staging/volume-claim-uploads.yaml | 12 --- 8 files changed, 70 insertions(+), 141 deletions(-) create mode 100644 .gitignore delete mode 100644 configmap-db-migration-worker.template.yaml create mode 100644 db-migration-worker.yaml delete mode 100644 staging/.gitignore delete mode 100644 staging/deployment-db-migration-worker.yaml delete mode 100644 staging/volume-claim-mongo-exports.yaml delete mode 100644 staging/volume-claim-uploads.yaml diff --git a/.gitignore b/.gitignore new file mode 100644 index 000000000..18b453e6b --- /dev/null +++ b/.gitignore @@ -0,0 +1,2 @@ +secrets.yaml +*/secrets.yaml diff --git a/README.md b/README.md index 7f998fcf2..6c8227f59 100644 --- a/README.md +++ b/README.md @@ -109,20 +109,38 @@ Wait until all pods turn green and they don't show a warning `Waiting: ContainerCreating` anymore. -### Provision db-migration-worker -Copy your private ssh key and the `.known-hosts` file of your remote legacy -server. -```shell - -# check the corresponding db-migration-worker pod -$ kubectl --namespace=staging get pods -# change below -$ kubectl cp path/to/your/ssh/keys/.ssh staging/nitro-db-migration-worker-:/root/ +### Migrate database of Human Connection legacy server +Create a configmap with the specific connection data of your legacy server: +```sh +$ kubectl create configmap db-migration-worker \ + --namespace=staging \ + --from-literal=SSH_USERNAME=someuser \ + --from-literal=SSH_HOST=yourhost \ + --from-literal=MONGODB_USERNAME=hc-api \ + --from-literal=MONGODB_PASSWORD=secretpassword \ + --from-literal=MONGODB_AUTH_DB=hc_api \ + --from-literal=MONGODB_DATABASE=hc_api \ + --from-literal=UPLOADS_DIRECTORY=/var/www/api/uploads ``` +Create a secret with your public and private ssh keys: +```sh +$ kubectl create secret generic ssh-keys \ + --namespace=staging \ + --from-file=id_rsa=/path/to/.ssh/id_rsa \ + --from-file=id_rsa.pub=/path/to/.ssh/id_rsa.pub \ + --from-file=known_hosts=/path/to/.ssh/known_hosts +``` +As the [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/secret/#use-case-pod-with-ssh-keys) +points out, you should be careful with your ssh keys. Anyone with access to your +cluster will have access to your ssh keys. 
Better create a new pair with +`ssh-keygen` and copy the public key to your legacy server with `ssh-copy-id`. +Create the pod and the required volume: +```sh +$ kubectl apply -f db-migration-worker.yaml +``` Run the migration: ```shell # change below -$ kubectl --namespace=staging exec -it nitro-db-migration-worker- ./import.sh -$ kubectl --namespace=staging exec -it nitro-neo4j- ./import/import.sh +$ kubectl --namespace=staging exec -it nitro-db-migration-worker ./import.sh ``` diff --git a/configmap-db-migration-worker.template.yaml b/configmap-db-migration-worker.template.yaml deleted file mode 100644 index e00077577..000000000 --- a/configmap-db-migration-worker.template.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -data: - SSH_USERNAME: "" - SSH_HOST: "" - MONGODB_USERNAME: "hc-api" - MONGODB_AUTH_DB: "hc_api" - MONGODB_DATABASE: "hc_api" - UPLOADS_DIRECTORY: "/var/www/api/uploads" -metadata: - name: staging-db-migration-worker - namespace: staging diff --git a/db-migration-worker.yaml b/db-migration-worker.yaml new file mode 100644 index 000000000..e0b520e58 --- /dev/null +++ b/db-migration-worker.yaml @@ -0,0 +1,39 @@ +--- + kind: Pod + apiVersion: v1 + metadata: + name: nitro-db-migration-worker + namespace: staging + spec: + volumes: + - name: secret-volume + secret: + secretName: ssh-keys + defaultMode: 0400 + - name: mongo-export + persistentVolumeClaim: + claimName: mongo-export-claim + containers: + - name: nitro-db-migration-worker + image: humanconnection/db-migration-worker:latest + envFrom: + - configMapRef: + name: db-migration-worker + volumeMounts: + - name: secret-volume + readOnly: false + mountPath: /root/.ssh + - name: mongo-export + mountPath: /mongo-export/ +--- + kind: PersistentVolumeClaim + apiVersion: v1 + metadata: + name: mongo-export-claim + namespace: staging + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi diff --git a/staging/.gitignore b/staging/.gitignore deleted file mode 100644 index 599426dbb..000000000 --- a/staging/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -configmap-db-migration-worker.yaml -secrets.yaml diff --git a/staging/deployment-db-migration-worker.yaml b/staging/deployment-db-migration-worker.yaml deleted file mode 100644 index f4d427096..000000000 --- a/staging/deployment-db-migration-worker.yaml +++ /dev/null @@ -1,92 +0,0 @@ ---- - apiVersion: extensions/v1beta1 - kind: Deployment - metadata: - name: nitro-db-migration-worker - namespace: staging - spec: - replicas: 1 - minReadySeconds: 15 - progressDeadlineSeconds: 60 - selector: - matchLabels: - workload.user.cattle.io/workloadselector: deployment-staging-db-migration-worker - template: - metadata: - labels: - workload.user.cattle.io/workloadselector: deployment-staging-db-migration-worker - name: nitro-db-migration-worker - spec: - containers: - - env: - - name: COMMIT - value: - - name: SSH_USERNAME - valueFrom: - configMapKeyRef: - name: staging-db-migration-worker - key: SSH_USERNAME - - name: SSH_HOST - valueFrom: - configMapKeyRef: - name: staging-db-migration-worker - key: SSH_HOST - - name: MONGODB_USERNAME - valueFrom: - configMapKeyRef: - name: staging-db-migration-worker - key: MONGODB_USERNAME - - name: MONGODB_AUTH_DB - valueFrom: - configMapKeyRef: - name: staging-db-migration-worker - key: MONGODB_AUTH_DB - - name: MONGODB_DATABASE - valueFrom: - configMapKeyRef: - name: staging-db-migration-worker - key: MONGODB_DATABASE - - name: UPLOADS_DIRECTORY - valueFrom: - configMapKeyRef: - name: 
staging-db-migration-worker - key: UPLOADS_DIRECTORY - - name: MONGODB_PASSWORD - valueFrom: - secretKeyRef: - name: staging - key: MONGODB_PASSWORD - optional: false - image: humanconnection/db-migration-worker:latest - name: nitro-db-migration-worker - resources: {} - imagePullPolicy: Always - volumeMounts: - - mountPath: /root/ - name: ssh-keys-directory - - mountPath: /mongo-export/ - name: mongo-export - volumes: - - name: ssh-keys-directory - persistentVolumeClaim: - claimName: ssh-keys-claim - - name: mongo-export - persistentVolumeClaim: - claimName: mongo-export-claim - restartPolicy: Always - terminationGracePeriodSeconds: 30 - status: {} ---- - kind: PersistentVolumeClaim - apiVersion: v1 - metadata: - name: ssh-keys-claim - namespace: staging - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - # waaay too much - # unfortunately Digital Oceans volumes start at 1Gi - storage: 1Gi diff --git a/staging/volume-claim-mongo-exports.yaml b/staging/volume-claim-mongo-exports.yaml deleted file mode 100644 index 563a9cfe6..000000000 --- a/staging/volume-claim-mongo-exports.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- - kind: PersistentVolumeClaim - apiVersion: v1 - metadata: - name: mongo-export-claim - namespace: staging - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi diff --git a/staging/volume-claim-uploads.yaml b/staging/volume-claim-uploads.yaml deleted file mode 100644 index a48d28ddc..000000000 --- a/staging/volume-claim-uploads.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- - kind: PersistentVolumeClaim - apiVersion: v1 - metadata: - name: uploads-claim - namespace: staging - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 8Gi From 81ae557be1da93b5fcc045090553cdf4815e13e7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=A4fer?= Date: Mon, 4 Feb 2019 13:00:00 +0100 Subject: [PATCH 25/55] Create a persistent volume claim for uploads --- staging/deployment-backend.yaml | 146 ++++++++++++++++++-------------- 1 file changed, 83 insertions(+), 63 deletions(-) diff --git a/staging/deployment-backend.yaml b/staging/deployment-backend.yaml index 31f8ef357..b3f329ba8 100644 --- a/staging/deployment-backend.yaml +++ b/staging/deployment-backend.yaml @@ -1,64 +1,84 @@ -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: nitro-backend - namespace: staging -spec: - replicas: 2 - minReadySeconds: 15 - progressDeadlineSeconds: 60 - selector: - matchLabels: - workload.user.cattle.io/workloadselector: deployment-staging-backend - template: - metadata: - labels: +--- + apiVersion: extensions/v1beta1 + kind: Deployment + metadata: + name: nitro-backend + namespace: staging + spec: + replicas: 2 + minReadySeconds: 15 + progressDeadlineSeconds: 60 + selector: + matchLabels: workload.user.cattle.io/workloadselector: deployment-staging-backend - name: "nitro-backend" - spec: - containers: - - env: - - name: COMMIT - value: - - name: MOCK - value: "false" - - name: CLIENT_URI - valueFrom: - configMapKeyRef: - name: staging-web - key: CLIENT_URI - - name: GRAPHQL_PORT - valueFrom: - configMapKeyRef: - name: staging-backend - key: GRAPHQL_PORT - - name: GRAPHQL_URI - valueFrom: - configMapKeyRef: - name: staging-backend - key: GRAPHQL_URI - - name: MAPBOX_TOKEN - valueFrom: - configMapKeyRef: - name: staging-web - key: MAPBOX_TOKEN - - name: JWT_SECRET - valueFrom: - secretKeyRef: - name: staging - key: JWT_SECRET - optional: false - - name: NEO4J_URI - valueFrom: - configMapKeyRef: - name: staging-neo4j - key: 
NEO4J_URI - image: humanconnection/nitro-backend:latest - name: nitro-backend - ports: - - containerPort: 4000 - resources: {} - imagePullPolicy: Always - restartPolicy: Always - terminationGracePeriodSeconds: 30 -status: {} + template: + metadata: + labels: + workload.user.cattle.io/workloadselector: deployment-staging-backend + name: "nitro-backend" + spec: + containers: + - env: + - name: COMMIT + value: + - name: MOCK + value: "false" + - name: CLIENT_URI + valueFrom: + configMapKeyRef: + name: staging-web + key: CLIENT_URI + - name: GRAPHQL_PORT + valueFrom: + configMapKeyRef: + name: staging-backend + key: GRAPHQL_PORT + - name: GRAPHQL_URI + valueFrom: + configMapKeyRef: + name: staging-backend + key: GRAPHQL_URI + - name: MAPBOX_TOKEN + valueFrom: + configMapKeyRef: + name: staging-web + key: MAPBOX_TOKEN + - name: JWT_SECRET + valueFrom: + secretKeyRef: + name: staging + key: JWT_SECRET + optional: false + - name: NEO4J_URI + valueFrom: + configMapKeyRef: + name: staging-neo4j + key: NEO4J_URI + image: humanconnection/nitro-backend:latest + name: nitro-backend + ports: + - containerPort: 4000 + resources: {} + imagePullPolicy: Always + volumeMounts: + - mountPath: /nitro-backend/public/uploads + name: uploads + volumes: + - name: uploads + persistentVolumeClaim: + claimName: uploads-claim + restartPolicy: Always + terminationGracePeriodSeconds: 30 + status: {} +--- + kind: PersistentVolumeClaim + apiVersion: v1 + metadata: + name: uploads-claim + namespace: staging + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 10Gi From d876a3f442b12118dce20a974bb36f8cfbe17993 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=A4fer?= Date: Mon, 4 Feb 2019 20:44:07 +0100 Subject: [PATCH 26/55] Multiple container per pod setup --- staging/deployment-backend.yaml | 29 +++++++++++++++++------ staging/deployment-neo4j.yaml | 41 ++++++++++++++++++++++++++++----- 2 files changed, 57 insertions(+), 13 deletions(-) diff --git a/staging/deployment-backend.yaml b/staging/deployment-backend.yaml index b3f329ba8..da3c4f148 100644 --- a/staging/deployment-backend.yaml +++ b/staging/deployment-backend.yaml @@ -18,7 +18,24 @@ name: "nitro-backend" spec: containers: - - env: + - name: nitro-db-migration-worker + image: humanconnection/db-migration-worker:latest + imagePullPolicy: Always + envFrom: + - configMapRef: + name: db-migration-worker + volumeMounts: + - name: secret-volume + readOnly: false + mountPath: /root/.ssh + - name: uploads + mountPath: /uploads/ + - name: nitro-backend + image: humanconnection/nitro-backend:latest + imagePullPolicy: Always + ports: + - containerPort: 4000 + env: - name: COMMIT value: - name: MOCK @@ -54,16 +71,14 @@ configMapKeyRef: name: staging-neo4j key: NEO4J_URI - image: humanconnection/nitro-backend:latest - name: nitro-backend - ports: - - containerPort: 4000 - resources: {} - imagePullPolicy: Always volumeMounts: - mountPath: /nitro-backend/public/uploads name: uploads volumes: + - name: secret-volume + secret: + secretName: ssh-keys + defaultMode: 0400 - name: uploads persistentVolumeClaim: claimName: uploads-claim diff --git a/staging/deployment-neo4j.yaml b/staging/deployment-neo4j.yaml index 58ca7f24b..957696341 100644 --- a/staging/deployment-neo4j.yaml +++ b/staging/deployment-neo4j.yaml @@ -17,7 +17,25 @@ name: nitro-neo4j spec: containers: - - env: + - name: nitro-db-migration-worker + image: humanconnection/db-migration-worker:latest + imagePullPolicy: Always + envFrom: + - configMapRef: + name: db-migration-worker + env: + - 
name: COMMIT + value: + volumeMounts: + - name: secret-volume + readOnly: false + mountPath: /root/.ssh + - name: mongo-export + mountPath: /mongo-export/ + - name: nitro-neo4j + image: humanconnection/neo4j:latest + imagePullPolicy: Always + env: - name: COMMIT value: - name: NEO4J_apoc_import_file_enabled @@ -43,19 +61,19 @@ configMapKeyRef: name: staging-neo4j key: NEO4J_AUTH - image: humanconnection/neo4j:latest - name: nitro-neo4j ports: - containerPort: 7687 - containerPort: 7474 - resources: {} - imagePullPolicy: Always volumeMounts: - mountPath: /data/ name: neo4j-data - mountPath: /mongo-export/ name: mongo-export volumes: + - name: secret-volume + secret: + secretName: ssh-keys + defaultMode: 0400 - name: mongo-export persistentVolumeClaim: claimName: mongo-export-claim @@ -64,7 +82,6 @@ claimName: neo4j-data-claim restartPolicy: Always terminationGracePeriodSeconds: 30 - status: {} --- kind: PersistentVolumeClaim apiVersion: v1 @@ -77,3 +94,15 @@ resources: requests: storage: 4Gi +--- + kind: PersistentVolumeClaim + apiVersion: v1 + metadata: + name: mongo-export-claim + namespace: staging + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi From 1fbec5f52538316bf6b6247ea4812b2b0e94ed83 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=A4fer?= Date: Mon, 4 Feb 2019 22:00:53 +0100 Subject: [PATCH 27/55] Set NEO4J_URI to localhost Apparently, if you have a multi-container setup, containers in the same pod can reach each other via localhost. --- staging/deployment-neo4j.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/staging/deployment-neo4j.yaml b/staging/deployment-neo4j.yaml index 957696341..720246568 100644 --- a/staging/deployment-neo4j.yaml +++ b/staging/deployment-neo4j.yaml @@ -26,6 +26,8 @@ env: - name: COMMIT value: + - name: NEO4J_URI + value: bolt://localhost:7687 volumeMounts: - name: secret-volume readOnly: false From 8c6bc72bd2232685a1e746dff4e7474ab8c68888 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=A4fer?= Date: Mon, 4 Feb 2019 22:05:24 +0100 Subject: [PATCH 28/55] Update README --- README.md | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/README.md b/README.md index 6c8227f59..a61074503 100644 --- a/README.md +++ b/README.md @@ -98,18 +98,7 @@ $ kubectl create -f namespace-staging.yaml ``` Switch to the namespace `staging` in your kubernetes dashboard. -### Run the configuration -```shell -$ kubectl apply -f staging/ -``` - -This can take a while because kubernetes will download the docker images. -Sit back and relax and have a look into your kubernetes dashboard. -Wait until all pods turn green and they don't show a warning -`Waiting: ContainerCreating` anymore. - - -### Migrate database of Human Connection legacy server +### Prepare migration of Human Connection legacy server Create a configmap with the specific connection data of your legacy server: ```sh $ kubectl create configmap db-migration-worker \ --namespace=staging \ --from-literal=SSH_USERNAME=someuser \ --from-literal=SSH_HOST=yourhost \ --from-literal=MONGODB_USERNAME=hc-api \ --from-literal=MONGODB_PASSWORD=secretpassword \ --from-literal=MONGODB_AUTH_DB=hc_api \ --from-literal=MONGODB_DATABASE=hc_api \ - --from-literal=UPLOADS_DIRECTORY=/var/www/api/uploads + --from-literal=UPLOADS_DIRECTORY=/var/www/api/uploads \ + --from-literal=NEO4J_URI= \ + ``` Create a secret with your public and private ssh keys: ```sh $ kubectl create secret generic ssh-keys \ --namespace=staging \ --from-file=id_rsa=/path/to/.ssh/id_rsa \ --from-file=id_rsa.pub=/path/to/.ssh/id_rsa.pub \ --from-file=known_hosts=/path/to/.ssh/known_hosts ``` As the [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/secret/#use-case-pod-with-ssh-keys) points out, you should be careful with your ssh keys. Anyone with access to your cluster will have access to your ssh keys.
Better create a new pair with `ssh-keygen` and copy the public key to your legacy server with `ssh-copy-id`. -Create the pod and the required volume: -```sh -$ kubectl apply -f db-migration-worker.yaml +### Run the configuration +```shell +$ kubectl apply -f staging/ ``` + +This can take a while because kubernetes will download the docker images. +Sit back and relax and have a look into your kubernetes dashboard. +Wait until all pods turn green and they don't show a warning +`Waiting: ContainerCreating` anymore. + +### Migrate legacy database Run the migration: ```shell +$ kubectl --namespace=staging get pods # change below -$ kubectl --namespace=staging exec -it nitro-db-migration-worker ./import.sh +$ kubectl --namespace=staging exec -it nitro-neo4j-65bbdb597c-nc2lv migrate +$ kubectl --namespace=staging exec -it nitro-backend-c6cc5ff69-8h96z sync_uploads ``` From 99262a0d4dd70199a098d20205eee91ae2e4f9c4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=A4fer?= Date: Tue, 5 Feb 2019 17:27:59 +0100 Subject: [PATCH 29/55] Fix README --- README.md | 18 +++++++++--------- staging/deployment-web.yaml | 4 ++-- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/README.md b/README.md index a61074503..201724265 100644 --- a/README.md +++ b/README.md @@ -101,16 +101,16 @@ Switch to the namespace `staging` in your kubernetes dashboard. ### Prepare migration of Human Connection legacy server Create a configmap with the specific connection data of your legacy server: ```sh -$ kubectl create configmap db-migration-worker \ - --namespace=staging \ - --from-literal=SSH_USERNAME=someuser \ - --from-literal=SSH_HOST=yourhost \ - --from-literal=MONGODB_USERNAME=hc-api \ - --from-literal=MONGODB_PASSWORD=secretpassword \ - --from-literal=MONGODB_AUTH_DB=hc_api \ - --from-literal=MONGODB_DATABASE=hc_api \ +$ kubectl create configmap db-migration-worker \ + --namespace=staging \ + --from-literal=SSH_USERNAME=someuser \ + --from-literal=SSH_HOST=yourhost \ + --from-literal=MONGODB_USERNAME=hc-api \ + --from-literal=MONGODB_PASSWORD=secretpassword \ + --from-literal=MONGODB_AUTH_DB=hc_api \ + --from-literal=MONGODB_DATABASE=hc_api \ --from-literal=UPLOADS_DIRECTORY=/var/www/api/uploads \ - --from-literal=NEO4J_URI= \ + --from-literal=NEO4J_URI=bolt://neo4j:7687 ``` Create a secret with your public and private ssh keys: diff --git a/staging/deployment-web.yaml b/staging/deployment-web.yaml index 48e99b9c2..de9651528 100644 --- a/staging/deployment-web.yaml +++ b/staging/deployment-web.yaml @@ -17,7 +17,8 @@ spec: name: nitro-web spec: containers: - - env: + - name: web + env: - name: COMMIT value: - name: HOST @@ -39,7 +40,6 @@ spec: key: JWT_SECRET optional: false image: humanconnection/nitro-web:latest - name: web ports: - containerPort: 3000 resources: {} From e1e457abaed92659b52e376d634d5e7edd36c9c1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=A4fer?= Date: Tue, 5 Feb 2019 17:44:30 +0100 Subject: [PATCH 30/55] Cleanly separate legacy migration part Use `kubectl patch -f ...` --- README.md | 36 ++++++++++++----- legacy-migration/deployment-backend.yaml | 27 +++++++++++++ legacy-migration/deployment-neo4j.yaml | 39 +++++++++++++++++++ .../volume-claim-mongo-export.yaml | 12 ++++++ staging/deployment-backend.yaml | 16 -------- staging/deployment-neo4j.yaml | 38 ------------------ 6 files changed, 104 insertions(+), 64 deletions(-) create mode 100644 legacy-migration/deployment-backend.yaml create mode 100644 legacy-migration/deployment-neo4j.yaml create mode 100644 
legacy-migration/volume-claim-mongo-export.yaml diff --git a/README.md b/README.md index 201724265..6ab975a07 100644 --- a/README.md +++ b/README.md @@ -98,7 +98,24 @@ $ kubectl create -f namespace-staging.yaml ``` Switch to the namespace `staging` in your kubernetes dashboard. -### Prepare migration of Human Connection legacy server + +### Run the configuration +```shell +$ kubectl apply -f staging/ +``` + +This can take a while because kubernetes will download the docker images. +Sit back and relax and have a look into your kubernetes dashboard. +Wait until all pods turn green and they don't show a warning +`Waiting: ContainerCreating` anymore. + +#### Legacy data migration + +This setup is completely optional and only required if you have data on a server +which is running our legacy code and you want to import that data. It will +import the uploads folder and migrate a dump of mongodb into neo4j. + +##### Prepare migration of Human Connection legacy server Create a configmap with the specific connection data of your legacy server: ```sh $ kubectl create configmap db-migration-worker \ @@ -126,17 +143,16 @@ points out, you should be careful with your ssh keys. Anyone with access to your cluster will have access to your ssh keys. Better create a new pair with `ssh-keygen` and copy the public key to your legacy server with `ssh-copy-id`. -### Run the configuration -```shell -$ kubectl apply -f staging/ +##### Migrate legacy database +Patch the existing deployments to use a multi-container setup: +```bash +cd legacy-migration +kubectl apply -f volume-claim-mongo-export.yaml +kubectl patch --namespace=staging deployment nitro-backend --patch "$(cat deployment-backend.yaml)" +kubectl patch --namespace=staging deployment nitro-neo4j --patch "$(cat deployment-neo4j.yaml)" +cd .. ``` -This can take a while because kubernetes will download the docker images. -Sit back and relax and have a look into your kubernetes dashboard. -Wait until all pods turn green and they don't show a warning -`Waiting: ContainerCreating` anymore. 
- -### Migrate legacy database Run the migration: ```shell $ kubectl --namespace=staging get pods diff --git a/legacy-migration/deployment-backend.yaml b/legacy-migration/deployment-backend.yaml new file mode 100644 index 000000000..e29730cae --- /dev/null +++ b/legacy-migration/deployment-backend.yaml @@ -0,0 +1,27 @@ +--- + apiVersion: extensions/v1beta1 + kind: Deployment + metadata: + name: nitro-backend + namespace: staging + spec: + template: + spec: + containers: + - name: nitro-db-migration-worker + image: humanconnection/db-migration-worker:latest + imagePullPolicy: Always + envFrom: + - configMapRef: + name: db-migration-worker + volumeMounts: + - name: secret-volume + readOnly: false + mountPath: /root/.ssh + - name: uploads + mountPath: /uploads/ + volumes: + - name: secret-volume + secret: + secretName: ssh-keys + defaultMode: 0400 diff --git a/legacy-migration/deployment-neo4j.yaml b/legacy-migration/deployment-neo4j.yaml new file mode 100644 index 000000000..887c02f3a --- /dev/null +++ b/legacy-migration/deployment-neo4j.yaml @@ -0,0 +1,39 @@ +--- + apiVersion: extensions/v1beta1 + kind: Deployment + metadata: + name: nitro-neo4j + namespace: staging + spec: + template: + spec: + containers: + - name: nitro-db-migration-worker + image: humanconnection/db-migration-worker:latest + imagePullPolicy: Always + envFrom: + - configMapRef: + name: db-migration-worker + env: + - name: COMMIT + value: + - name: NEO4J_URI + value: bolt://localhost:7687 + volumeMounts: + - name: secret-volume + readOnly: false + mountPath: /root/.ssh + - name: mongo-export + mountPath: /mongo-export/ + - name: nitro-neo4j + volumeMounts: + - mountPath: /mongo-export/ + name: mongo-export + volumes: + - name: secret-volume + secret: + secretName: ssh-keys + defaultMode: 0400 + - name: mongo-export + persistentVolumeClaim: + claimName: mongo-export-claim diff --git a/legacy-migration/volume-claim-mongo-export.yaml b/legacy-migration/volume-claim-mongo-export.yaml new file mode 100644 index 000000000..563a9cfe6 --- /dev/null +++ b/legacy-migration/volume-claim-mongo-export.yaml @@ -0,0 +1,12 @@ +--- + kind: PersistentVolumeClaim + apiVersion: v1 + metadata: + name: mongo-export-claim + namespace: staging + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi diff --git a/staging/deployment-backend.yaml b/staging/deployment-backend.yaml index da3c4f148..4c2832a71 100644 --- a/staging/deployment-backend.yaml +++ b/staging/deployment-backend.yaml @@ -18,18 +18,6 @@ name: "nitro-backend" spec: containers: - - name: nitro-db-migration-worker - image: humanconnection/db-migration-worker:latest - imagePullPolicy: Always - envFrom: - - configMapRef: - name: db-migration-worker - volumeMounts: - - name: secret-volume - readOnly: false - mountPath: /root/.ssh - - name: uploads - mountPath: /uploads/ - name: nitro-backend image: humanconnection/nitro-backend:latest imagePullPolicy: Always @@ -75,10 +63,6 @@ - mountPath: /nitro-backend/public/uploads name: uploads volumes: - - name: secret-volume - secret: - secretName: ssh-keys - defaultMode: 0400 - name: uploads persistentVolumeClaim: claimName: uploads-claim diff --git a/staging/deployment-neo4j.yaml b/staging/deployment-neo4j.yaml index 720246568..d9aeab542 100644 --- a/staging/deployment-neo4j.yaml +++ b/staging/deployment-neo4j.yaml @@ -17,23 +17,6 @@ name: nitro-neo4j spec: containers: - - name: nitro-db-migration-worker - image: humanconnection/db-migration-worker:latest - imagePullPolicy: Always - envFrom: - - configMapRef: - name: 
db-migration-worker - env: - - name: COMMIT - value: - - name: NEO4J_URI - value: bolt://localhost:7687 - volumeMounts: - - name: secret-volume - readOnly: false - mountPath: /root/.ssh - - name: mongo-export - mountPath: /mongo-export/ - name: nitro-neo4j image: humanconnection/neo4j:latest imagePullPolicy: Always @@ -69,16 +52,7 @@ volumeMounts: - mountPath: /data/ name: neo4j-data - - mountPath: /mongo-export/ - name: mongo-export volumes: - - name: secret-volume - secret: - secretName: ssh-keys - defaultMode: 0400 - - name: mongo-export - persistentVolumeClaim: - claimName: mongo-export-claim - name: neo4j-data persistentVolumeClaim: claimName: neo4j-data-claim @@ -96,15 +70,3 @@ resources: requests: storage: 4Gi ---- - kind: PersistentVolumeClaim - apiVersion: v1 - metadata: - name: mongo-export-claim - namespace: staging - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi From 3bbe1f1471281bec1d81d827d44921e543928b47 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=A4fer?= Date: Thu, 7 Feb 2019 17:33:20 +0100 Subject: [PATCH 31/55] Create ingress server and add documentation --- README.md | 15 +++++++++++++++ staging/ingress.yaml | 13 +++++++++++++ staging/service-web.yaml | 2 -- 3 files changed, 28 insertions(+), 2 deletions(-) create mode 100644 staging/ingress.yaml diff --git a/README.md b/README.md index 6ab975a07..572ecbc43 100644 --- a/README.md +++ b/README.md @@ -109,6 +109,21 @@ Sit back and relax and have a look into your kubernetes dashboard. Wait until all pods turn green and they don't show a warning `Waiting: ContainerCreating` anymore. +### Setup Loadbalancer and Ingress + +Basically follow [this tutorial](https://www.digitalocean.com/community/tutorials/how-to-set-up-an-nginx-ingress-with-cert-manager-on-digitalocean-kubernetes). 
+ +tl;dr: +```sh +$ kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/mandatory.yaml +$ kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/cloud-generic.yaml +``` +And create an ingress service in namespace `staging`: +```sh +# you should change the domain name according to your needs +$ kubectl apply -f staging/ingress.yaml +``` + #### Legacy data migration This setup is completely optional and only required if you have data on a server diff --git a/staging/ingress.yaml b/staging/ingress.yaml new file mode 100644 index 000000000..fa23a9957 --- /dev/null +++ b/staging/ingress.yaml @@ -0,0 +1,13 @@ +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: ingress + namespace: staging +spec: + rules: + - host: master.nitro.human-connection.org + http: + paths: + - backend: + serviceName: nitro-web + servicePort: 3000 diff --git a/staging/service-web.yaml b/staging/service-web.yaml index ad2b9678b..333983b72 100644 --- a/staging/service-web.yaml +++ b/staging/service-web.yaml @@ -12,5 +12,3 @@ spec: targetPort: 3000 selector: workload.user.cattle.io/workloadselector: deployment-staging-web - type: LoadBalancer - externalTrafficPolicy: Cluster From 066adb9b6edf2ddedb4466012fdbd5c9e4c83e41 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=A4fer?= Date: Thu, 7 Feb 2019 23:14:01 +0100 Subject: [PATCH 32/55] Bulk rename namespace staging->`human-connection` --- README.md | 30 +++++++++---------- db-migration-worker.yaml | 4 +-- {staging => human-connection}/configmaps.yaml | 18 +++++------ .../deployment-backend.yaml | 20 ++++++------- .../deployment-neo4j.yaml | 14 ++++----- .../deployment-web.yaml | 12 ++++---- {staging => human-connection}/ingress.yaml | 2 +- human-connection/service-backend.yaml | 14 +++++++++ .../service-neo4j.yaml | 6 ++-- human-connection/service-web.yaml | 14 +++++++++ legacy-migration/deployment-backend.yaml | 2 +- legacy-migration/deployment-neo4j.yaml | 2 +- .../volume-claim-mongo-export.yaml | 2 +- namespace-human-connection.yaml | 6 ++++ namespace-staging.yaml | 6 ---- secrets.template.yaml | 4 +-- staging/service-backend.yaml | 14 --------- staging/service-web.yaml | 14 --------- 18 files changed, 92 insertions(+), 92 deletions(-) rename {staging => human-connection}/configmaps.yaml (50%) rename {staging => human-connection}/deployment-backend.yaml (79%) rename {staging => human-connection}/deployment-neo4j.yaml (82%) rename {staging => human-connection}/deployment-web.yaml (76%) rename {staging => human-connection}/ingress.yaml (88%) create mode 100644 human-connection/service-backend.yaml rename {staging => human-connection}/service-neo4j.yaml (53%) create mode 100644 human-connection/service-web.yaml create mode 100644 namespace-human-connection.yaml delete mode 100644 namespace-staging.yaml delete mode 100644 staging/service-backend.yaml delete mode 100644 staging/service-web.yaml diff --git a/README.md b/README.md index 572ecbc43..6cb31bfec 100644 --- a/README.md +++ b/README.md @@ -28,7 +28,7 @@ If all the pods and services have settled and everything looks green in your minikube dashboard, expose the `nitro-web` service on your host system with: ```shell -$ minikube service nitro-web --namespace=staging +$ minikube service nitro-web --namespace=human-connection ``` ## Digital Ocean @@ -72,14 +72,14 @@ own setup. 
#### Setup config maps ```shell -$ cp configmap-db-migration-worker.template.yaml staging/configmap-db-migration-worker.yaml +$ cp configmap-db-migration-worker.template.yaml human-connection/configmap-db-migration-worker.yaml ``` Edit all variables according to the setup of the remote legacy server. #### Setup secrets and deploy themn ```sh -$ cp secrets.template.yaml staging/secrets.yaml +$ cp secrets.template.yaml human-connection/secrets.yaml ``` Change all secrets as needed. @@ -94,14 +94,14 @@ Those secrets get `base64` decoded in a kubernetes pod. #### Create a namespace locally ```shell -$ kubectl create -f namespace-staging.yaml +$ kubectl create -f namespace-human-connection.yaml ``` -Switch to the namespace `staging` in your kubernetes dashboard. +Switch to the namespace `human-connection` in your kubernetes dashboard. ### Run the configuration ```shell -$ kubectl apply -f staging/ +$ kubectl apply -f human-connection/ ``` This can take a while because kubernetes will download the docker images. @@ -118,10 +118,10 @@ tl;dr: $ kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/mandatory.yaml $ kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/cloud-generic.yaml ``` -And create an ingress service in namespace `staging`: +And create an ingress service in namespace `human-connection`: ```sh # you should change the domain name according to your needs -$ kubectl apply -f staging/ingress.yaml +$ kubectl apply -f human-connection/ingress.yaml ``` #### Legacy data migration @@ -134,7 +134,7 @@ import the uploads folder and migrate a dump of mongodb into neo4j. Create a configmap with the specific connection data of your legacy server: ```sh $ kubectl create configmap db-migration-worker \ - --namespace=staging \ + --namespace=human-connection \ --from-literal=SSH_USERNAME=someuser \ --from-literal=SSH_HOST=yourhost \ --from-literal=MONGODB_USERNAME=hc-api \ @@ -148,7 +148,7 @@ $ kubectl create configmap db-migration-worker \ Create a secret with your public and private ssh keys: ```sh $ kubectl create secret generic ssh-keys \ - --namespace=staging \ + --namespace=human-connection \ --from-file=id_rsa=/path/to/.ssh/id_rsa \ --from-file=id_rsa.pub=/path/to/.ssh/id_rsa.pub \ --from-file=known_hosts=/path/to/.ssh/known_hosts @@ -163,15 +163,15 @@ Patch the existing deployments to use a multi-container setup: ```bash cd legacy-migration kubectl apply -f volume-claim-mongo-export.yaml -kubectl patch --namespace=staging deployment nitro-backend --patch "$(cat deployment-backend.yaml)" -kubectl patch --namespace=staging deployment nitro-neo4j --patch "$(cat deployment-neo4j.yaml)" +kubectl patch --namespace=human-connection deployment nitro-backend --patch "$(cat deployment-backend.yaml)" +kubectl patch --namespace=human-connection deployment nitro-neo4j --patch "$(cat deployment-neo4j.yaml)" cd .. 
``` Run the migration: ```shell -$ kubectl --namespace=staging get pods +$ kubectl --namespace=human-connection get pods # change below -$ kubectl --namespace=staging exec -it nitro-neo4j-65bbdb597c-nc2lv migrate -$ kubectl --namespace=staging exec -it nitro-backend-c6cc5ff69-8h96z sync_uploads +$ kubectl --namespace=human-connection exec -it nitro-neo4j-65bbdb597c-nc2lv migrate +$ kubectl --namespace=human-connection exec -it nitro-backend-c6cc5ff69-8h96z sync_uploads ``` diff --git a/db-migration-worker.yaml b/db-migration-worker.yaml index e0b520e58..55743e360 100644 --- a/db-migration-worker.yaml +++ b/db-migration-worker.yaml @@ -3,7 +3,7 @@ apiVersion: v1 metadata: name: nitro-db-migration-worker - namespace: staging + namespace: human-connection spec: volumes: - name: secret-volume @@ -30,7 +30,7 @@ apiVersion: v1 metadata: name: mongo-export-claim - namespace: staging + namespace: human-connection spec: accessModes: - ReadWriteOnce diff --git a/staging/configmaps.yaml b/human-connection/configmaps.yaml similarity index 50% rename from staging/configmaps.yaml rename to human-connection/configmaps.yaml index c07353141..6c836c220 100644 --- a/staging/configmaps.yaml +++ b/human-connection/configmaps.yaml @@ -3,27 +3,27 @@ kind: ConfigMap data: GRAPHQL_PORT: "4000" - GRAPHQL_URI: "http://nitro-backend.staging:4000" + GRAPHQL_URI: "http://nitro-backend.human-connection:4000" MOCK: "false" metadata: - name: staging-backend - namespace: staging + name: human-connection-backend + namespace: human-connection --- apiVersion: v1 kind: ConfigMap data: - NEO4J_URI: "bolt://nitro-neo4j.staging:7687" + NEO4J_URI: "bolt://nitro-neo4j.human-connection:7687" NEO4J_USER: "neo4j" NEO4J_AUTH: none metadata: - name: staging-neo4j - namespace: staging + name: human-connection-neo4j + namespace: human-connection --- apiVersion: v1 kind: ConfigMap data: - CLIENT_URI: "https://nitro-staging.human-connection.org" + CLIENT_URI: "https://nitro-human-connection.human-connection.org" MAPBOX_TOKEN: pk.eyJ1IjoiaHVtYW4tY29ubmVjdGlvbiIsImEiOiJjajl0cnBubGoweTVlM3VwZ2lzNTNud3ZtIn0.KZ8KK9l70omjXbEkkbHGsQ metadata: - name: staging-web - namespace: staging + name: human-connection-web + namespace: human-connection diff --git a/staging/deployment-backend.yaml b/human-connection/deployment-backend.yaml similarity index 79% rename from staging/deployment-backend.yaml rename to human-connection/deployment-backend.yaml index 4c2832a71..ac1f31ed4 100644 --- a/staging/deployment-backend.yaml +++ b/human-connection/deployment-backend.yaml @@ -3,18 +3,18 @@ kind: Deployment metadata: name: nitro-backend - namespace: staging + namespace: human-connection spec: replicas: 2 minReadySeconds: 15 progressDeadlineSeconds: 60 selector: matchLabels: - workload.user.cattle.io/workloadselector: deployment-staging-backend + human-connection.org/selector: deployment-human-connection-backend template: metadata: labels: - workload.user.cattle.io/workloadselector: deployment-staging-backend + human-connection.org/selector: deployment-human-connection-backend name: "nitro-backend" spec: containers: @@ -31,33 +31,33 @@ - name: CLIENT_URI valueFrom: configMapKeyRef: - name: staging-web + name: human-connection-web key: CLIENT_URI - name: GRAPHQL_PORT valueFrom: configMapKeyRef: - name: staging-backend + name: human-connection-backend key: GRAPHQL_PORT - name: GRAPHQL_URI valueFrom: configMapKeyRef: - name: staging-backend + name: human-connection-backend key: GRAPHQL_URI - name: MAPBOX_TOKEN valueFrom: configMapKeyRef: - name: staging-web + name: 
human-connection-web key: MAPBOX_TOKEN - name: JWT_SECRET valueFrom: secretKeyRef: - name: staging + name: human-connection key: JWT_SECRET optional: false - name: NEO4J_URI valueFrom: configMapKeyRef: - name: staging-neo4j + name: human-connection-neo4j key: NEO4J_URI volumeMounts: - mountPath: /nitro-backend/public/uploads @@ -74,7 +74,7 @@ apiVersion: v1 metadata: name: uploads-claim - namespace: staging + namespace: human-connection spec: accessModes: - ReadWriteOnce diff --git a/staging/deployment-neo4j.yaml b/human-connection/deployment-neo4j.yaml similarity index 82% rename from staging/deployment-neo4j.yaml rename to human-connection/deployment-neo4j.yaml index d9aeab542..c84431bb5 100644 --- a/staging/deployment-neo4j.yaml +++ b/human-connection/deployment-neo4j.yaml @@ -3,17 +3,17 @@ kind: Deployment metadata: name: nitro-neo4j - namespace: staging + namespace: human-connection spec: replicas: 1 strategy: {} selector: matchLabels: - workload.user.cattle.io/workloadselector: deployment-staging-neo4j + human-connection.org/selector: deployment-human-connection-neo4j template: metadata: labels: - workload.user.cattle.io/workloadselector: deployment-staging-neo4j + human-connection.org/selector: deployment-human-connection-neo4j name: nitro-neo4j spec: containers: @@ -34,17 +34,17 @@ - name: NEO4J_URI valueFrom: configMapKeyRef: - name: staging-neo4j + name: human-connection-neo4j key: NEO4J_URI - name: NEO4J_USER valueFrom: configMapKeyRef: - name: staging-neo4j + name: human-connection-neo4j key: NEO4J_USER - name: NEO4J_AUTH valueFrom: configMapKeyRef: - name: staging-neo4j + name: human-connection-neo4j key: NEO4J_AUTH ports: - containerPort: 7687 @@ -63,7 +63,7 @@ apiVersion: v1 metadata: name: neo4j-data-claim - namespace: staging + namespace: human-connection spec: accessModes: - ReadWriteOnce diff --git a/staging/deployment-web.yaml b/human-connection/deployment-web.yaml similarity index 76% rename from staging/deployment-web.yaml rename to human-connection/deployment-web.yaml index de9651528..769b44469 100644 --- a/staging/deployment-web.yaml +++ b/human-connection/deployment-web.yaml @@ -2,18 +2,18 @@ apiVersion: extensions/v1beta1 kind: Deployment metadata: name: nitro-web - namespace: staging + namespace: human-connection spec: replicas: 2 minReadySeconds: 15 progressDeadlineSeconds: 60 selector: matchLabels: - workload.user.cattle.io/workloadselector: deployment-staging-web + human-connection.org/selector: deployment-human-connection-web template: metadata: labels: - workload.user.cattle.io/workloadselector: deployment-staging-web + human-connection.org/selector: deployment-human-connection-web name: nitro-web spec: containers: @@ -26,17 +26,17 @@ spec: - name: BACKEND_URL valueFrom: configMapKeyRef: - name: staging-backend + name: human-connection-backend key: GRAPHQL_URI - name: MAPBOX_TOKEN valueFrom: configMapKeyRef: - name: staging-web + name: human-connection-web key: MAPBOX_TOKEN - name: JWT_SECRET valueFrom: secretKeyRef: - name: staging + name: human-connection key: JWT_SECRET optional: false image: humanconnection/nitro-web:latest diff --git a/staging/ingress.yaml b/human-connection/ingress.yaml similarity index 88% rename from staging/ingress.yaml rename to human-connection/ingress.yaml index fa23a9957..b6028b9aa 100644 --- a/staging/ingress.yaml +++ b/human-connection/ingress.yaml @@ -2,7 +2,7 @@ apiVersion: extensions/v1beta1 kind: Ingress metadata: name: ingress - namespace: staging + namespace: human-connection spec: rules: - host: 
master.nitro.human-connection.org diff --git a/human-connection/service-backend.yaml b/human-connection/service-backend.yaml new file mode 100644 index 000000000..52e4621b2 --- /dev/null +++ b/human-connection/service-backend.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Service +metadata: + name: nitro-backend + namespace: human-connection + labels: + human-connection.org/selector: deployment-human-connection-backend +spec: + ports: + - name: web + port: 4000 + targetPort: 4000 + selector: + human-connection.org/selector: deployment-human-connection-backend diff --git a/staging/service-neo4j.yaml b/human-connection/service-neo4j.yaml similarity index 53% rename from staging/service-neo4j.yaml rename to human-connection/service-neo4j.yaml index d6c7a95b4..ebe7c5208 100644 --- a/staging/service-neo4j.yaml +++ b/human-connection/service-neo4j.yaml @@ -2,9 +2,9 @@ apiVersion: v1 kind: Service metadata: name: nitro-neo4j - namespace: staging + namespace: human-connection labels: - workload.user.cattle.io/workloadselector: deployment-staging-neo4j + human-connection.org/selector: deployment-human-connection-neo4j spec: ports: - name: bolt @@ -14,4 +14,4 @@ spec: port: 7474 targetPort: 7474 selector: - workload.user.cattle.io/workloadselector: deployment-staging-neo4j + human-connection.org/selector: deployment-human-connection-neo4j diff --git a/human-connection/service-web.yaml b/human-connection/service-web.yaml new file mode 100644 index 000000000..548b874c2 --- /dev/null +++ b/human-connection/service-web.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Service +metadata: + name: nitro-web + namespace: human-connection + labels: + human-connection.org/selector: deployment-human-connection-web +spec: + ports: + - name: web + port: 3000 + targetPort: 3000 + selector: + human-connection.org/selector: deployment-human-connection-web diff --git a/legacy-migration/deployment-backend.yaml b/legacy-migration/deployment-backend.yaml index e29730cae..1adeb0665 100644 --- a/legacy-migration/deployment-backend.yaml +++ b/legacy-migration/deployment-backend.yaml @@ -3,7 +3,7 @@ kind: Deployment metadata: name: nitro-backend - namespace: staging + namespace: human-connection spec: template: spec: diff --git a/legacy-migration/deployment-neo4j.yaml b/legacy-migration/deployment-neo4j.yaml index 887c02f3a..2852b90cb 100644 --- a/legacy-migration/deployment-neo4j.yaml +++ b/legacy-migration/deployment-neo4j.yaml @@ -3,7 +3,7 @@ kind: Deployment metadata: name: nitro-neo4j - namespace: staging + namespace: human-connection spec: template: spec: diff --git a/legacy-migration/volume-claim-mongo-export.yaml b/legacy-migration/volume-claim-mongo-export.yaml index 563a9cfe6..106ef4736 100644 --- a/legacy-migration/volume-claim-mongo-export.yaml +++ b/legacy-migration/volume-claim-mongo-export.yaml @@ -3,7 +3,7 @@ apiVersion: v1 metadata: name: mongo-export-claim - namespace: staging + namespace: human-connection spec: accessModes: - ReadWriteOnce diff --git a/namespace-human-connection.yaml b/namespace-human-connection.yaml new file mode 100644 index 000000000..0710da55b --- /dev/null +++ b/namespace-human-connection.yaml @@ -0,0 +1,6 @@ +kind: Namespace +apiVersion: v1 +metadata: + name: human-connection + labels: + name: human-connection diff --git a/namespace-staging.yaml b/namespace-staging.yaml deleted file mode 100644 index d63b4e0f9..000000000 --- a/namespace-staging.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Namespace -apiVersion: v1 -metadata: - name: staging - labels: - name: staging diff --git 
a/secrets.template.yaml b/secrets.template.yaml index 755cd2d06..915a31be5 100644 --- a/secrets.template.yaml +++ b/secrets.template.yaml @@ -4,5 +4,5 @@ data: JWT_SECRET: "Yi8mJjdiNzhCRiZmdi9WZA==" MONGODB_PASSWORD: "TU9OR09EQl9QQVNTV09SRA==" metadata: - name: staging - namespace: staging + name: human-connection + namespace: human-connection diff --git a/staging/service-backend.yaml b/staging/service-backend.yaml deleted file mode 100644 index 39cfca63a..000000000 --- a/staging/service-backend.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: nitro-backend - namespace: staging - labels: - workload.user.cattle.io/workloadselector: deployment-staging-backend -spec: - ports: - - name: web - port: 4000 - targetPort: 4000 - selector: - workload.user.cattle.io/workloadselector: deployment-staging-backend diff --git a/staging/service-web.yaml b/staging/service-web.yaml deleted file mode 100644 index 333983b72..000000000 --- a/staging/service-web.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: nitro-web - namespace: staging - labels: - workload.user.cattle.io/workloadselector: deployment-staging-web -spec: - ports: - - name: web - port: 3000 - targetPort: 3000 - selector: - workload.user.cattle.io/workloadselector: deployment-staging-web From a25f806d0e27a8a60bf6542ad42576699e534dfe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=A4fer?= Date: Thu, 7 Feb 2019 23:18:50 +0100 Subject: [PATCH 33/55] Add missing kubernetes dashboard deployment --- README.md | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index 6cb31bfec..e339d90d5 100644 --- a/README.md +++ b/README.md @@ -36,6 +36,8 @@ $ minikube service nitro-web --namespace=human-connection First, install kubernetes dashboard: ```sh $ kubectl apply -f dashboard/ +$ kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/master/aio/deploy/recommended/kubernetes-dashboard.yaml + ``` Proxy localhost to the remote kubernetes dashboard: ```sh @@ -70,13 +72,7 @@ Grab the token and paste it into the login screen at [http://localhost:8001/api/ You have to do some prerequisites e.g. change some secrets according to your own setup. -#### Setup config maps -```shell -$ cp configmap-db-migration-worker.template.yaml human-connection/configmap-db-migration-worker.yaml -``` -Edit all variables according to the setup of the remote legacy server. - -#### Setup secrets and deploy themn +### Edit secrets ```sh $ cp secrets.template.yaml human-connection/secrets.yaml @@ -92,7 +88,7 @@ YWRtaW4= ``` Those secrets get `base64` decoded in a kubernetes pod. 
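For reference, the base64-encoded values in `secrets.yaml` (such as the `YWRtaW4=` example above) can be produced and verified on the command line; a minimal sketch, assuming the `base64` utility from GNU coreutils:

```sh
# encode a plain value; -n keeps a trailing newline out of the encoded secret
$ echo -n 'admin' | base64
YWRtaW4=
# decode it again to double-check
$ echo 'YWRtaW4=' | base64 --decode
admin
```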
-#### Create a namespace locally +### Create a namespace ```shell $ kubectl create -f namespace-human-connection.yaml ``` From 5d5db1ea47bc25893a97e898e7b181942bb02403 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=A4fer?= Date: Thu, 7 Feb 2019 23:41:27 +0100 Subject: [PATCH 34/55] Use single configmap for simplicity --- .../{configmaps.yaml => configmap.yaml} | 16 +--------------- human-connection/deployment-backend.yaml | 12 ++++++------ human-connection/deployment-neo4j.yaml | 6 +++--- human-connection/deployment-web.yaml | 6 +++--- 4 files changed, 13 insertions(+), 27 deletions(-) rename human-connection/{configmaps.yaml => configmap.yaml} (63%) diff --git a/human-connection/configmaps.yaml b/human-connection/configmap.yaml similarity index 63% rename from human-connection/configmaps.yaml rename to human-connection/configmap.yaml index 6c836c220..50ae17e23 100644 --- a/human-connection/configmaps.yaml +++ b/human-connection/configmap.yaml @@ -5,25 +5,11 @@ GRAPHQL_PORT: "4000" GRAPHQL_URI: "http://nitro-backend.human-connection:4000" MOCK: "false" - metadata: - name: human-connection-backend - namespace: human-connection ---- - apiVersion: v1 - kind: ConfigMap - data: NEO4J_URI: "bolt://nitro-neo4j.human-connection:7687" NEO4J_USER: "neo4j" NEO4J_AUTH: none - metadata: - name: human-connection-neo4j - namespace: human-connection ---- - apiVersion: v1 - kind: ConfigMap - data: CLIENT_URI: "https://nitro-human-connection.human-connection.org" MAPBOX_TOKEN: pk.eyJ1IjoiaHVtYW4tY29ubmVjdGlvbiIsImEiOiJjajl0cnBubGoweTVlM3VwZ2lzNTNud3ZtIn0.KZ8KK9l70omjXbEkkbHGsQ metadata: - name: human-connection-web + name: configmap namespace: human-connection diff --git a/human-connection/deployment-backend.yaml b/human-connection/deployment-backend.yaml index ac1f31ed4..49bb85f5c 100644 --- a/human-connection/deployment-backend.yaml +++ b/human-connection/deployment-backend.yaml @@ -31,33 +31,33 @@ - name: CLIENT_URI valueFrom: configMapKeyRef: - name: human-connection-web + name: configmap key: CLIENT_URI - name: GRAPHQL_PORT valueFrom: configMapKeyRef: - name: human-connection-backend + name: configmap key: GRAPHQL_PORT - name: GRAPHQL_URI valueFrom: configMapKeyRef: - name: human-connection-backend + name: configmap key: GRAPHQL_URI - name: MAPBOX_TOKEN valueFrom: configMapKeyRef: - name: human-connection-web + name: configmap key: MAPBOX_TOKEN - name: JWT_SECRET valueFrom: secretKeyRef: - name: human-connection + name: secret key: JWT_SECRET optional: false - name: NEO4J_URI valueFrom: configMapKeyRef: - name: human-connection-neo4j + name: configmap key: NEO4J_URI volumeMounts: - mountPath: /nitro-backend/public/uploads diff --git a/human-connection/deployment-neo4j.yaml b/human-connection/deployment-neo4j.yaml index c84431bb5..e3110cac2 100644 --- a/human-connection/deployment-neo4j.yaml +++ b/human-connection/deployment-neo4j.yaml @@ -34,17 +34,17 @@ - name: NEO4J_URI valueFrom: configMapKeyRef: - name: human-connection-neo4j + name: configmap key: NEO4J_URI - name: NEO4J_USER valueFrom: configMapKeyRef: - name: human-connection-neo4j + name: configmap key: NEO4J_USER - name: NEO4J_AUTH valueFrom: configMapKeyRef: - name: human-connection-neo4j + name: configmap key: NEO4J_AUTH ports: - containerPort: 7687 diff --git a/human-connection/deployment-web.yaml b/human-connection/deployment-web.yaml index 769b44469..a3dafe766 100644 --- a/human-connection/deployment-web.yaml +++ b/human-connection/deployment-web.yaml @@ -26,17 +26,17 @@ spec: - name: BACKEND_URL valueFrom: configMapKeyRef: - 
name: human-connection-backend + name: configmap key: GRAPHQL_URI - name: MAPBOX_TOKEN valueFrom: configMapKeyRef: - name: human-connection-web + name: configmap key: MAPBOX_TOKEN - name: JWT_SECRET valueFrom: secretKeyRef: - name: human-connection + name: secret key: JWT_SECRET optional: false image: humanconnection/nitro-web:latest From 9bb2361b18079761427693124fb1c98254a55bfb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=A4fer?= Date: Fri, 8 Feb 2019 02:23:24 +0100 Subject: [PATCH 35/55] Unverified documentation of TLS setup --- README.md | 14 ++++++++++++++ certmanager/ingress.yaml | 22 ++++++++++++++++++++++ certmanager/issuer-production.yaml | 16 ++++++++++++++++ certmanager/issuer-staging.yaml | 16 ++++++++++++++++ human-connection/ingress.yaml | 13 ------------- 5 files changed, 68 insertions(+), 13 deletions(-) create mode 100644 certmanager/ingress.yaml create mode 100644 certmanager/issuer-production.yaml create mode 100644 certmanager/issuer-staging.yaml delete mode 100644 human-connection/ingress.yaml diff --git a/README.md b/README.md index e339d90d5..70438c754 100644 --- a/README.md +++ b/README.md @@ -120,6 +120,20 @@ And create an ingress service in namespace `human-connection`: $ kubectl apply -f human-connection/ingress.yaml ``` +#### Setup SSL + +Follow [this quick start guide](https://docs.cert-manager.io/en/latest/tutorials/acme/quick-start/index.html): +``` +$ kubectl create serviceaccount tiller --namespace=kube-system +$ kubectl create clusterrolebinding tiller-admin --serviceaccount=kube-system:tiller --clusterrole=cluster-admin +$ helm init --service-account=tiller +$ helm repo update +$ helm install stable/nginx-ingress --name quickstart +$ kubectl apply -f https://raw.githubusercontent.com/jetstack/cert-manager/release-0.6/deploy/manifests/00-crds.yaml +$ helm install --name cert-manager --namespace cert-manager stable/cert-manager +$ kubectl apply -f certmanager/ +``` + #### Legacy data migration This setup is completely optional and only required if you have data on a server diff --git a/certmanager/ingress.yaml b/certmanager/ingress.yaml new file mode 100644 index 000000000..d0ef9a2af --- /dev/null +++ b/certmanager/ingress.yaml @@ -0,0 +1,22 @@ +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: ingress + namespace: human-connection + annotations: + kubernetes.io/ingress.class: "nginx" + certmanager.k8s.io/issuer: "letsencrypt-staging" + certmanager.k8s.io/acme-challenge-type: http01 +spec: + tls: + - hosts: + - master.nitro.human-connection.org + secretName: quickstart-example-tls + rules: + - host: master.nitro.human-connection.org + http: + paths: + - path: / + backend: + serviceName: nitro-web + servicePort: 3000 diff --git a/certmanager/issuer-production.yaml b/certmanager/issuer-production.yaml new file mode 100644 index 000000000..6977a2178 --- /dev/null +++ b/certmanager/issuer-production.yaml @@ -0,0 +1,16 @@ +apiVersion: certmanager.k8s.io/v1alpha1 +kind: Issuer +metadata: + name: letsencrypt-prod + namespace: human-connection +spec: + acme: + # The ACME server URL + server: https://acme-v02.api.letsencrypt.org/directory + # Email address used for ACME registration + email: letsencrypt-prod@roschaefer.de + # Name of a secret used to store the ACME account private key + privateKeySecretRef: + name: letsencrypt-prod + # Enable the HTTP-01 challenge provider + http01: {} diff --git a/certmanager/issuer-staging.yaml b/certmanager/issuer-staging.yaml new file mode 100644 index 000000000..ef0f40faa --- /dev/null +++ 
b/certmanager/issuer-staging.yaml @@ -0,0 +1,16 @@ +apiVersion: certmanager.k8s.io/v1alpha1 +kind: Issuer +metadata: + name: letsencrypt-staging + namespace: human-connection +spec: + acme: + # The ACME server URL + server: https://acme-staging-v02.api.letsencrypt.org/directory + # Email address used for ACME registration + email: letsencrypt-staging@roschaefer.de + # Name of a secret used to store the ACME account private key + privateKeySecretRef: + name: letsencrypt-staging + # Enable the HTTP-01 challenge provider + http01: {} diff --git a/human-connection/ingress.yaml b/human-connection/ingress.yaml deleted file mode 100644 index b6028b9aa..000000000 --- a/human-connection/ingress.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: ingress - namespace: human-connection -spec: - rules: - - host: master.nitro.human-connection.org - http: - paths: - - backend: - serviceName: nitro-web - servicePort: 3000 From 5d04c4ccc041d345c07e45ab056be18d0a09a316 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=A4fer?= Date: Fri, 8 Feb 2019 11:00:34 +0100 Subject: [PATCH 36/55] HTTPS works! --- README.md | 17 +++++++++++++---- .../certmanager}/ingress.yaml | 4 ++-- .../certmanager}/issuer-production.yaml | 2 +- .../certmanager}/issuer-staging.yaml | 2 +- 4 files changed, 17 insertions(+), 8 deletions(-) rename {certmanager => human-connection/certmanager}/ingress.yaml (84%) rename {certmanager => human-connection/certmanager}/issuer-production.yaml (91%) rename {certmanager => human-connection/certmanager}/issuer-staging.yaml (91%) diff --git a/README.md b/README.md index 70438c754..026387628 100644 --- a/README.md +++ b/README.md @@ -3,7 +3,7 @@ Todos: - [x] check labels and selectors if they all are correct - [x] configure NGINX from yml -- [ ] configure Let's Encrypt cert-manager from yml +- [x] configure Let's Encrypt cert-manager from yml - [x] configure ingress from yml - [x] configure persistent & shared storage between nodes - [x] reproduce setup locally @@ -122,16 +122,25 @@ $ kubectl apply -f human-connection/ingress.yaml #### Setup SSL -Follow [this quick start guide](https://docs.cert-manager.io/en/latest/tutorials/acme/quick-start/index.html): +Follow [this quick start guide](https://docs.cert-manager.io/en/latest/tutorials/acme/quick-start/index.html) +and install certmanager via helm and tiller: ``` $ kubectl create serviceaccount tiller --namespace=kube-system $ kubectl create clusterrolebinding tiller-admin --serviceaccount=kube-system:tiller --clusterrole=cluster-admin $ helm init --service-account=tiller $ helm repo update $ helm install stable/nginx-ingress --name quickstart -$ kubectl apply -f https://raw.githubusercontent.com/jetstack/cert-manager/release-0.6/deploy/manifests/00-crds.yaml +$ kubectl apply -f https://raw.githubusercontent.com/jetstack/cert-manager/release-0.6/deploy/manifests/00-crds.yaml $ helm install --name cert-manager --namespace cert-manager stable/cert-manager -$ kubectl apply -f certmanager/ +``` + +We provided some configuration in a folder `human-connection/certmanager`. To +avoid letsencrypt's very strict rate limits, the default issuer is +`letsencrypt-staging`. If certmanager is working properly, change it to +`letsencrypt-prod`. Please update the email address in the configuration, too.
+ +```sh +$ kubectl apply -f human-connection/certmanager/ ``` #### Legacy data migration diff --git a/certmanager/ingress.yaml b/human-connection/certmanager/ingress.yaml similarity index 84% rename from certmanager/ingress.yaml rename to human-connection/certmanager/ingress.yaml index d0ef9a2af..4e55adaf2 100644 --- a/certmanager/ingress.yaml +++ b/human-connection/certmanager/ingress.yaml @@ -10,10 +10,10 @@ metadata: spec: tls: - hosts: - - master.nitro.human-connection.org + - nitro-master.human-connection.org secretName: quickstart-example-tls rules: - - host: master.nitro.human-connection.org + - host: nitro-master.human-connection.org http: paths: - path: / diff --git a/certmanager/issuer-production.yaml b/human-connection/certmanager/issuer-production.yaml similarity index 91% rename from certmanager/issuer-production.yaml rename to human-connection/certmanager/issuer-production.yaml index 6977a2178..93090b160 100644 --- a/certmanager/issuer-production.yaml +++ b/human-connection/certmanager/issuer-production.yaml @@ -8,7 +8,7 @@ spec: # The ACME server URL server: https://acme-v02.api.letsencrypt.org/directory # Email address used for ACME registration - email: letsencrypt-prod@roschaefer.de + email: yourmail@example.org # Name of a secret used to store the ACME account private key privateKeySecretRef: name: letsencrypt-prod diff --git a/certmanager/issuer-staging.yaml b/human-connection/certmanager/issuer-staging.yaml similarity index 91% rename from certmanager/issuer-staging.yaml rename to human-connection/certmanager/issuer-staging.yaml index ef0f40faa..c7b4e0cc3 100644 --- a/certmanager/issuer-staging.yaml +++ b/human-connection/certmanager/issuer-staging.yaml @@ -8,7 +8,7 @@ spec: # The ACME server URL server: https://acme-staging-v02.api.letsencrypt.org/directory # Email address used for ACME registration - email: letsencrypt-staging@roschaefer.de + email: yourmail@example.org # Name of a secret used to store the ACME account private key privateKeySecretRef: name: letsencrypt-staging From 85f9a5ef131292f28f54bdfc0acfd12914e388d4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=A4fer?= Date: Fri, 8 Feb 2019 13:58:33 +0100 Subject: [PATCH 37/55] Save some money and decrease volume size We can resize it later --- human-connection/deployment-backend.yaml | 2 +- human-connection/deployment-neo4j.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/human-connection/deployment-backend.yaml b/human-connection/deployment-backend.yaml index 49bb85f5c..00e08f1e7 100644 --- a/human-connection/deployment-backend.yaml +++ b/human-connection/deployment-backend.yaml @@ -80,4 +80,4 @@ - ReadWriteOnce resources: requests: - storage: 10Gi + storage: 2Gi diff --git a/human-connection/deployment-neo4j.yaml b/human-connection/deployment-neo4j.yaml index e3110cac2..5ef5204a2 100644 --- a/human-connection/deployment-neo4j.yaml +++ b/human-connection/deployment-neo4j.yaml @@ -69,4 +69,4 @@ - ReadWriteOnce resources: requests: - storage: 4Gi + storage: 1Gi From 6aea79608e845620a41d017532c4889820677800 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=A4fer?= Date: Fri, 8 Feb 2019 14:15:16 +0100 Subject: [PATCH 38/55] Slight improvements of README --- README.md | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 026387628..1b7235326 100644 --- a/README.md +++ b/README.md @@ -120,7 +120,7 @@ And create an ingress service in namespace `human-connection`: $ kubectl apply -f human-connection/ingress.yaml ``` 
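After applying the ingress, it is worth checking that it was created and eventually receives an external address; a quick sanity check (a sketch — the resource name `ingress` comes from the manifest above):

```sh
# list ingresses in the namespace and wait for an ADDRESS to appear
$ kubectl get ingress --namespace=human-connection
# inspect the events if no address shows up
$ kubectl describe ingress ingress --namespace=human-connection
```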
-#### Setup SSL
+#### Setup HTTPS
 
 Follow [this quick start guide](https://docs.cert-manager.io/en/latest/tutorials/acme/quick-start/index.html)
 and install certmanager via helm and tiller:
@@ -164,7 +164,13 @@ $ kubectl create configmap db-migration-worker \
   --from-literal=NEO4J_URI=bolt://neo4j:7687
 ```
 
-Create a secret with your public and private ssh keys:
+
+Create a secret with your public and private ssh keys. As the
+[kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/secret/#use-case-pod-with-ssh-keys)
+points out, you should be careful with your ssh keys. Anyone with access to your
+cluster will have access to your ssh keys. Better create a new pair with
+`ssh-keygen` and copy the public key to your legacy server with `ssh-copy-id`:
+
 ```sh
 $ kubectl create secret generic ssh-keys \
   --namespace=human-connection \
   --from-file=id_rsa=/path/to/.ssh/id_rsa \
   --from-file=id_rsa.pub=/path/to/.ssh/id_rsa.pub \
   --from-file=known_hosts=/path/to/.ssh/known_hosts
 ```
-As the [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/secret/#use-case-pod-with-ssh-keys)
-points out, you should be careful with your ssh keys. Anyone with access to your
-cluster will have access to your ssh keys. Better create a new pair with
-`ssh-keygen` and copy the public key to your legacy server with `ssh-copy-id`.
 
 ##### Migrate legacy database
 Patch the existing deployments to use a multi-container setup:

From 7e4a616a7af6ce5bf445620052c504b0375ba0dc Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Robert=20Sch=C3=A4fer?=
Date: Fri, 8 Feb 2019 15:48:20 +0100
Subject: [PATCH 39/55] Git grep for quickstart and replace it

@appinteractive I'm having serious issues with kubernetes and I'm not able
to connect with the cluster anymore. If your

```
kubectl get nodes
```

returns anything, could you try out the current deployment README?
---
 README.md                                 | 2 +-
 human-connection/certmanager/ingress.yaml | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index 1b7235326..7f85e4b82 100644
--- a/README.md
+++ b/README.md
@@ -129,7 +129,7 @@ $ kubectl create serviceaccount tiller --namespace=kube-system
 $ kubectl create clusterrolebinding tiller-admin --serviceaccount=kube-system:tiller --clusterrole=cluster-admin
 $ helm init --service-account=tiller
 $ helm repo update
-$ helm install stable/nginx-ingress --name quickstart
+$ helm install stable/nginx-ingress
 $ kubectl apply -f https://raw.githubusercontent.com/jetstack/cert-manager/release-0.6/deploy/manifests/00-crds.yaml
 $ helm install --name cert-manager --namespace cert-manager stable/cert-manager
 ```
diff --git a/human-connection/certmanager/ingress.yaml b/human-connection/certmanager/ingress.yaml
index 4e55adaf2..52e358196 100644
--- a/human-connection/certmanager/ingress.yaml
+++ b/human-connection/certmanager/ingress.yaml
@@ -11,7 +11,7 @@ spec:
   tls:
   - hosts:
     - nitro-master.human-connection.org
-    secretName: quickstart-example-tls
+    secretName: tls
   rules:
   - host: nitro-master.human-connection.org
     http:

From 9dfedfd3171a23f8471900bb3f79a359b639ac8a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Robert=20Sch=C3=A4fer?=
Date: Sat, 9 Feb 2019 14:57:12 +0100
Subject: [PATCH 40/55] ReadWriteOnce cannot scale the backend

This kind of volume access apparently does not scale on a multi-node
setup. Last time I tried, it worked, but last time was on just one node.
So I assume that's why I see the following error: ``` Multi-attach error for volume XY volume is already used by pod XZ ``` --- human-connection/deployment-backend.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/human-connection/deployment-backend.yaml b/human-connection/deployment-backend.yaml index 00e08f1e7..8f8c6bf51 100644 --- a/human-connection/deployment-backend.yaml +++ b/human-connection/deployment-backend.yaml @@ -5,7 +5,7 @@ name: nitro-backend namespace: human-connection spec: - replicas: 2 + replicas: 1 minReadySeconds: 15 progressDeadlineSeconds: 60 selector: From 84da458ff1cfbe6e1759dc7680524cc6600959f3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=A4fer?= Date: Sat, 9 Feb 2019 16:04:22 +0100 Subject: [PATCH 41/55] Repeat HTTPS tutorial and update README --- README.md | 58 ++++++++++++------- .../certmanager/issuer-production.yaml | 16 ----- .../certmanager/issuer-staging.yaml | 16 ----- human-connection/https/issuer.yaml | 34 +++++++++++ .../{certmanager => ingress}/ingress.yaml | 0 5 files changed, 70 insertions(+), 54 deletions(-) delete mode 100644 human-connection/certmanager/issuer-production.yaml delete mode 100644 human-connection/certmanager/issuer-staging.yaml create mode 100644 human-connection/https/issuer.yaml rename human-connection/{certmanager => ingress}/ingress.yaml (100%) diff --git a/README.md b/README.md index 7f85e4b82..4efbdec4d 100644 --- a/README.md +++ b/README.md @@ -90,7 +90,7 @@ Those secrets get `base64` decoded in a kubernetes pod. ### Create a namespace ```shell -$ kubectl create -f namespace-human-connection.yaml +$ kubectl apply -f namespace-human-connection.yaml ``` Switch to the namespace `human-connection` in your kubernetes dashboard. @@ -105,22 +105,7 @@ Sit back and relax and have a look into your kubernetes dashboard. Wait until all pods turn green and they don't show a warning `Waiting: ContainerCreating` anymore. -### Setup Loadbalancer and Ingress - -Basically follow [this tutorial](https://www.digitalocean.com/community/tutorials/how-to-set-up-an-nginx-ingress-with-cert-manager-on-digitalocean-kubernetes). - -tl;dr: -```sh -$ kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/mandatory.yaml -$ kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/cloud-generic.yaml -``` -And create an ingress service in namespace `human-connection`: -```sh -# you should change the domain name according to your needs -$ kubectl apply -f human-connection/ingress.yaml -``` - -#### Setup HTTPS +#### Setup Ingress and HTTPS Follow [this quick start guide](https://docs.cert-manager.io/en/latest/tutorials/acme/quick-start/index.html) and install certmanager via helm and tiller: @@ -134,14 +119,43 @@ $ kubectl apply -f https://raw.githubusercontent.com/jetstack/cert-manager/relea $ helm install --name cert-manager --namespace cert-manager stable/cert-manager ``` -We provided some configuration in a folder `human-connection/certmanager`. To -avoid letsencrypt very strict rate limits, the default issuer is -`letsencrypt-staging`. If certmanager is working properly, change it to -`letsencrypt-prod`. Please updated the email address in the configuration, too. +Create letsencrypt issuers. *Change the email address* in these files before +running this command. +```sh +$ kubectl apply -f human-connection/https/ +``` +Create an ingress service in namespace `human-connection`. 
*Change the domain +name* according to your needs: +```sh +$ kubectl apply -f human-connection/ingress/ +``` +Check the ingress server is working correctly: +```sh +$ curl -kivL -H 'Host: ' 'https://' +``` +If the response looks good, configure your domain registrar for the new IP +address and the domain. + +Now let's get a valid HTTPS certificate. According to the tutorial above, check +your tls certificate for staging: +```sh +$ kubectl describe --namespace=human-connection certificate tls +$ kubectl describe --namespace=human-connection secret tls +``` + +If everything looks good, update the issuer of your ingress. Change the +annotation `certmanager.k8s.io/issuer` from `letsencrypt-staging` to +`letsencrypt-prod` in your ingress configuration in +`human-connection/ingress/ingress.yaml`. ```sh -$ kubectl apply -f human-connection/certmanager/ +$ kubectl apply -f human-connection/ingress/ingress.yaml ``` +Delete the former secret to force a refresh: +``` +$ kubectl --namespace=human-connection delete secret tls +``` +Now, HTTPS should be configured on your domain. Congrats. #### Legacy data migration diff --git a/human-connection/certmanager/issuer-production.yaml b/human-connection/certmanager/issuer-production.yaml deleted file mode 100644 index 93090b160..000000000 --- a/human-connection/certmanager/issuer-production.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: certmanager.k8s.io/v1alpha1 -kind: Issuer -metadata: - name: letsencrypt-prod - namespace: human-connection -spec: - acme: - # The ACME server URL - server: https://acme-v02.api.letsencrypt.org/directory - # Email address used for ACME registration - email: yourmail@example.org - # Name of a secret used to store the ACME account private key - privateKeySecretRef: - name: letsencrypt-prod - # Enable the HTTP-01 challenge provider - http01: {} diff --git a/human-connection/certmanager/issuer-staging.yaml b/human-connection/certmanager/issuer-staging.yaml deleted file mode 100644 index c7b4e0cc3..000000000 --- a/human-connection/certmanager/issuer-staging.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: certmanager.k8s.io/v1alpha1 -kind: Issuer -metadata: - name: letsencrypt-staging - namespace: human-connection -spec: - acme: - # The ACME server URL - server: https://acme-staging-v02.api.letsencrypt.org/directory - # Email address used for ACME registration - email: yourmail@example.org - # Name of a secret used to store the ACME account private key - privateKeySecretRef: - name: letsencrypt-staging - # Enable the HTTP-01 challenge provider - http01: {} diff --git a/human-connection/https/issuer.yaml b/human-connection/https/issuer.yaml new file mode 100644 index 000000000..8cb554fc6 --- /dev/null +++ b/human-connection/https/issuer.yaml @@ -0,0 +1,34 @@ +--- + apiVersion: certmanager.k8s.io/v1alpha1 + kind: Issuer + metadata: + name: letsencrypt-staging + namespace: human-connection + spec: + acme: + # The ACME server URL + server: https://acme-staging-v02.api.letsencrypt.org/directory + # Email address used for ACME registration + email: user@example.com + # Name of a secret used to store the ACME account private key + privateKeySecretRef: + name: letsencrypt-staging + # Enable the HTTP-01 challenge provider + http01: {} +--- + apiVersion: certmanager.k8s.io/v1alpha1 + kind: Issuer + metadata: + name: letsencrypt-prod + namespace: human-connection + spec: + acme: + # The ACME server URL + server: https://acme-v02.api.letsencrypt.org/directory + # Email address used for ACME registration + email: user@example.com + # Name of 
a secret used to store the ACME account private key + privateKeySecretRef: + name: letsencrypt-prod + # Enable the HTTP-01 challenge provider + http01: {} diff --git a/human-connection/certmanager/ingress.yaml b/human-connection/ingress/ingress.yaml similarity index 100% rename from human-connection/certmanager/ingress.yaml rename to human-connection/ingress/ingress.yaml From ec0b229e61a722658e76815069261c09e3ac2cd0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=A4fer?= Date: Sat, 9 Feb 2019 16:39:31 +0100 Subject: [PATCH 42/55] Tiny fix in README --- README.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/README.md b/README.md index 4efbdec4d..366598bd1 100644 --- a/README.md +++ b/README.md @@ -175,8 +175,7 @@ $ kubectl create configmap db-migration-worker \ --from-literal=MONGODB_AUTH_DB=hc_api \ --from-literal=MONGODB_DATABASE=hc_api \ --from-literal=UPLOADS_DIRECTORY=/var/www/api/uploads \ - --from-literal=NEO4J_URI=bolt://neo4j:7687 - + --from-literal=NEO4J_URI=bolt://localhost:7687 ``` Create a secret with your public and private ssh keys. As the From f6c38412202d6063cd2cf4078ab06e7084514bdb Mon Sep 17 00:00:00 2001 From: Grzegorz Leoniec Date: Tue, 26 Feb 2019 13:04:59 +0100 Subject: [PATCH 43/55] Fix Digital Ocean Documentation The order of commands was wrong as the dashboard was not correctly setup when calling `kubectl proxy` --- README.md | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 366598bd1..f0c71ba84 100644 --- a/README.md +++ b/README.md @@ -38,10 +38,6 @@ First, install kubernetes dashboard: $ kubectl apply -f dashboard/ $ kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/master/aio/deploy/recommended/kubernetes-dashboard.yaml -``` -Proxy localhost to the remote kubernetes dashboard: -```sh -$ kubectl proxy ``` Get your token on the command line: ```sh @@ -64,7 +60,13 @@ namespace: 11 bytes token: eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLTZnbDZsIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiJiMTZhZmJhOS1kZmVjLTExZTctYmJiOS05MDFiMGU1MzI1MTYiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWRtaW4tdXNlciJ9.M70CU3lbu3PP4OjhFms8PVL5pQKj-jj4RNSLA4YmQfTXpPUuxqXjiTf094_Rzr0fgN_IVX6gC4fiNUL5ynx9KU-lkPfk0HnX8scxfJNzypL039mpGt0bbe1IXKSIRaq_9VW59Xz-yBUhycYcKPO9RM2Qa1Ax29nqNVko4vLn1_1wPqJ6XSq3GYI8anTzV8Fku4jasUwjrws6Cn6_sPEGmL54sq5R4Z5afUtv-mItTmqZZdxnkRqcJLlg2Y8WbCPogErbsaCDJoABQ7ppaqHetwfM_0yMun6ABOQbIwwl8pspJhpplKwyo700OSpvTT9zlBsu-b35lzXGBRHzv5g_RA ``` -Grab the token and paste it into the login screen at [http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/](http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/) + +Proxy localhost to the remote kubernetes dashboard: +```sh +$ kubectl proxy +``` + +Grab the token from above and paste it into the login screen at [http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/](http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/) ## Installation with kubernetes From 1e35278d8c0481576d653fc453473cd359261677 Mon Sep 17 00:00:00 2001 From: Grzegorz 
Leoniec Date: Tue, 26 Feb 2019 13:33:34 +0100 Subject: [PATCH 44/55] Fix wrong secret references --- human-connection/deployment-backend.yaml | 2 +- human-connection/deployment-web.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/human-connection/deployment-backend.yaml b/human-connection/deployment-backend.yaml index 8f8c6bf51..61f533f52 100644 --- a/human-connection/deployment-backend.yaml +++ b/human-connection/deployment-backend.yaml @@ -51,7 +51,7 @@ - name: JWT_SECRET valueFrom: secretKeyRef: - name: secret + name: human-connection key: JWT_SECRET optional: false - name: NEO4J_URI diff --git a/human-connection/deployment-web.yaml b/human-connection/deployment-web.yaml index a3dafe766..64111a8ae 100644 --- a/human-connection/deployment-web.yaml +++ b/human-connection/deployment-web.yaml @@ -36,7 +36,7 @@ spec: - name: JWT_SECRET valueFrom: secretKeyRef: - name: secret + name: human-connection key: JWT_SECRET optional: false image: humanconnection/nitro-web:latest From 1c20305c21c77821271fa3c4a3f0009f705d7412 Mon Sep 17 00:00:00 2001 From: Grzegorz Leoniec Date: Tue, 26 Feb 2019 13:36:43 +0100 Subject: [PATCH 45/55] Fix staging urls --- human-connection/configmap.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/human-connection/configmap.yaml b/human-connection/configmap.yaml index 50ae17e23..d38fb66f8 100644 --- a/human-connection/configmap.yaml +++ b/human-connection/configmap.yaml @@ -3,12 +3,12 @@ kind: ConfigMap data: GRAPHQL_PORT: "4000" - GRAPHQL_URI: "http://nitro-backend.human-connection:4000" + GRAPHQL_URI: "https://nitro-staging.human-connection.org/api" MOCK: "false" NEO4J_URI: "bolt://nitro-neo4j.human-connection:7687" NEO4J_USER: "neo4j" NEO4J_AUTH: none - CLIENT_URI: "https://nitro-human-connection.human-connection.org" + CLIENT_URI: "https://nitro-staging.human-connection.org" MAPBOX_TOKEN: pk.eyJ1IjoiaHVtYW4tY29ubmVjdGlvbiIsImEiOiJjajl0cnBubGoweTVlM3VwZ2lzNTNud3ZtIn0.KZ8KK9l70omjXbEkkbHGsQ metadata: name: configmap From 7c05aadcf76271bbd34f0f745d4f9619c7a6d833 Mon Sep 17 00:00:00 2001 From: Grzegorz Leoniec Date: Tue, 26 Feb 2019 13:46:03 +0100 Subject: [PATCH 46/55] Add some more information how to setup the configuration right before deploying to a remote cluster --- README.md | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/README.md b/README.md index f0c71ba84..6eab3988d 100644 --- a/README.md +++ b/README.md @@ -33,6 +33,17 @@ $ minikube service nitro-web --namespace=human-connection ## Digital Ocean +1. At first, create a cluster on Digital Ocean. +2. Download the config.yaml if the process has finished. +3. Put the config file where you can find it later (preferable in your home directory under `~/.kube/`) +4. In the open terminal you can set the current config for the active session: `export KUBECONFIG=~/.kube/THE-NAME-OF-YOUR-CLUSTER-kubeconfig.yaml` + + Otherwise you would have to always add `--kubeconfig ~/.kube/THE-NAME-OF-YOUR-CLUSTER-kubeconfig.yaml` on every `kubectl` command that our are running. + +5. Now check if you can connect to the cluster and if its your newly created one by running: `kubectl get nodes` + +If you got the steps right above and see your nodes you can precede. 
+ First, install kubernetes dashboard: ```sh $ kubectl apply -f dashboard/ From f13f1d39c91bf0db708a9fd494f5589d270a5d46 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=A4fer?= Date: Tue, 26 Feb 2019 13:59:24 +0100 Subject: [PATCH 47/55] Update README.md Co-Authored-By: appinteractive --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 6eab3988d..93c530a7f 100644 --- a/README.md +++ b/README.md @@ -42,7 +42,7 @@ $ minikube service nitro-web --namespace=human-connection 5. Now check if you can connect to the cluster and if its your newly created one by running: `kubectl get nodes` -If you got the steps right above and see your nodes you can precede. +If you got the steps right above and see your nodes you can continue. First, install kubernetes dashboard: ```sh From 64bf1bb2a404e3fc71db5f0a05a8b130e73df65c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=A4fer?= Date: Tue, 26 Feb 2019 13:59:41 +0100 Subject: [PATCH 48/55] Update README.md Co-Authored-By: appinteractive --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 93c530a7f..8af46e528 100644 --- a/README.md +++ b/README.md @@ -36,7 +36,7 @@ $ minikube service nitro-web --namespace=human-connection 1. At first, create a cluster on Digital Ocean. 2. Download the config.yaml if the process has finished. 3. Put the config file where you can find it later (preferable in your home directory under `~/.kube/`) -4. In the open terminal you can set the current config for the active session: `export KUBECONFIG=~/.kube/THE-NAME-OF-YOUR-CLUSTER-kubeconfig.yaml` +4. In the open terminal you can set the current config for the active session: `export KUBECONFIG=~/.kube/THE-NAME-OF-YOUR-CLUSTER-kubeconfig.yaml`. You could make this change permanent by adding the line to your `.bashrc` or `~/.config/fish/config.fish` depending on your shell. Otherwise you would have to always add `--kubeconfig ~/.kube/THE-NAME-OF-YOUR-CLUSTER-kubeconfig.yaml` on every `kubectl` command that our are running. From 3eda8691bd3b4579d41b47128baf25b68701f58c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=A4fer?= Date: Tue, 26 Feb 2019 14:29:03 +0100 Subject: [PATCH 49/55] Typo @appinteractive you could use a translation service to easily stop typos --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 8af46e528..e4d0c5396 100644 --- a/README.md +++ b/README.md @@ -38,7 +38,7 @@ $ minikube service nitro-web --namespace=human-connection 3. Put the config file where you can find it later (preferable in your home directory under `~/.kube/`) 4. In the open terminal you can set the current config for the active session: `export KUBECONFIG=~/.kube/THE-NAME-OF-YOUR-CLUSTER-kubeconfig.yaml`. You could make this change permanent by adding the line to your `.bashrc` or `~/.config/fish/config.fish` depending on your shell. - Otherwise you would have to always add `--kubeconfig ~/.kube/THE-NAME-OF-YOUR-CLUSTER-kubeconfig.yaml` on every `kubectl` command that our are running. + Otherwise you would have to always add `--kubeconfig ~/.kube/THE-NAME-OF-YOUR-CLUSTER-kubeconfig.yaml` on every `kubectl` command that you are running. 5. 
Now check if you can connect to the cluster and if its your newly created one by running: `kubectl get nodes` From b0c9d818fd5dc66cf0a712ad536fa80a0efc9389 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=A4fer?= Date: Sat, 2 Mar 2019 20:37:10 +0100 Subject: [PATCH 50/55] Start an automated build by adding a .travis.yml --- .travis.yml | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 .travis.yml diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 000000000..0a332d673 --- /dev/null +++ b/.travis.yml @@ -0,0 +1,7 @@ +language: generic + +install: + - curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl + - chmod +x ./kubectl + - sudo mv ./kubectl /usr/local/bin/kubectl + From ac6cc8ab2c7e2e3b6b4a65bc241a2f9b4657f59c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=A4fer?= Date: Sat, 2 Mar 2019 20:45:01 +0100 Subject: [PATCH 51/55] Check in encrypted kubeconfig file and test it with `kubectl get nodes` --- .gitignore | 1 + .travis.yml | 7 +++++++ kubeconfig.yaml.enc | Bin 0 -> 6064 bytes 3 files changed, 8 insertions(+) create mode 100644 kubeconfig.yaml.enc diff --git a/.gitignore b/.gitignore index 18b453e6b..aad0daea8 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,3 @@ secrets.yaml */secrets.yaml +kubeconfig.yaml diff --git a/.travis.yml b/.travis.yml index 0a332d673..0efdffb09 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,7 +1,14 @@ language: generic +before_install: + - openssl aes-256-cbc -K $encrypted_87342d90efbe_key -iv $encrypted_87342d90efbe_iv -in kubeconfig.yaml.enc -out kubeconfig.yaml -d install: - curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl - chmod +x ./kubectl - sudo mv ./kubectl /usr/local/bin/kubectl + - mkdir ${HOME}/.kube + - cp kubeconfig.yaml ${HOME}/.kube/config + +script: + - kubectl get nodes diff --git a/kubeconfig.yaml.enc b/kubeconfig.yaml.enc new file mode 100644 index 0000000000000000000000000000000000000000..bbbd8ef34f134e32225378d1df423414152b3b50 GIT binary patch literal 6064 zcmV;h7f&uD7OP!D4 z7{Kp`WhL^gX-Urj>N9{{4J$;0|fq3F?XwYn4YCJj8f2_ z-xZ=aanJ&YzwV|1KK(D=ZlA-@gc6@0S5Y7gIhzt)H>KOktO6UN0zkseY#+(w9Umo3 z(M)RSFzt@I(`&Gi8;>qC>H(u9nkU*f9zk&xr64|hK*m@QZ1FDdh?^M9V#_H(Yshmi z%0eGkwWbAzAhOp*(@Jt7yKtUE(62VlA(8GS>oydSAmr%eI2aayqlftVnJ*e--iebX zKnA-fkOe>&V}x)>tS{0GP#A27bz11BncV+VS)wtN<@O4-PiumW3G?cQkWk70&{NyD zyGISms?+3pymFUdV8>=z;D&Jw_9v=vUG*3 zCTEM<>ESg)cwpit(s9cXOi(`J-9^(tWy3$V8pwtvT4;RMKjLh({1l$Q5MH|3*{bMi z-*}r3GE_A|dG97MF4~k4?M!6ZtXpRK*sR)yIa1GpGSQK6S!DX=W)9i{OHBg4duotN zDhm0QZ0_VjG#3)em#p$$O>#`xm;4kqCi`jx>!dRuvG9Pm2IP3{$(<%xm}7=i&P1_3!Xk(ZdSD650>EzWbYNQ z?wx4ppO+CH7bRr5q8F;Nh8)Z?46ac?3SLbn8||QGR42tVBGfuZG=|z&yszHwA#fp# z`S~K>c@bi0!~KP85pDHM>~_u{|G}9hGL6>X$ohTObI=Ho`}hq`39}^0np@*3f{c0J z?b#YSPvA8vk0#W#GNWs6AYg>+=bY_mhVUG0J4~HpcO~Z!yQ>Tx=wc(VUUxzbe?N53}TqRHaZW{6k5La+U7efr2}KgdLi{IbpU9bSH?_h%Pyts|LvOX zB3V_u+S$hvCftH|Z$!zR2SdT^JcKZ$*p5^W8uKZ^Ar7F;A34SXVZ7mt>X8e#B#&6L z>0Ozf!-x2NSQL>m+Ch>o1KkDC0}q{w)bWObc9M53V3<_`gjKHf8E&oW`8u|7=?t0ijaCs~{;$VTPa160{8Io={C+N;H{o_X>`CIO%X~-L_oI)6bnne~@ z)xj{4pd8Pzt!LZIvN{{D=aTLiIuG&7eEu#Ufu50k;vU$#mjf^Ot9NrgQjnmM5ud`s zSq?E5(9UCOGk*XQJlVjg9 z42#B7^~*3`9`BRdWESyv1OIjY_O+L(|GF!t(0G$SLEbo*4@)7~uhf z0&4opb-0Kkvm&x7KTWpsmx`Yc@suG`zoC)`Q?E(=yd`>mIM~WmMTNsiLKv#UX-8li zEeag-9c2Xwx{oO@zpa&33SVm}kC8N6!;@gj=?SjrwAa}d(b-dtBs%%y+0Lb#C}6ih 
zDqGTu!5r4PAgvRz(H&eLNX@QYYRB?(GpWmyc$r-*aBl+3t>TfD$caXR9dbm;#5&m)$gymQV&(_ z`Ms^2L^{-hb&R-jd%TQIw@Z|H{N}lfyf+nN7O;s##K|)9rAsYJ-rx%>Y>GU$r2xj*9|&BKVqR7+MFr z-QvolQl%|J4`5H#P7fzHw#-5pA_U(A*jb>*aul%y|C5#$^zpFXII0PDX$S|-7CjzB z>35gm`$A%(pi~Pp_VzCW!%Ad0Nd`sp<^vWmTUaEp{Lk0MD}$=#{q1$92Pt%pF8Wu5HwJRjo9yvuoa5cl6tUi9#lC0)!kFbv|9gg&${X_kn)|>l%fz zJ}9Wysiy>Wd2Cab=x-cvT;jPy%P)$-26rCxr00N)bQc>y%1~z#;Y4o=25!5I)P$aZ zvx({^)OCK7l2TVN<(%3lB_Vybu30!%4M3+K(uesS2(K5 zTD&L>Zz^Zv;UdTPHH;Ec=QlNkAA7iQvA*?HW_sQ;S~2j-?41PRAQYQupfoifaDMn; zQI1~!NDF(^A}RGCmakc|n#0_N`SXNoIZow54!BCHF&H6P5g>X_cz6fb7>)+Oi}n7f zP&Q#p;j*TXsXT<6V(h+>n0f-mlElTun;mQhkeHYn9E5Jt%~RA!4}k6FdVcCsqCNJs zC13-QXtZlP^7hvLSD(lo?rEFMXPo@UNpMFq??_AM9xLMbRB7+Iw}HZPcD{ZKS6bwuFy6 zDc1iv`ukFDu;!?3X2(iI!FmU?qT3P;N2Ie2_sfUsZ~8(siAGx3TYLU@x4yde-ADka zG0b1!c0eowKQcFRWXEg7zM5CLyTCo-bol&zoiDPf|8&Fxd+yGN@oD~dm1`!JyTluGG*ZrX2Nwh94t0fQ+8+*4^O(~g;)q7Z(b@Q%-u;yPR+(1f@|bc z)&??=4-`AKykl@zX@%(r;Y<1x6#)IUs)&w0OJ~HXF$dNLYM8M!!qu(L&k=fnhom-$ zlrX=fDS?7_@DgF-|rIt}&wFaZLpv3sLHA`qTX%$M)g80k7(iJ$w(BXUVJb!gB zt}MjpO2M{;sC0kV5hf#HjQj`vy%3cE3RyM6Lw&=3T3o>ECP_vSmJ`%v)}=Gwh{}yz z!sGjo-=@>YDfi--&GUGm@RE+xU72P=UlG7D(eDB_8H|p*O&<5{Ml5E;W*18OjdwB( zFa+`wj`LlS+6IwtCY!|CzQe<==Opb`Bxhv*Ap39>v%>N6~RqlK05O2wRsKnYZUCE;as$dOH zQp8tEqm}6C_LU-lddQB+=D zw;l>GPb(LA2otMSyaexnf;CoMfal<WDIb2Rs5GMpi zy9C);rQ5&Szm}c>AyMQ#3-dxYPI8g;is5$4$g=cv+BtWZ&c2BchpwZ;13^hrjsNQ! zXh7eDV9zeycRKQl{J{2b5BW2tPg;`{h-VwN@vheh-}wkzidbb$p_GD_T36ExE@6Fo zDPCD?#c@sLtTe-3p<@agl4NamzudHL*v-0z_lS#pjK({1dgATN zf288i8fk_pTh|~U2{OkkXI=JdYojDm%b6T|W~lJw$6nZ43&SSC1Kf<9{@H)FkUKm@ zc|CQZN*!x!ha2R}-g$;}QY5MH-gvoSoE2!l`+ML;<8Sei9CXKZ|p-R3bEPJv{L`Re7@QuH{Sx%gh5n-gP1k zC1$6dyu=B9*HAMkN^D)Q(ddrWaIGl*Un~03M0>!1jNnQ6Yj4S)xbH2c$y^8wi*vkF z9&?*!joHH_RXnnDHR^KU4M&sn2pN= zlHnEPXDIk;?Hxc7;ptwo)pnmf!;)Kn;F*>h2RUJy_}8J4HPsPIp~5bof;w#NaCK-~ zms-5GK^=)9pwj7Rc7G<9fT{`IlhE`i=*IA|U?1-r$%Hwd_C^kcvdBs1X-IlrMQ;AB z2!h66=UomOjyX-OloRG8FMzZJnqR4d?B(NnQ#}JH24#2d& zr`qvTg|kxme6D1>oAxe}eb2aPux zz}~S98vLqDVof)cMcrK?!04`=fk%xI=?j0|$LDHyZltw^O=yG8>C!7#eaM!bQGTA+ z6?p)iz7m@|6cT()Yk?_XT3%(SkSRiY3U1rVP2U417IZe>0w5TYq??}1}0hNZ$?i2S}(mfQAd(^pD)%O@#4 zQ7?eI-j$8~6Law-c`|Li$Cs~X-1#xb8D5y6pf3_xZaYx)a|i95(X^SaU^ZD zzfbkVIhlMVy(n8=U+*O|z-R^sGNU70nK&*2k~rOE)G^ms@QiCchy3$B}~q!FmDM+@iI z70>8MNyYo7-;;|lXGQLrL_-Pblk54y0#Fy}(6Ujdm-^Jn>>Od3lU=f3UJvf^G z+tK)02}#RjY(^yXsHR{<=4=m5lXzB&Y@380XmBa39Wqb27sR0Nr@pBRvzpi1i##u@ z3o3l{zL`yZRkxdp6VC#2D%d|dh_^ST6C@f->$(tY11o&G$`?!eo_YCh1KJQdCVpV3 znU@@w2c%3WC-?yzz#{9vZ;*tEpvxVtffdQM$Lvva7%O}9C%lO1O0t?LaX-6E7(sr= znAbJnq5tk&yA?@yhS2JOU_Xsv@eI-~n3zjNBXJLSad*kko-{=>GV0l?ozoC6uCsF8 zZbMw)Htwcbjzy0=51O{9K-pX)y5L=148pG*1ZCg>0+wfYod&$Zvr#?3{`-zxu>Bqr zxtMpzt8j58AKLD6)FY$;*1Va_1+3Mpsc{*JP>(~=NY6JGzv)^4G=ff2&1xKz<5t5( zDKVX@7l7K7XK~mMVX+Nw-4t}dF{4@28v+PRSM$zi5{D8cDGvzj;K<4C5x`f=FAr`w)_h>^XKnL}GU}LPalm&u7# zYMT9k+kEYj)jGQ8o8Ia0?!$k_x5+eGq`t?ci5F4!K+a=4!k9O^L*$bCeWp)-Fi@mQ zu?K+YS5KS5*7r_{UN~p;T)`r2Y$C{kN!gRj)-m6O*%Z8bIqlG?G+8B@)b~8rS$<*t z@D2D=2t&CJr%Iyz8 zDM1^d{%ZVDW2mGzP$-#ukLN&JrP8~wSiTZ@gL^1U+z(ooVgxo;7{P}{?QA$28S6Zl zMPXh=A`jduCF@~=VWY-??vyQf{1$Lo32<$s@F+ZI~{MQ`ZEjfu`(WEVq$T8 z;J}@)52D7CNIrJ-@}!B{2ExrAdD#JiQbH0mjAN1UdgMecnpl}W%i6jOC;ebsX#?Zx zx)}vE=m^v7Wnkr*7J+qRN0ZOAi8YW`C$)f;fceLpfwV%_CV^~UkOF?g2ky%JR|Xyu qvvZUIk3SPH#_mZ7^4Nr?Ju4Lhlfo}fPo#-TR7D;!MJMbgVP!S literal 0 HcmV?d00001 From 036ce541520e456324e1958d832be3cf61225169 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=A4fer?= Date: Sat, 2 Mar 2019 20:51:17 +0100 Subject: [PATCH 52/55] Second try: maybe the env variables weren't set --- .travis.yml | 3 --- kubeconfig.yaml.enc | Bin 6064 -> 
6064 bytes 2 files changed, 3 deletions(-) diff --git a/.travis.yml b/.travis.yml index 0efdffb09..cc797dceb 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,5 +1,4 @@ language: generic - before_install: - openssl aes-256-cbc -K $encrypted_87342d90efbe_key -iv $encrypted_87342d90efbe_iv -in kubeconfig.yaml.enc -out kubeconfig.yaml -d install: @@ -8,7 +7,5 @@ install: - sudo mv ./kubectl /usr/local/bin/kubectl - mkdir ${HOME}/.kube - cp kubeconfig.yaml ${HOME}/.kube/config - script: - kubectl get nodes - diff --git a/kubeconfig.yaml.enc b/kubeconfig.yaml.enc index bbbd8ef34f134e32225378d1df423414152b3b50..0edbe1bbc3bf782bcf8212b58903edba92e7cf3a 100644 GIT binary patch literal 6064 zcmV;h7fXI zu?>MMGZVG=4Ny|(vkswZ0^nJzb?#lJ{66D)Eu?}Ws-fY)D=$@Ae@-GF390#m4$Bfb zUpI9mcP`rLf`Nzq`in*~#|>s@JXb!P1IQ~psX1BY96WJ-uYGPSkK%?$%Y%^iZ=RER zo>>cy5~%QPhw?^i8K5e^kTgr(py|jWFI0k;DSMVD;-<2{aF3T#opDI0&v5{n`nMci zgW1Z&%UXI*Taa-bm7=c1vXSs5sXmJGaa`g2X?I}>O$HRS6M8p9_Lnxyl6C}fYK!|xSR6$IF9GN$D`XWnytCsk>g&E2pe5W_Orl)w87ywy z^qn4Eoi-dAydTpEXK4OSh}YAJf4-fz94%S?pw)FP`U(=7ZtPrwI%vY1<5@--P&Qr_*vSVpwlmsPrL= z2{H#XAsEwU+|AebL!>>5lN(5jL~b!veQ{cDUvqOQs#U@Y1IHcPg`4iR!M7sUOp5T{ zMSomRgah!<#ls%VgDGuhBGZQv^bsj6*~&MFF5qijJBD!JYa}&~hd_&f*maiVoF%oU zX~sUNkRs%f)d}i9Ka`P0Mv-ALRxZ%4ki`<4+En;ZOiWij>%DstwJ3`QWl;8SV0;)x z!Jq#g8pqkpr;*g4P{5)?^gs#PYh`oAw*f?g73h&4Ze*fed{Ho@KqQIRqJ;n#eka4U ztbHKyIcf`YR(Mc7MKfq;%&7ix14c05l8^nAu12u_f}P4CFhtR zE)>`3f5!Vcr5e;OB~{`Am_b*SpbaTMbgUq8b)kbtuhvb3R>zShUdS?vM$jo^i_yd? z-{EZ=0cuC>dmD|wJR@7un3Po@hg!t%O;4CYHCwc1zP7vw#E6#5jgF1e7?4F|Fl&L# zw>&Gm2Bcf-!bO%k>2WP4rP=qRwMu+rB<6c{qF0=CjTao?s*8T`7{lb2U_^EPCfi1t zUcuCXf>)d~?y66M;r#H=MXN4AmZp0J);_0pzoYj5HrD}s7?MOjj@=2sdOR8v3RaNz z(t*&`8rQ3rEECSsL<9(dmX8NH7?=I$jIhgEi$TN`>OHe;3%->DT;8L0TCTR@HZ!Hs zEq7`4Soe}2I?u^*vyAe$zdYCa7haMp(7VRfs9o|cP^65*kk>iYSiUlKl>>Isb#wr8&5<3rVppPfT+aDR{+4%@=cEV9CdN+JFNDa|-f zpVNwS1Ro(2{KFz)TQLXECr$rY}(l_*7jEN_# zf!siLBhAiF-D_EeripTXJYwY640Abg!7AK+60FJWBXF4=W0GSVdbOu& z9w~xo3K=}a&WoQH3MaN{UE}_sdnF`PvT!NcF^ub%zrU50mSa;7#n$Jo`|uylg$nD; zUW_^8%p#K5s!A(N2=z!~#1 zWeALEL7;&*i@{sqBk9uF--E~%%B9gvUA2LDq|&bl9~bFu$d00gKm&?E+U- zkOV_-TCatdF~^oa+uLyRj{Hn1&BIQiy*X1{cwX4}C0&*&D%{DQ#ab*?a9mq4rp3Lf z2%ph6S;VU=HK&k2I!zLTWjf0VNo%6G&I3?>i9ze07@l%cyNL|J06W;snoz!L z<}A5Q1r7;#(8r*A=Y{#uk=^3-S5G>8_2R2>F5u0} zz`halePG?#XK5Q6F3K65tnAoGxqD{v`5A_=A!2gAyw-I7*X7WIwXitJd=D`fVx;<) z3(&{Y*ScP^G02A$~&V>Gf&Cwe1RuR3<9B?+zakEr@iDzX3kuMrsw_W02A$l+L|5hLr zfTau9%a<~eM7(>mj5Y8KC6O|2zshxVl-+{k%SO7vc!04l_>0vcx4w#*&Yr>c`MpBJjgcgcyILPj1Gac7dQ*#AH z?oI3E#5N0HjH8Flbv<9QJ8Ka-vLKj4Kjw(kK{_fxXE>?+wtn1dnXA_HiS~8TEK+xq z*Yr9K)dTcAIU(EarDTk=6u-epTC_}Tt&4ohp*}#|byJmc+hM3|hCuT&y5qKje;%e< z6Uds&#U*hP>qji-h3Z4`%Q2(F_WNL6s+%6`!9Dv%vd)OGUWnrC0t#pk!kBQ7te@{C zgPVXz^mj?kP2f5LzDQu@SU8J2qrjRfVu$wwBRbH&K#v;mh{aN_Go9l_oC(e~ecKF7 zPSq3#k;iYwyHSKl<;Pn8dyNaul!l3pYc=!;suti!ChsRga@Tw$XpL4D8iLXN-Ca@L zbI29CayqQS!D0zSw_uQ0?>8mXzU8f!9_Lx!bz#`_8YEsqf+X$NS?CltBy6Sd3mg0R z3WV~tff^SmrIZ&E~EQlwN19v|pyRzV)CqwWD5w*?{o-FlUHyzr#1TGS&tM~2$ z0YFo!w>OWAx<01w< z7h7pKhdt3j+|N$y#br}z;i&iIrKWKUHU**8{%d6vxdJiCQS{zI#m|&UJ{EWqH-zao zb%`>OJE_%!U#}pc`33sN1T5Kbdq%+ZO423Ft;{9HwJL}5o{jG#d1S-#79~yhPNz>V zQ<$2K>p@8OZ@w1?iQ-z1GEdywD@$8JR_ywtud?A|!L8CmVMzMs@aoBCxWpu+bEjd0 zI(Z+g0X{3$Nj(Xg$bEgj1p2KTI(X36xl3ulKv$e>>OJh{S*#FfXmv(ewWBA|7IWD` z7rfIqXD9{!cgei7=t2`l)cTlur9HluUMlMR&zufT#e3_1Tn0vwYksY8rY(;DN%+r zI4m?)L;X-R%a)b_=3$JVr>o~~k20X!b^7y9c*0b^OF|7KO~()J$Yt?i*uFNxt73T` zuLJjegmKr)7f1eO+|u$FZr*LPH#87ku6Go%Pp{j*1g;|qDBvmRMhaOqYGZ~Bn zK&pBWJA>5=IWQf|iGz`FI@u7$bMJ0&eWT3Wkvp1d-Vq2qv`Aw`rClok=K}vlmqVLs 
z6KjngYb;aTndsmtWN5Unk?vUJxR-SL736@J6_O)99_iNqp-RqddNE6#)?yTEAD(Z1 z-(ZoIpF4#IQsJOY1=Pfrm~D8co2Lk|6=UnsBQl-PdX99uAc^aLzz0>Y5GDbZuuc zGOuE3^UO=-KBp*@Cx?TXpf-KlL<5dp7F+>aR-o3IVvU!D_hG>bmgzgVX-8!(a2U^7 zjgjh9_0d{^D#jTlA1PZqp^?>@cIj<5o23SRjZ01LL7T@Id6pV9BZq*JY)vsQf@HQ? z35?93K*uQ62#VUUB8Kt}#6T1{6(#r}*;{1(C@A1LfssgLl)kqFzb!gGMv*~?NTh6xgIeu?1-I{qIY2b)1aj(z33DxjGraoN z9p_$F_vPxt-VGvJ@A6nwa}L%F-piW#qE-@tk6H02OtRG&fV~THW6tp`0$?uQWJ$qa*p+X8Ak99i6HmrhS2RG zpz5RMyR*b5gFWXy*lZz+;vlti=RyhrpRvzDX;#_7fSFLagJhJF0&{Qc1T%L-PRZ24 zxD8#jkHh#-x}x4jr~`;lSOh^6BQ8cAp;NjLGa^Ihybl?~+x4&axYVb0@58MUN*=5S z+1`gSoz89zwlFKL{n&DgP}dsf6hOuVe2KFcAawWyl=ySEuyOTL~=n6CSD=2F@#wp#Qqbn>RPw%XxXQ2d+gsCdHlzCg@Gl2Jx;-8o~B8!>h=H_*xjo+sQtkW3^%nTu3}zh4BXb`Z~LK)2!9N zjd5>Co&G^^I(${hV^W0J+2qQ{`6W(<)bq7h8Wo+TMKb$0H3O{JyMlq_|6rJ7QiLkB zj;%laGS+$}fPcGud{weEmD-@ni7IoQM$4v&C4}Pb_7leX)NPChp@#PQ8^sgZ{l{PS zb#FO;)CBfE)Zd$LL7aFcV;&oqGc-r9a^WfE25~AsYnay7yE(ErjJKXY+3d(0+hV+P z2O*L7a`=dqTyLwkAs9chC)~(!1s7>P-eJ!i^oZfIxo@QMK_~z7S$|d(SjhxOAfb@+ zy8dHo_67dYQtM^B_t0hid6`9eVUST{R-nOE(!TDU}x4)M+OUkgYkD@LYG{P-@UpQb@Jt(y^8)X zG}ULA>17Z@+BlXI4eO4Yzw3{&bE@^_3uBLwN>|r&<@V*R?dIdCQ27o6%!IqjN5L`V z!Zxf0O0*!E29E;@dWe@^s3!>K*>C;x+bL)%L92X=h(J$Daf?nL`va-aIQ*Je9`}|F zGeTRzocX2nqk3=yD{-$@Yr@urIixvA+Wk`etRRFEVy5{Sy6w^5!1P09Q9V{=8Vlg3 zTL5Xon7MlSCEywTlw*vGKOMo{R|d`;y)b7zu7O4!O44GPB&qe2 z*~N2YtZ&Vwtk3$eroz7NdnbiXT;lSm_k<*PFtYL&`;pKj0wSTTdQ@kUC?`gQZ4(~! z2(C$Io7Fm&<8XP%n+&vmNx*N?(mtqU*8MMV?!TSXK$gRh%hdF|O3Q z0!6qc|3$YqozQ9GHj~?|AZ1AS`i94hZ^k0GSW@fR6sIK=O#^QqOaXn+u7iWXn80)Z z-!rzlpo24Y!Ra3TciNHHP;^l8c1errz7t>4`+tL&dia-l|AHC%6|&L}E^w2Yf8jC* zPKCCEeG)0`Q7VEwI!s4YV%RuRCrTsc)`>=WCfYz{?CKTcE_&)Z0TINCSaa*#uLQjY ziL)oIy_@`9NJub_PH}$q(mG4@yh4g;w#WeT>v(E zC_6vbAnchym7)pL_0OOHG9FvIy*y_GuP+ya3j|qz9jLvTcT}>6wT;2SCKwL0*L721 z15Z9(8Eni@g8@7Rf|(S@NVTHSeYsso71nmgm}gkcSnb>#Os)j_MGs`eA#|%RE>eo;N{*pIln2acop#iHF;FCsLZTsOu{))=6>9~7jTW|lq z!XQoP++3cbk2YUWP1-wu@sWZT)bBU)QwP9&CY_sLwk5zW3IUkeQSCuZntmPitwnmU zLv7RTVT>^iYP_Ox@J65aS!wQQxwMzWOo_-poWKGiF^RNi4zxv!l>d3kshx*^wW z-!Q9@E2Wzuc05GmDsOo#%?vxi+E}IfFWt!VXcI4223SC}N|u!tu3RXyc7Es;U|2I0 zfrODH1-6FFuXv`C?vXZ?5z{I?Or0EYxbp42u{(XiSVj2^c`(n z9Z}*=J;et}48zVqr;$#2 zrP%5aDjt^(9(hd1n(it;o~sL9*DOLPG{`n_LZP*Ys`v#M*Ky_4^d2w<*U`Ffxjzf+ z1rClV-45yNKp`-Jk_n*3h5>2a(PzKWW>oRJB3Q9&LnoTDyuomM7nk&vq$XlxJJgzj zHP67OXOX_aNv*LnzZdJR6;eZv5r~28SYT-ZMCQgvQ4-FEDyfY<()Y}?=d8Y`m@owk6b9Z81)Kn+Vj(&Dg#-;C&6peFh2(#vpyF&wEtspJTA7Z9!fcepfv9T?BUHt*l~vpIX2}P4 z;NTYLWPq(x`FiQq?^1Fk1R6RuCS1x*qNkQr&?RgfgR8jz2Lt@})Q@8c0DvzX>9CGuX{Argpu*}dB?h} z0dWe1i=-(3Kzp~}q~*|W2(`fH;k>06vaYmB`#CE;)UqrAtc@MQSW=ADJI1A7!%q?6 z;<};P@=k-vF^UhA;NSt&`BKeGOtQY^6Sfm7HNJ5%1t{^bR@1v8T`JHy2n_3N>8tt$ zAZ~nJx48!Vo`(^4C%bGY4N)X)CaJ7@R8rfDdWl?*z*BS1;{Vr+VeL5l0j#a1M*2gI zPsING3_<(&uD7OP!D4 z7{Kp`WhL^gX-Urj>N9{{4J$;0|fq3F?XwYn4YCJj8f2_ z-xZ=aanJ&YzwV|1KK(D=ZlA-@gc6@0S5Y7gIhzt)H>KOktO6UN0zkseY#+(w9Umo3 z(M)RSFzt@I(`&Gi8;>qC>H(u9nkU*f9zk&xr64|hK*m@QZ1FDdh?^M9V#_H(Yshmi z%0eGkwWbAzAhOp*(@Jt7yKtUE(62VlA(8GS>oydSAmr%eI2aayqlftVnJ*e--iebX zKnA-fkOe>&V}x)>tS{0GP#A27bz11BncV+VS)wtN<@O4-PiumW3G?cQkWk70&{NyD zyGISms?+3pymFUdV8>=z;D&Jw_9v=vUG*3 zCTEM<>ESg)cwpit(s9cXOi(`J-9^(tWy3$V8pwtvT4;RMKjLh({1l$Q5MH|3*{bMi z-*}r3GE_A|dG97MF4~k4?M!6ZtXpRK*sR)yIa1GpGSQK6S!DX=W)9i{OHBg4duotN zDhm0QZ0_VjG#3)em#p$$O>#`xm;4kqCi`jx>!dRuvG9Pm2IP3{$(<%xm}7=i&P1_3!Xk(ZdSD650>EzWbYNQ z?wx4ppO+CH7bRr5q8F;Nh8)Z?46ac?3SLbn8||QGR42tVBGfuZG=|z&yszHwA#fp# z`S~K>c@bi0!~KP85pDHM>~_u{|G}9hGL6>X$ohTObI=Ho`}hq`39}^0np@*3f{c0J z?b#YSPvA8vk0#W#GNWs6AYg>+=bY_mhVUG0J4~HpcO~Z!yQ>Tx=wc(VUUxzbe?N53}TqRHaZW{6k5La+U7efr2}KgdLi{IbpU9bSH?_h%Pyts|LvOX 
zB3V_u+S$hvCftH|Z$!zR2SdT^JcKZ$*p5^W8uKZ^Ar7F;A34SXVZ7mt>X8e#B#&6L z>0Ozf!-x2NSQL>m+Ch>o1KkDC0}q{w)bWObc9M53V3<_`gjKHf8E&oW`8u|7=?t0ijaCs~{;$VTPa160{8Io={C+N;H{o_X>`CIO%X~-L_oI)6bnne~@ z)xj{4pd8Pzt!LZIvN{{D=aTLiIuG&7eEu#Ufu50k;vU$#mjf^Ot9NrgQjnmM5ud`s zSq?E5(9UCOGk*XQJlVjg9 z42#B7^~*3`9`BRdWESyv1OIjY_O+L(|GF!t(0G$SLEbo*4@)7~uhf z0&4opb-0Kkvm&x7KTWpsmx`Yc@suG`zoC)`Q?E(=yd`>mIM~WmMTNsiLKv#UX-8li zEeag-9c2Xwx{oO@zpa&33SVm}kC8N6!;@gj=?SjrwAa}d(b-dtBs%%y+0Lb#C}6ih zDqGTu!5r4PAgvRz(H&eLNX@QYYRB?(GpWmyc$r-*aBl+3t>TfD$caXR9dbm;#5&m)$gymQV&(_ z`Ms^2L^{-hb&R-jd%TQIw@Z|H{N}lfyf+nN7O;s##K|)9rAsYJ-rx%>Y>GU$r2xj*9|&BKVqR7+MFr z-QvolQl%|J4`5H#P7fzHw#-5pA_U(A*jb>*aul%y|C5#$^zpFXII0PDX$S|-7CjzB z>35gm`$A%(pi~Pp_VzCW!%Ad0Nd`sp<^vWmTUaEp{Lk0MD}$=#{q1$92Pt%pF8Wu5HwJRjo9yvuoa5cl6tUi9#lC0)!kFbv|9gg&${X_kn)|>l%fz zJ}9Wysiy>Wd2Cab=x-cvT;jPy%P)$-26rCxr00N)bQc>y%1~z#;Y4o=25!5I)P$aZ zvx({^)OCK7l2TVN<(%3lB_Vybu30!%4M3+K(uesS2(K5 zTD&L>Zz^Zv;UdTPHH;Ec=QlNkAA7iQvA*?HW_sQ;S~2j-?41PRAQYQupfoifaDMn; zQI1~!NDF(^A}RGCmakc|n#0_N`SXNoIZow54!BCHF&H6P5g>X_cz6fb7>)+Oi}n7f zP&Q#p;j*TXsXT<6V(h+>n0f-mlElTun;mQhkeHYn9E5Jt%~RA!4}k6FdVcCsqCNJs zC13-QXtZlP^7hvLSD(lo?rEFMXPo@UNpMFq??_AM9xLMbRB7+Iw}HZPcD{ZKS6bwuFy6 zDc1iv`ukFDu;!?3X2(iI!FmU?qT3P;N2Ie2_sfUsZ~8(siAGx3TYLU@x4yde-ADka zG0b1!c0eowKQcFRWXEg7zM5CLyTCo-bol&zoiDPf|8&Fxd+yGN@oD~dm1`!JyTluGG*ZrX2Nwh94t0fQ+8+*4^O(~g;)q7Z(b@Q%-u;yPR+(1f@|bc z)&??=4-`AKykl@zX@%(r;Y<1x6#)IUs)&w0OJ~HXF$dNLYM8M!!qu(L&k=fnhom-$ zlrX=fDS?7_@DgF-|rIt}&wFaZLpv3sLHA`qTX%$M)g80k7(iJ$w(BXUVJb!gB zt}MjpO2M{;sC0kV5hf#HjQj`vy%3cE3RyM6Lw&=3T3o>ECP_vSmJ`%v)}=Gwh{}yz z!sGjo-=@>YDfi--&GUGm@RE+xU72P=UlG7D(eDB_8H|p*O&<5{Ml5E;W*18OjdwB( zFa+`wj`LlS+6IwtCY!|CzQe<==Opb`Bxhv*Ap39>v%>N6~RqlK05O2wRsKnYZUCE;as$dOH zQp8tEqm}6C_LU-lddQB+=D zw;l>GPb(LA2otMSyaexnf;CoMfal<WDIb2Rs5GMpi zy9C);rQ5&Szm}c>AyMQ#3-dxYPI8g;is5$4$g=cv+BtWZ&c2BchpwZ;13^hrjsNQ! 
zXh7eDV9zeycRKQl{J{2b5BW2tPg;`{h-VwN@vheh-}wkzidbb$p_GD_T36ExE@6Fo zDPCD?#c@sLtTe-3p<@agl4NamzudHL*v-0z_lS#pjK({1dgATN zf288i8fk_pTh|~U2{OkkXI=JdYojDm%b6T|W~lJw$6nZ43&SSC1Kf<9{@H)FkUKm@ zc|CQZN*!x!ha2R}-g$;}QY5MH-gvoSoE2!l`+ML;<8Sei9CXKZ|p-R3bEPJv{L`Re7@QuH{Sx%gh5n-gP1k zC1$6dyu=B9*HAMkN^D)Q(ddrWaIGl*Un~03M0>!1jNnQ6Yj4S)xbH2c$y^8wi*vkF z9&?*!joHH_RXnnDHR^KU4M&sn2pN= zlHnEPXDIk;?Hxc7;ptwo)pnmf!;)Kn;F*>h2RUJy_}8J4HPsPIp~5bof;w#NaCK-~ zms-5GK^=)9pwj7Rc7G<9fT{`IlhE`i=*IA|U?1-r$%Hwd_C^kcvdBs1X-IlrMQ;AB z2!h66=UomOjyX-OloRG8FMzZJnqR4d?B(NnQ#}JH24#2d& zr`qvTg|kxme6D1>oAxe}eb2aPux zz}~S98vLqDVof)cMcrK?!04`=fk%xI=?j0|$LDHyZltw^O=yG8>C!7#eaM!bQGTA+ z6?p)iz7m@|6cT()Yk?_XT3%(SkSRiY3U1rVP2U417IZe>0w5TYq??}1}0hNZ$?i2S}(mfQAd(^pD)%O@#4 zQ7?eI-j$8~6Law-c`|Li$Cs~X-1#xb8D5y6pf3_xZaYx)a|i95(X^SaU^ZD zzfbkVIhlMVy(n8=U+*O|z-R^sGNU70nK&*2k~rOE)G^ms@QiCchy3$B}~q!FmDM+@iI z70>8MNyYo7-;;|lXGQLrL_-Pblk54y0#Fy}(6Ujdm-^Jn>>Od3lU=f3UJvf^G z+tK)02}#RjY(^yXsHR{<=4=m5lXzB&Y@380XmBa39Wqb27sR0Nr@pBRvzpi1i##u@ z3o3l{zL`yZRkxdp6VC#2D%d|dh_^ST6C@f->$(tY11o&G$`?!eo_YCh1KJQdCVpV3 znU@@w2c%3WC-?yzz#{9vZ;*tEpvxVtffdQM$Lvva7%O}9C%lO1O0t?LaX-6E7(sr= znAbJnq5tk&yA?@yhS2JOU_Xsv@eI-~n3zjNBXJLSad*kko-{=>GV0l?ozoC6uCsF8 zZbMw)Htwcbjzy0=51O{9K-pX)y5L=148pG*1ZCg>0+wfYod&$Zvr#?3{`-zxu>Bqr zxtMpzt8j58AKLD6)FY$;*1Va_1+3Mpsc{*JP>(~=NY6JGzv)^4G=ff2&1xKz<5t5( zDKVX@7l7K7XK~mMVX+Nw-4t}dF{4@28v+PRSM$zi5{D8cDGvzj;K<4C5x`f=FAr`w)_h>^XKnL}GU}LPalm&u7# zYMT9k+kEYj)jGQ8o8Ia0?!$k_x5+eGq`t?ci5F4!K+a=4!k9O^L*$bCeWp)-Fi@mQ zu?K+YS5KS5*7r_{UN~p;T)`r2Y$C{kN!gRj)-m6O*%Z8bIqlG?G+8B@)b~8rS$<*t z@D2D=2t&CJr%Iyz8 zDM1^d{%ZVDW2mGzP$-#ukLN&JrP8~wSiTZ@gL^1U+z(ooVgxo;7{P}{?QA$28S6Zl zMPXh=A`jduCF@~=VWY-??vyQf{1$Lo32<$s@F+ZI~{MQ`ZEjfu`(WEVq$T8 z;J}@)52D7CNIrJ-@}!B{2ExrAdD#JiQbH0mjAN1UdgMecnpl}W%i6jOC;ebsX#?Zx zx)}vE=m^v7Wnkr*7J+qRN0ZOAi8YW`C$)f;fceLpfwV%_CV^~UkOF?g2ky%JR|Xyu qvvZUIk3SPH#_mZ7^4Nr?Ju4Lhlfo}fPo#-TR7D;!MJMbgVP!S From 0b678f77592b0efc238193706e58830294ba0c53 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=A4fer?= Date: Sat, 2 Mar 2019 20:59:33 +0100 Subject: [PATCH 53/55] Now the environment variables finally show up --- .travis.yml | 15 ++++++++------- kubeconfig.yaml.enc | Bin 6064 -> 6064 bytes 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/.travis.yml b/.travis.yml index cc797dceb..d0e134388 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,11 +1,12 @@ language: generic before_install: - - openssl aes-256-cbc -K $encrypted_87342d90efbe_key -iv $encrypted_87342d90efbe_iv -in kubeconfig.yaml.enc -out kubeconfig.yaml -d +- openssl aes-256-cbc -K $encrypted_87342d90efbe_key -iv $encrypted_87342d90efbe_iv + -in kubeconfig.yaml.enc -out kubeconfig.yaml -d install: - - curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl - - chmod +x ./kubectl - - sudo mv ./kubectl /usr/local/bin/kubectl - - mkdir ${HOME}/.kube - - cp kubeconfig.yaml ${HOME}/.kube/config +- curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl +- chmod +x ./kubectl +- sudo mv ./kubectl /usr/local/bin/kubectl +- mkdir ${HOME}/.kube +- cp kubeconfig.yaml ${HOME}/.kube/config script: - - kubectl get nodes +- kubectl get nodes diff --git a/kubeconfig.yaml.enc b/kubeconfig.yaml.enc index 0edbe1bbc3bf782bcf8212b58903edba92e7cf3a..e8db7817185fe140b51a93341b572acb558af03e 100644 GIT binary patch literal 6064 
zcmV;h7fj(f%QIkoPPXHT>1ujr)^^fYyiYcW##2i&5)t?08Cpd@>4kVOb3e-o3VBBk5 ze|~&Q#MqoLcSAwm*6zBm~GY&-|x*L1UuC-6v*k{tqym=m$;7NjR%6_mJ}wGxb*9Z z;`ptz~NbRF5 zn+z5t!48qasJ>8KVBO+XapGp8v1Y4DE~Ndm`vGI2uGo_Nztoxz%{@M6&N_SS-v!%= zmc*Qip>u)YKN3;%$U4EeVc1N{O%1N!j-D761J4lYWqz1N{N{N zq-}r=E`REmKGlzHl%9(MJ)=@Zs;OS?f+#kJV8pty1aMlIs@8~-pO${YF)LtIh&a%* zbSp*BI-|;3tHLU;+{MF8>CPLh^2i6pktd+y>PAk*zA)`@^+ZHt!cfe_OS2`JoyS|L zJ6XN@e{ObP-;<0Px*l9w16?En;YqhCVR~M+f54#Sv}xy7S$5Tomem&t(jQTZNQk1| z{(4ztZl0F)zAMP$#?VLdFp)PS#7f!8OsuRc2&eG`z+mBC6453g%y_sSKu}M}gJm^0 zoR{cL!I{COash{7k&GozQ;=!=B?ytfdE#y@g?{s!#6}TU2R3QQ-%PJYnnz0|a*}uI z5n**SWJr)_<_j{mf}XBW<;!>A$EJm^%$3g?*>Tkqdt% z38**Z;ZEM^!Pe=j>~||48gGjK_kN(6`1Rw*Kt!`;!8%LtvNDzW87&#?8geMfQ3^s& zOM|fi1h34Srbt@qHplaqM2q1KfjU=a`!}3Lh9)H_@K=Xb2n64eodZ=ZLYXO5W+5a8 z_uA0wH`*%TUqUbpZH=4~F zPkMq6{Gtn&SPXFmIM|{222OOR>~s`)!_4S+S~2t-J*?BJY7kBR%N;YFn6 z3w*n%3Jp!KIRV!YxIQ0?VuImD6=Y%cMHhX0(#*1F|Hg~;VC9aog)9+4825AR)%i8$ zzwrhnJZ@O8==&rC?s2^-d%}(DM76FCi*ENnq^TfZU=5GS{`d3JX{T=1T&(4;LonQ|3$|1!jYAI*5Y7vp&~Nr~hW zxNV+#-En(_S3Vi8CvoTB>7I)HZXmNC$Nw6i?*vqML^1PS$u1Y4GYz7_X2q~4^?{-* z9QMUt$TM)i+Lb=~{7?GcfsQ4aI(x&z*X2y1fXy#Wlq_h|GT0v_7KTs~wsmqk1gAm_ zZ8I>Uc&S14In_;~H_H{pc2OhHTcm;yA7_EhNzLpKrwx4Vw0nCSDktUfeA=i{``Ng+ z$nW>^X0DKXHw{HuXT#qo+VeJ)>r&lmuNh6oIrQM)KJ+-Wf%SZ$x3OW))0%^JRS6<~ zfSf%n*2d5#A6phROIFpSFx&Z}ahj57%g1sk@(w-r4~HdVffTZ<@h{Ih&r;8IQlt<7 z-zO5X6Isq^2P*K1_cgAW*&%}DzIhg+xJ1=}ImH|>gVxGH~GLEhfc{9`#4C>^Ff=05%w#`j- z8DK8d!*#X_QdRcY0k#yJ2y1h7i1x;1_%@+0TJ;mPqG?I+gVTRW&_5F7AeB zy*fTHdyRQmFRXW?I2zsoM0+De>P`G#fGX&7{b8sU>mnhPi$Xq$FtYhPYot1MgQ#uP z)sP9C^c~`o3x*uf@Bud7E4PVmmtQFlJGAc3_=5y(OBu(M&LHtV2q((HRE=m=H*Bfl zg9Diybd$R4BwviGZNJgg7bEt2=G+^;(}Pe0`mPuBYpLH*ihaX#lPx!wgf+sSc&Rc` zfb%DaIsbySd4sJu9W;VLJIDm?894q>j^N9Z+JgGfV zbSjUwd>Wi>MThGJtZ--a$9aEYTTT!SAECT|%l^CzHP`>T$kA$|N^g2eLF#Qp#AL5t z*O<5H9f?T!0{vOz;_|8*7P;i7vxP$JU0-WX_p$6!aM9VO$t@397RCl5&m9$=ZfnSx zD>wf!iX`o@)^Us#OMKT0ZftLul8uB@y8d}!r}m+fH+Y*4#6UI0Og(9wX~Ww$DB%s< z|3OM!2DBFVQ|bX{{b^Ngkk!dXv*=P%&clQUmFjujSZ)JzTuYiFJ)G|b74f9gaANiE z+4Y%AV)L4)>8IM-8URQCA_yLwygebPx(iZvhFX+drfQ8&>bYj{T}7;G@mB4JwuENU z*gpduW8VKwDBs1}37;lk7Q@LGilkNa9#TEy%>qz%KuHmb!|k4Pe&@oJe}&*QYVoAR zMP>)Rn!9B`G1~RnQb=+WraPS_nPyl?vnGffqa^m^RjzK0OqG4qe%%S-F;Z0A*|JRm z$~X(ky{w;VY-M70<2h5FfZiCBFEb1Kh)b4;G7qlZ#Ap<0nD?bydSDemH(4NH3 zEkk*_i39pps*J49ozQN6bMey%E=Sq8#cIgFhEk;mkD?XH+4xd-v}$tb)N61>I+mfV zY^pzq)^N+D;45B>bujtw7uDr`niW4#>VtKCYU+KiT7c}OvY(u&A(BC>0&v4Ph$<(J zmB=Y|DCx7|bPw84oA-G!Z+6zw)!kFVucTDgrHPGhtN|6t_3(FU?7r^8Z|O;XE1T{P_@%yE(==-%WwB1ElttdVc-4H#?5hK% zx%p;quC*H;xa<6~+zKgaLA?vr@{UiUbvzLxiQo#oHmT~uL`o&{S51Onx7xxlv@3N@w>jl`yRgEt%H zSlh;8zK+kbeS5p+Cw$UHz_%X5)}A1z2e2#AnS^F|6Vd4lg(;w0JR75CQU`b7je0yL z8v~ru153gz8r0y1eXR7q!l%f3t*fjHryAnws*C#tHx$glptf}mLmmus`;{qg5dJ>{ zn@T$_2VNDTxCY)79AuAhYBC+YNr{uUD4QSn{)HX8uDUM)kY$g7HJxchJ} zr=F(aeS_o#12~rfX=^MV6okuiEe?H0Tzp>sqw!vY3#wLkU<}t4?<_fv-?54Gbsvg4 zk`0Yxm=k*}$98gJ-!QNmHaKQjhH8iX_)q>I8+AQqcD>i0c}jjNm-*Ef)Wh)8dM%l; zrMb6dVPdO9?8Ys++rzf-O6>_?SE6lFmDx>*1wU@D(J$_2w{-Q&pBjsynp5IyM=vuW z7uc|tasqp(Om1|j7R}Bf-JwSo76NxR{dKwji7~!h+iR_;390Dh{!!sKOm(*7kdDyB z?%y@1&@|Rj%F(F}lme{X|8gQrIORVc=gY&k>|Pzu6Ryp=h7^<55XdfvlG@@wISr~P z9o?Ja6Swin3Wz93TF42SPS)m;YC=Bcr6_*v^iu`)m6)(>vtCjJM21#205`5QZ}BNV z6#n$+^pXtJXRsq5n6TF0b5f(e6d@%(YzxzvoPuRCH{7KVZek;jMJD^@i0hxWID0Ui zob1DcLYSvDX#m^V-zoR_D+N2V)LwmVr+A28QHJS{nr*g?5~U-Ry@eqKUQPJUc~?(u zV;s~L;L$uWPOKh5`M7V|)&%pM;gzy!Fw+=CY>+dLw`F0#2x^pzQGN6u$<{1?poNDW zuy|BCC-@%LXtRX(7>qrc0b-bfOI@{;mMOVFstky%7j-Q}Fasg}l1mY>{LR`Ma?k z=!1NEQ-1=2k_X0d?Hc1}X<&;p;G`Qr>ZqkRtNX_y-BE*$av6>@S0n+#4cYL z_JkNUaM!KZGGOn(kMhJspKo_KaAkD%D9@7*T3`-k4Gcw3;XhDQj# 
z{T-4jlOt(u5IU>mW!~~mvqWUzFRU-leQ1|=?8BG7`f4L9eOIspVjTYsuGllNPZZJ_ z^kWsGnSpm8Dbz$QHPaYRPD_Zen*|S@PP-R~mch}XBsaLfHPQy?Fk%l>%gx)02d5uK z>*6*sCn1JbmH17>1|!yXs^iQhu1#4~l21!#swV6dXI+p;q})03V2d|o#IbruYUlhN zAgvXQ5 zT^V5(7zpUCC4OQ?kxgfw6c=-TxWIM{%W(Hj)9P)sxBXOP=O_yPA7>EObyCYTkG%I193Dn;HtjTfnnBs(xnx{e(#mT$=2pDl768&=RukR z?0a6pfSxkbC@#jX+{8;;H)=f|HM`X!YHI>3$|)3}itiC;R;P|p*hH;)=2WV9MvLI; z-{kq~AHLFC#v1u@eEy5Soyu)k!+0>}R>ydqRLd@G@{ULc@bbz`u{IqTPrA?UIQMdPlp&kJfl_msxki4 zFA+(-AcHpaU|p5!y?6LugHxg?0oe&vH#vH;-kjj+pGQ!9*<05{KY%uvQ}TAvhw^l8e=Y>YUKO+ZD;0NB8TWu%=kOUEGlcEn!PdRffrOCfWqiE8j4kJ}6! zc#ixs(`dLn;3e-F*k4_YOr)0p8%VrZ3Av|44qrmjy7G;yIq(MZ-Vp^{Rr7K>x~J44 z9HThNqhP!xfNZ%Sj{Bb*_zVaS`hCXQ6#W5txy0|Y@Zt8(QPQ1fuGHrO&Rt|1 z$U`;q#?V092W|y*@+4GI&l{seUi0*f`a~Ow5fF^|iwJ^F%e$u-3fz?Ja!T0Cd`cwOl@r~_wZ3veuNMn@*7 zNDzK@#*2Q1!n8Z?hbR7PUMjHrS~h33BxYVvI%2CH<*aOQm6tct%n{+X>WS3sKt@?9yID6^Q^>>gQ> zh3Rw!ohsL59t?jG>L9dYH_?93#L-0X@pd%4Y8^rF037qH0){>pz0hak+W}nO zN0(R8Adq}O+zN152p{Bq8-Fk8{3{zD86*H4gas}eXqNdH*z0s6v1I?Hex+qkSKmRU3m*u}ZVRq3_R<=!wox|x@3X&k0Qz{vLl!*| zbnLTLILAh7Tejc<%a8}ULY02958m!JPxzl+1POGfKIQ)&-SvvP1p>H^lA&j-9 zV^Uo4!@m;gY)?KZ9E10muh)gt7EgI@{204tgTxhF-JIBHY;V6=HUmC;H<7%{S~(z9 ztlJ?QYoNSv?AN{|lmB^cy&p^25%pFGJUiIXf815<@o15g-#Y$ZRWqOE(Ea{n!BP2p q`$ykTHYU6&Z#z<)s=%UBd5@930JufHtKPGmYbb1=qmhuHUogbjSkUAE literal 6064 zcmV;h7fXI zu?>MMGZVG=4Ny|(vkswZ0^nJzb?#lJ{66D)Eu?}Ws-fY)D=$@Ae@-GF390#m4$Bfb zUpI9mcP`rLf`Nzq`in*~#|>s@JXb!P1IQ~psX1BY96WJ-uYGPSkK%?$%Y%^iZ=RER zo>>cy5~%QPhw?^i8K5e^kTgr(py|jWFI0k;DSMVD;-<2{aF3T#opDI0&v5{n`nMci zgW1Z&%UXI*Taa-bm7=c1vXSs5sXmJGaa`g2X?I}>O$HRS6M8p9_Lnxyl6C}fYK!|xSR6$IF9GN$D`XWnytCsk>g&E2pe5W_Orl)w87ywy z^qn4Eoi-dAydTpEXK4OSh}YAJf4-fz94%S?pw)FP`U(=7ZtPrwI%vY1<5@--P&Qr_*vSVpwlmsPrL= z2{H#XAsEwU+|AebL!>>5lN(5jL~b!veQ{cDUvqOQs#U@Y1IHcPg`4iR!M7sUOp5T{ zMSomRgah!<#ls%VgDGuhBGZQv^bsj6*~&MFF5qijJBD!JYa}&~hd_&f*maiVoF%oU zX~sUNkRs%f)d}i9Ka`P0Mv-ALRxZ%4ki`<4+En;ZOiWij>%DstwJ3`QWl;8SV0;)x z!Jq#g8pqkpr;*g4P{5)?^gs#PYh`oAw*f?g73h&4Ze*fed{Ho@KqQIRqJ;n#eka4U ztbHKyIcf`YR(Mc7MKfq;%&7ix14c05l8^nAu12u_f}P4CFhtR zE)>`3f5!Vcr5e;OB~{`Am_b*SpbaTMbgUq8b)kbtuhvb3R>zShUdS?vM$jo^i_yd? 
z-{EZ=0cuC>dmD|wJR@7un3Po@hg!t%O;4CYHCwc1zP7vw#E6#5jgF1e7?4F|Fl&L# zw>&Gm2Bcf-!bO%k>2WP4rP=qRwMu+rB<6c{qF0=CjTao?s*8T`7{lb2U_^EPCfi1t zUcuCXf>)d~?y66M;r#H=MXN4AmZp0J);_0pzoYj5HrD}s7?MOjj@=2sdOR8v3RaNz z(t*&`8rQ3rEECSsL<9(dmX8NH7?=I$jIhgEi$TN`>OHe;3%->DT;8L0TCTR@HZ!Hs zEq7`4Soe}2I?u^*vyAe$zdYCa7haMp(7VRfs9o|cP^65*kk>iYSiUlKl>>Isb#wr8&5<3rVppPfT+aDR{+4%@=cEV9CdN+JFNDa|-f zpVNwS1Ro(2{KFz)TQLXECr$rY}(l_*7jEN_# zf!siLBhAiF-D_EeripTXJYwY640Abg!7AK+60FJWBXF4=W0GSVdbOu& z9w~xo3K=}a&WoQH3MaN{UE}_sdnF`PvT!NcF^ub%zrU50mSa;7#n$Jo`|uylg$nD; zUW_^8%p#K5s!A(N2=z!~#1 zWeALEL7;&*i@{sqBk9uF--E~%%B9gvUA2LDq|&bl9~bFu$d00gKm&?E+U- zkOV_-TCatdF~^oa+uLyRj{Hn1&BIQiy*X1{cwX4}C0&*&D%{DQ#ab*?a9mq4rp3Lf z2%ph6S;VU=HK&k2I!zLTWjf0VNo%6G&I3?>i9ze07@l%cyNL|J06W;snoz!L z<}A5Q1r7;#(8r*A=Y{#uk=^3-S5G>8_2R2>F5u0} zz`halePG?#XK5Q6F3K65tnAoGxqD{v`5A_=A!2gAyw-I7*X7WIwXitJd=D`fVx;<) z3(&{Y*ScP^G02A$~&V>Gf&Cwe1RuR3<9B?+zakEr@iDzX3kuMrsw_W02A$l+L|5hLr zfTau9%a<~eM7(>mj5Y8KC6O|2zshxVl-+{k%SO7vc!04l_>0vcx4w#*&Yr>c`MpBJjgcgcyILPj1Gac7dQ*#AH z?oI3E#5N0HjH8Flbv<9QJ8Ka-vLKj4Kjw(kK{_fxXE>?+wtn1dnXA_HiS~8TEK+xq z*Yr9K)dTcAIU(EarDTk=6u-epTC_}Tt&4ohp*}#|byJmc+hM3|hCuT&y5qKje;%e< z6Uds&#U*hP>qji-h3Z4`%Q2(F_WNL6s+%6`!9Dv%vd)OGUWnrC0t#pk!kBQ7te@{C zgPVXz^mj?kP2f5LzDQu@SU8J2qrjRfVu$wwBRbH&K#v;mh{aN_Go9l_oC(e~ecKF7 zPSq3#k;iYwyHSKl<;Pn8dyNaul!l3pYc=!;suti!ChsRga@Tw$XpL4D8iLXN-Ca@L zbI29CayqQS!D0zSw_uQ0?>8mXzU8f!9_Lx!bz#`_8YEsqf+X$NS?CltBy6Sd3mg0R z3WV~tff^SmrIZ&E~EQlwN19v|pyRzV)CqwWD5w*?{o-FlUHyzr#1TGS&tM~2$ z0YFo!w>OWAx<01w< z7h7pKhdt3j+|N$y#br}z;i&iIrKWKUHU**8{%d6vxdJiCQS{zI#m|&UJ{EWqH-zao zb%`>OJE_%!U#}pc`33sN1T5Kbdq%+ZO423Ft;{9HwJL}5o{jG#d1S-#79~yhPNz>V zQ<$2K>p@8OZ@w1?iQ-z1GEdywD@$8JR_ywtud?A|!L8CmVMzMs@aoBCxWpu+bEjd0 zI(Z+g0X{3$Nj(Xg$bEgj1p2KTI(X36xl3ulKv$e>>OJh{S*#FfXmv(ewWBA|7IWD` z7rfIqXD9{!cgei7=t2`l)cTlur9HluUMlMR&zufT#e3_1Tn0vwYksY8rY(;DN%+r zI4m?)L;X-R%a)b_=3$JVr>o~~k20X!b^7y9c*0b^OF|7KO~()J$Yt?i*uFNxt73T` zuLJjegmKr)7f1eO+|u$FZr*LPH#87ku6Go%Pp{j*1g;|qDBvmRMhaOqYGZ~Bn zK&pBWJA>5=IWQf|iGz`FI@u7$bMJ0&eWT3Wkvp1d-Vq2qv`Aw`rClok=K}vlmqVLs z6KjngYb;aTndsmtWN5Unk?vUJxR-SL736@J6_O)99_iNqp-RqddNE6#)?yTEAD(Z1 z-(ZoIpF4#IQsJOY1=Pfrm~D8co2Lk|6=UnsBQl-PdX99uAc^aLzz0>Y5GDbZuuc zGOuE3^UO=-KBp*@Cx?TXpf-KlL<5dp7F+>aR-o3IVvU!D_hG>bmgzgVX-8!(a2U^7 zjgjh9_0d{^D#jTlA1PZqp^?>@cIj<5o23SRjZ01LL7T@Id6pV9BZq*JY)vsQf@HQ? z35?93K*uQ62#VUUB8Kt}#6T1{6(#r}*;{1(C@A1LfssgLl)kqFzb!gGMv*~?NTh6xgIeu?1-I{qIY2b)1aj(z33DxjGraoN z9p_$F_vPxt-VGvJ@A6nwa}L%F-piW#qE-@tk6H02OtRG&fV~THW6tp`0$?uQWJ$qa*p+X8Ak99i6HmrhS2RG zpz5RMyR*b5gFWXy*lZz+;vlti=RyhrpRvzDX;#_7fSFLagJhJF0&{Qc1T%L-PRZ24 zxD8#jkHh#-x}x4jr~`;lSOh^6BQ8cAp;NjLGa^Ihybl?~+x4&axYVb0@58MUN*=5S z+1`gSoz89zwlFKL{n&DgP}dsf6hOuVe2KFcAawWyl=ySEuyOTL~=n6CSD=2F@#wp#Qqbn>RPw%XxXQ2d+gsCdHlzCg@Gl2Jx;-8o~B8!>h=H_*xjo+sQtkW3^%nTu3}zh4BXb`Z~LK)2!9N zjd5>Co&G^^I(${hV^W0J+2qQ{`6W(<)bq7h8Wo+TMKb$0H3O{JyMlq_|6rJ7QiLkB zj;%laGS+$}fPcGud{weEmD-@ni7IoQM$4v&C4}Pb_7leX)NPChp@#PQ8^sgZ{l{PS zb#FO;)CBfE)Zd$LL7aFcV;&oqGc-r9a^WfE25~AsYnay7yE(ErjJKXY+3d(0+hV+P z2O*L7a`=dqTyLwkAs9chC)~(!1s7>P-eJ!i^oZfIxo@QMK_~z7S$|d(SjhxOAfb@+ zy8dHo_67dYQtM^B_t0hid6`9eVUST{R-nOE(!TDU}x4)M+OUkgYkD@LYG{P-@UpQb@Jt(y^8)X zG}ULA>17Z@+BlXI4eO4Yzw3{&bE@^_3uBLwN>|r&<@V*R?dIdCQ27o6%!IqjN5L`V z!Zxf0O0*!E29E;@dWe@^s3!>K*>C;x+bL)%L92X=h(J$Daf?nL`va-aIQ*Je9`}|F zGeTRzocX2nqk3=yD{-$@Yr@urIixvA+Wk`etRRFEVy5{Sy6w^5!1P09Q9V{=8Vlg3 zTL5Xon7MlSCEywTlw*vGKOMo{R|d`;y)b7zu7O4!O44GPB&qe2 z*~N2YtZ&Vwtk3$eroz7NdnbiXT;lSm_k<*PFtYL&`;pKj0wSTTdQ@kUC?`gQZ4(~! 
z2(C$Io7Fm&<8XP%n+&vmNx*N?(mtqU*8MMV?!TSXK$gRh%hdF|O3Q z0!6qc|3$YqozQ9GHj~?|AZ1AS`i94hZ^k0GSW@fR6sIK=O#^QqOaXn+u7iWXn80)Z z-!rzlpo24Y!Ra3TciNHHP;^l8c1errz7t>4`+tL&dia-l|AHC%6|&L}E^w2Yf8jC* zPKCCEeG)0`Q7VEwI!s4YV%RuRCrTsc)`>=WCfYz{?CKTcE_&)Z0TINCSaa*#uLQjY ziL)oIy_@`9NJub_PH}$q(mG4@yh4g;w#WeT>v(E zC_6vbAnchym7)pL_0OOHG9FvIy*y_GuP+ya3j|qz9jLvTcT}>6wT;2SCKwL0*L721 z15Z9(8Eni@g8@7Rf|(S@NVTHSeYsso71nmgm}gkcSnb>#Os)j_MGs`eA#|%RE>eo;N{*pIln2acop#iHF;FCsLZTsOu{))=6>9~7jTW|lq z!XQoP++3cbk2YUWP1-wu@sWZT)bBU)QwP9&CY_sLwk5zW3IUkeQSCuZntmPitwnmU zLv7RTVT>^iYP_Ox@J65aS!wQQxwMzWOo_-poWKGiF^RNi4zxv!l>d3kshx*^wW z-!Q9@E2Wzuc05GmDsOo#%?vxi+E}IfFWt!VXcI4223SC}N|u!tu3RXyc7Es;U|2I0 zfrODH1-6FFuXv`C?vXZ?5z{I?Or0EYxbp42u{(XiSVj2^c`(n z9Z}*=J;et}48zVqr;$#2 zrP%5aDjt^(9(hd1n(it;o~sL9*DOLPG{`n_LZP*Ys`v#M*Ky_4^d2w<*U`Ffxjzf+ z1rClV-45yNKp`-Jk_n*3h5>2a(PzKWW>oRJB3Q9&LnoTDyuomM7nk&vq$XlxJJgzj zHP67OXOX_aNv*LnzZdJR6;eZv5r~28SYT-ZMCQgvQ4-FEDyfY<()Y}?=d8Y`m@owk6b9Z81)Kn+Vj(&Dg#-;C&6peFh2(#vpyF&wEtspJTA7Z9!fcepfv9T?BUHt*l~vpIX2}P4 z;NTYLWPq(x`FiQq?^1Fk1R6RuCS1x*qNkQr&?RgfgR8jz2Lt@})Q@8c0DvzX>9CGuX{Argpu*}dB?h} z0dWe1i=-(3Kzp~}q~*|W2(`fH;k>06vaYmB`#CE;)UqrAtc@MQSW=ADJI1A7!%q?6 z;<};P@=k-vF^UhA;NSt&`BKeGOtQY^6Sfm7HNJ5%1t{^bR@1v8T`JHy2n_3N>8tt$ zAZ~nJx48!Vo`(^4C%bGY4N)X)CaJ7@R8rfDdWl?*z*BS1;{Vr+VeL5l0j#a1M*2gI zPsING3_<( Date: Sat, 2 Mar 2019 21:05:14 +0100 Subject: [PATCH 54/55] Deploy by deleting all pods For now let's have it on all branches and check if it works. I will change the line once it works. --- .travis.yml | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/.travis.yml b/.travis.yml index d0e134388..b20c2f2b5 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,12 +1,25 @@ language: generic + before_install: - openssl aes-256-cbc -K $encrypted_87342d90efbe_key -iv $encrypted_87342d90efbe_iv -in kubeconfig.yaml.enc -out kubeconfig.yaml -d + install: - curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl - chmod +x ./kubectl - sudo mv ./kubectl /usr/local/bin/kubectl - mkdir ${HOME}/.kube - cp kubeconfig.yaml ${HOME}/.kube/config + script: - kubectl get nodes + +deploy: + provider: script + # TODO: fix downtime + # instead of deleting all pods, update the deployment and make a rollout + # TODO: fix multiple access error on volumes + # this happens if more than two pods access a volume + script: kubectl --namespace=human-connection delete pods --all + on: + all_branches: true From e6a980d2ff7fd596e7ed63ffa15dae1389457441 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20Sch=C3=A4fer?= Date: Sat, 2 Mar 2019 21:15:15 +0100 Subject: [PATCH 55/55] Add badge to README, only deploy on master branch --- .travis.yml | 2 +- README.md | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index b20c2f2b5..b39322894 100644 --- a/.travis.yml +++ b/.travis.yml @@ -22,4 +22,4 @@ deploy: # this happens if more than two pods access a volume script: kubectl --namespace=human-connection delete pods --all on: - all_branches: true + branch: master diff --git a/README.md b/README.md index e4d0c5396..f8cff30eb 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,5 @@ # Human-Connection Nitro | Deployment Configuration +[![Build Status](https://travis-ci.com/Human-Connection/Nitro-Deployment.svg?branch=master)](https://travis-ci.com/Human-Connection/Nitro-Deployment) Todos: - [x] check labels and selectors if they all are correct
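
A possible follow-up for the deployment TODOs above (a sketch only, not part of
the original patch series; it assumes the `nitro-web` and `nitro-backend`
deployments defined in this repository): instead of deleting all pods, bump an
annotation on the pod template so that kubernetes performs a rolling update:

```sh
# "deployedAt" is an arbitrary, made-up annotation key; changing any
# pod-template annotation makes the deployment controller roll out new pods.
$ kubectl --namespace=human-connection patch deployment nitro-web \
    --patch "{\"spec\":{\"template\":{\"metadata\":{\"annotations\":{\"deployedAt\":\"$(date +%s)\"}}}}}"
```

For `nitro-backend` this would additionally need `strategy: type: Recreate`
(or a `ReadWriteMany` volume), because a rolling update would otherwise run
into the multi-attach error described in PATCH 40.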