Cleanly separate legacy migration part

Use `kubectl patch -f ...`
Robert Schäfer 2019-02-05 17:44:30 +01:00
parent 99262a0d4d
commit e1e457abae
6 changed files with 104 additions and 64 deletions

View File

@@ -98,7 +98,24 @@ $ kubectl create -f namespace-staging.yaml
```
Switch to the namespace `staging` in your Kubernetes dashboard.
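If you prefer working from the command line, you can also point your current `kubectl` context at the namespace. This is a minimal sketch, assuming the context name reported by `kubectl config current-context`:
```sh
# Make the staging namespace the default for subsequent kubectl commands.
$ kubectl config set-context $(kubectl config current-context) --namespace=staging
```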
### Prepare migration of Human Connection legacy server
### Run the configuration
```shell
$ kubectl apply -f staging/
```
This can take a while because Kubernetes will download the Docker images.
Sit back, relax and have a look at your Kubernetes dashboard.
Wait until all pods turn green and no longer show the warning
`Waiting: ContainerCreating`.
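If you would rather watch the rollout from a terminal instead of the dashboard, a sketch using standard `kubectl` options (assuming the `staging` namespace from above):
```shell
# Keep listing pods until every pod reports STATUS Running and READY n/n.
$ kubectl --namespace=staging get pods --watch
```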
#### Legacy data migration
This step is completely optional and only required if you have data on a server
running our legacy code that you want to import. It will import the uploads
folder and migrate a MongoDB dump into Neo4j.
##### Prepare migration of Human Connection legacy server
Create a ConfigMap with the connection details of your legacy server:
```sh
$ kubectl create configmap db-migration-worker \
@@ -126,17 +143,16 @@ points out, you should be careful with your SSH keys. Anyone with access to your
cluster will have access to your SSH keys. It is better to create a new pair with
`ssh-keygen` and copy the public key to your legacy server with `ssh-copy-id`.
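For example, the following sketch generates a dedicated key pair and stores it in the `ssh-keys` secret that the migration worker mounts into `/root/.ssh`; the user, host and key file names are placeholders, and the exact file names the worker expects may differ:
```sh
# Generate a dedicated key pair for the migration worker (no passphrase).
$ ssh-keygen -t rsa -b 4096 -N "" -f ./id_rsa -C db-migration-worker
# Authorize the new public key on the legacy server.
$ ssh-copy-id -i ./id_rsa.pub <user>@<legacy-server>
# Store the key pair in the secret referenced by the deployments.
$ kubectl --namespace=staging create secret generic ssh-keys \
  --from-file=id_rsa=./id_rsa --from-file=id_rsa.pub=./id_rsa.pub
```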
### Run the configuration
```shell
$ kubectl apply -f staging/
##### Migrate legacy database
Patch the existing deployments to use a multi-container setup:
```bash
cd legacy-migration
kubectl apply -f volume-claim-mongo-export.yaml
kubectl patch --namespace=staging deployment nitro-backend --patch "$(cat deployment-backend.yaml)"
kubectl patch --namespace=staging deployment nitro-neo4j --patch "$(cat deployment-neo4j.yaml)"
cd ..
```
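To verify that the patches were applied, you can list the container names of both deployments; this is a sketch using standard `kubectl` output options, and both lists should now include `nitro-db-migration-worker`:
```bash
kubectl --namespace=staging get deployment nitro-backend \
  -o jsonpath='{.spec.template.spec.containers[*].name}'
kubectl --namespace=staging get deployment nitro-neo4j \
  -o jsonpath='{.spec.template.spec.containers[*].name}'
```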
This can take a while because Kubernetes will download the Docker images.
Sit back, relax and have a look at your Kubernetes dashboard.
Wait until all pods turn green and no longer show the warning
`Waiting: ContainerCreating`.
### Migrate legacy database
Run the migration:
```shell
$ kubectl --namespace=staging get pods

View File

@@ -0,0 +1,27 @@
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: nitro-backend
  namespace: staging
spec:
  template:
    spec:
      containers:
      - name: nitro-db-migration-worker
        image: humanconnection/db-migration-worker:latest
        imagePullPolicy: Always
        envFrom:
        - configMapRef:
            name: db-migration-worker
        volumeMounts:
        - name: secret-volume
          readOnly: false
          mountPath: /root/.ssh
        - name: uploads
          mountPath: /uploads/
      volumes:
      - name: secret-volume
        secret:
          secretName: ssh-keys
          defaultMode: 0400

View File

@@ -0,0 +1,39 @@
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: nitro-neo4j
  namespace: staging
spec:
  template:
    spec:
      containers:
      - name: nitro-db-migration-worker
        image: humanconnection/db-migration-worker:latest
        imagePullPolicy: Always
        envFrom:
        - configMapRef:
            name: db-migration-worker
        env:
        - name: COMMIT
          value: <BACKEND_COMMIT>
        - name: NEO4J_URI
          value: bolt://localhost:7687
        volumeMounts:
        - name: secret-volume
          readOnly: false
          mountPath: /root/.ssh
        - name: mongo-export
          mountPath: /mongo-export/
      - name: nitro-neo4j
        volumeMounts:
        - mountPath: /mongo-export/
          name: mongo-export
      volumes:
      - name: secret-volume
        secret:
          secretName: ssh-keys
          defaultMode: 0400
      - name: mongo-export
        persistentVolumeClaim:
          claimName: mongo-export-claim

View File

@@ -0,0 +1,12 @@
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: mongo-export-claim
  namespace: staging
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi

View File

@@ -18,18 +18,6 @@
name: "nitro-backend"
spec:
containers:
- name: nitro-db-migration-worker
image: humanconnection/db-migration-worker:latest
imagePullPolicy: Always
envFrom:
- configMapRef:
name: db-migration-worker
volumeMounts:
- name: secret-volume
readOnly: false
mountPath: /root/.ssh
- name: uploads
mountPath: /uploads/
- name: nitro-backend
image: humanconnection/nitro-backend:latest
imagePullPolicy: Always
@@ -75,10 +63,6 @@
        - mountPath: /nitro-backend/public/uploads
          name: uploads
      volumes:
      - name: secret-volume
        secret:
          secretName: ssh-keys
          defaultMode: 0400
      - name: uploads
        persistentVolumeClaim:
          claimName: uploads-claim

View File

@@ -17,23 +17,6 @@
        name: nitro-neo4j
    spec:
      containers:
      - name: nitro-db-migration-worker
        image: humanconnection/db-migration-worker:latest
        imagePullPolicy: Always
        envFrom:
        - configMapRef:
            name: db-migration-worker
        env:
        - name: COMMIT
          value: <BACKEND_COMMIT>
        - name: NEO4J_URI
          value: bolt://localhost:7687
        volumeMounts:
        - name: secret-volume
          readOnly: false
          mountPath: /root/.ssh
        - name: mongo-export
          mountPath: /mongo-export/
      - name: nitro-neo4j
        image: humanconnection/neo4j:latest
        imagePullPolicy: Always
@@ -69,16 +52,7 @@
        volumeMounts:
        - mountPath: /data/
          name: neo4j-data
        - mountPath: /mongo-export/
          name: mongo-export
      volumes:
      - name: secret-volume
        secret:
          secretName: ssh-keys
          defaultMode: 0400
      - name: mongo-export
        persistentVolumeClaim:
          claimName: mongo-export-claim
      - name: neo4j-data
        persistentVolumeClaim:
          claimName: neo4j-data-claim
@@ -96,15 +70,3 @@
  resources:
    requests:
      storage: 4Gi
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: mongo-export-claim
  namespace: staging
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi