Mirror of https://github.com/IT4Change/Ocelot-Social.git, synced 2025-12-13 07:45:56 +00:00

Refactor db-migration-worker deployment
Instead of creating a deployment with a replica set, we now create the pod only once. In the future, the pod should contain everything needed to run the database migration, i.e. `cypher-shell` to write directly to the database within the current network. All required configuration is passed directly to the `db-migration-worker` pod, and the SSH keys are provided through a secrets file. Altogether, this made many configuration files obsolete.
This commit is contained in:
parent
246a46c2e8
commit
fb929da2cd
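
For context on the `cypher-shell` remark in the message above, here is a minimal sketch of the kind of direct write access the worker pod is meant to have. The service name `nitro-neo4j` and the default bolt port 7687 are assumptions for illustration, not something this commit configures:

```sh
# hypothetical smoke test, run from inside the db-migration-worker pod:
# count all nodes in the Neo4j database reachable on the cluster network
$ cypher-shell -a bolt://nitro-neo4j:7687 "MATCH (n) RETURN count(n);"
```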

.gitignore (vendored, new file, +2)
@@ -0,0 +1,2 @@
secrets.yaml
*/secrets.yaml

README.md (40 changed lines)
@@ -109,20 +109,38 @@ Wait until all pods turn green and they don't show a warning
`Waiting: ContainerCreating` anymore.

### Provision db-migration-worker

Copy your private ssh key and the `known_hosts` file of your remote legacy
server:

```shell
# check the corresponding db-migration-worker pod
$ kubectl --namespace=staging get pods
# change <POD_ID> below
$ kubectl cp path/to/your/ssh/keys/.ssh staging/nitro-db-migration-worker-<POD_ID>:/root/
```

### Migrate database of Human Connection legacy server

Create a configmap with the specific connection data of your legacy server:

```sh
$ kubectl create configmap db-migration-worker \
  --namespace=staging \
  --from-literal=SSH_USERNAME=someuser \
  --from-literal=SSH_HOST=yourhost \
  --from-literal=MONGODB_USERNAME=hc-api \
  --from-literal=MONGODB_PASSWORD=secretpassword \
  --from-literal=MONGODB_AUTH_DB=hc_api \
  --from-literal=MONGODB_DATABASE=hc_api \
  --from-literal=UPLOADS_DIRECTORY=/var/www/api/uploads
```
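
A quick way to double-check the values before starting the pod (plain kubectl, shown here as an optional sanity check):

```sh
$ kubectl --namespace=staging get configmap db-migration-worker -o yaml
```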

Create a secret with your public and private ssh keys:

```sh
$ kubectl create secret generic ssh-keys \
  --namespace=staging \
  --from-file=id_rsa=/path/to/.ssh/id_rsa \
  --from-file=id_rsa.pub=/path/to/.ssh/id_rsa.pub \
  --from-file=known_hosts=/path/to/.ssh/known_hosts
```
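
The keys should then show up as data entries; `describe` lists only their names and sizes, not the key material itself:

```sh
$ kubectl --namespace=staging describe secret ssh-keys
```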

As the [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/secret/#use-case-pod-with-ssh-keys)
points out, you should be careful with your ssh keys: anyone with access to your
cluster will have access to them. Better to create a new pair with `ssh-keygen`
and copy the public key to your legacy server with `ssh-copy-id`, as sketched below.
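
A minimal sketch of that advice; the key directory `./migration-keys` and the comment string are illustrative choices, not part of this commit:

```sh
# generate a dedicated key pair just for the migration
$ mkdir -p ./migration-keys
$ ssh-keygen -t rsa -f ./migration-keys/id_rsa -C "db-migration-worker"
# authorize the new public key on the legacy server
$ ssh-copy-id -i ./migration-keys/id_rsa.pub someuser@yourhost
```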

Create the pod and the required volume:

```sh
$ kubectl apply -f db-migration-worker.yaml
```
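
Before running the migration, you can watch the pod until it reports `Running` (standard kubectl; the pod name is fixed by `db-migration-worker.yaml` below):

```sh
$ kubectl --namespace=staging get pod nitro-db-migration-worker --watch
```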

Run the migration:

```shell
# change <POD_IDs> below
$ kubectl --namespace=staging exec -it nitro-db-migration-worker-<POD_ID> ./import.sh
$ kubectl --namespace=staging exec -it nitro-neo4j-<POD_ID> ./import/import.sh
$ kubectl --namespace=staging exec -it nitro-db-migration-worker ./import.sh
```
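
To spot-check the imported data afterwards, a node count via `cypher-shell` on the Neo4j pod is one option; authentication flags may be needed depending on your setup:

```sh
$ kubectl --namespace=staging exec -it nitro-neo4j-<POD_ID> -- cypher-shell "MATCH (n) RETURN count(n);"
```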

staging/configmap-db-migration-worker.yaml (deleted file, -12)
@@ -1,12 +0,0 @@
apiVersion: v1
kind: ConfigMap
data:
  SSH_USERNAME: "<REPLACE>"
  SSH_HOST: "<REPLACE>"
  MONGODB_USERNAME: "hc-api"
  MONGODB_AUTH_DB: "hc_api"
  MONGODB_DATABASE: "hc_api"
  UPLOADS_DIRECTORY: "/var/www/api/uploads"
metadata:
  name: staging-db-migration-worker
  namespace: staging

db-migration-worker.yaml (new file, +39)
@@ -0,0 +1,39 @@
---
kind: Pod
apiVersion: v1
metadata:
  name: nitro-db-migration-worker
  namespace: staging
spec:
  volumes:
    - name: secret-volume
      secret:
        secretName: ssh-keys
        defaultMode: 0400
    - name: mongo-export
      persistentVolumeClaim:
        claimName: mongo-export-claim
  containers:
    - name: nitro-db-migration-worker
      image: humanconnection/db-migration-worker:latest
      envFrom:
        - configMapRef:
            name: db-migration-worker
      volumeMounts:
        - name: secret-volume
          readOnly: false
          mountPath: /root/.ssh
        - name: mongo-export
          mountPath: /mongo-export/
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: mongo-export-claim
  namespace: staging
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
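
Once this pod is running, the `envFrom`/`configMapRef` wiring above can be spot-checked by listing the injected environment variables (the grep pattern is illustrative):

```sh
$ kubectl --namespace=staging exec nitro-db-migration-worker -- env | grep -E 'SSH_|MONGODB_|UPLOADS_'
```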

staging/.gitignore (vendored, deleted file, -2)
@@ -1,2 +0,0 @@
configmap-db-migration-worker.yaml
secrets.yaml

@@ -1,92 +0,0 @@
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: nitro-db-migration-worker
  namespace: staging
spec:
  replicas: 1
  minReadySeconds: 15
  progressDeadlineSeconds: 60
  selector:
    matchLabels:
      workload.user.cattle.io/workloadselector: deployment-staging-db-migration-worker
  template:
    metadata:
      labels:
        workload.user.cattle.io/workloadselector: deployment-staging-db-migration-worker
      name: nitro-db-migration-worker
    spec:
      containers:
        - env:
            - name: COMMIT
              value: <BACKEND_COMMIT>
            - name: SSH_USERNAME
              valueFrom:
                configMapKeyRef:
                  name: staging-db-migration-worker
                  key: SSH_USERNAME
            - name: SSH_HOST
              valueFrom:
                configMapKeyRef:
                  name: staging-db-migration-worker
                  key: SSH_HOST
            - name: MONGODB_USERNAME
              valueFrom:
                configMapKeyRef:
                  name: staging-db-migration-worker
                  key: MONGODB_USERNAME
            - name: MONGODB_AUTH_DB
              valueFrom:
                configMapKeyRef:
                  name: staging-db-migration-worker
                  key: MONGODB_AUTH_DB
            - name: MONGODB_DATABASE
              valueFrom:
                configMapKeyRef:
                  name: staging-db-migration-worker
                  key: MONGODB_DATABASE
            - name: UPLOADS_DIRECTORY
              valueFrom:
                configMapKeyRef:
                  name: staging-db-migration-worker
                  key: UPLOADS_DIRECTORY
            - name: MONGODB_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: staging
                  key: MONGODB_PASSWORD
                  optional: false
          image: humanconnection/db-migration-worker:latest
          name: nitro-db-migration-worker
          resources: {}
          imagePullPolicy: Always
          volumeMounts:
            - mountPath: /root/
              name: ssh-keys-directory
            - mountPath: /mongo-export/
              name: mongo-export
      volumes:
        - name: ssh-keys-directory
          persistentVolumeClaim:
            claimName: ssh-keys-claim
        - name: mongo-export
          persistentVolumeClaim:
            claimName: mongo-export-claim
      restartPolicy: Always
      terminationGracePeriodSeconds: 30
status: {}
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: ssh-keys-claim
  namespace: staging
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      # waaay too much
      # unfortunately Digital Ocean's volumes start at 1Gi
      storage: 1Gi

@@ -1,12 +0,0 @@
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: mongo-export-claim
  namespace: staging
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi

@@ -1,12 +0,0 @@
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: uploads-claim
  namespace: staging
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 8Gi