# Source: Ocelot-Social/staging/deployment-db-migration-worker.yaml
# Last modified: 2019-02-02 13:38:43 +01:00
# (original render metadata: 93 lines, 2.7 KiB, YAML)

---
# Staging worker that exports data from a legacy MongoDB over SSH so it can be
# migrated into the new backend. Config comes from the
# `staging-db-migration-worker` ConfigMap; the DB password from the `staging`
# Secret.
# apps/v1 replaces extensions/v1beta1, which was removed in Kubernetes 1.16;
# the required spec.selector was already present.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nitro-db-migration-worker
  namespace: staging
spec:
  replicas: 1
  minReadySeconds: 15
  progressDeadlineSeconds: 60
  selector:
    matchLabels:
      workload.user.cattle.io/workloadselector: deployment-staging-db-migration-worker
  template:
    metadata:
      name: nitro-db-migration-worker
      labels:
        workload.user.cattle.io/workloadselector: deployment-staging-db-migration-worker
    spec:
      containers:
        - name: nitro-db-migration-worker
          image: humanconnection/db-migration-worker:latest
          # :latest tag — pull on every restart to pick up the newest build.
          imagePullPolicy: Always
          env:
            # CI-substituted placeholder; quoted so an all-decimal commit SHA
            # is not parsed as an integer (env values must be strings).
            - name: COMMIT
              value: "<BACKEND_COMMIT>"
            - name: SSH_USERNAME
              valueFrom:
                configMapKeyRef:
                  name: staging-db-migration-worker
                  key: SSH_USERNAME
            - name: SSH_HOST
              valueFrom:
                configMapKeyRef:
                  name: staging-db-migration-worker
                  key: SSH_HOST
            - name: MONGODB_USERNAME
              valueFrom:
                configMapKeyRef:
                  name: staging-db-migration-worker
                  key: MONGODB_USERNAME
            - name: MONGODB_AUTH_DB
              valueFrom:
                configMapKeyRef:
                  name: staging-db-migration-worker
                  key: MONGODB_AUTH_DB
            - name: MONGODB_DATABASE
              valueFrom:
                configMapKeyRef:
                  name: staging-db-migration-worker
                  key: MONGODB_DATABASE
            - name: UPLOADS_DIRECTORY
              valueFrom:
                configMapKeyRef:
                  name: staging-db-migration-worker
                  key: UPLOADS_DIRECTORY
            - name: MONGODB_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: staging
                  key: MONGODB_PASSWORD
                  optional: false
          resources: {}
          volumeMounts:
            # NOTE(review): mounting at /root/ shadows root's entire home
            # directory, not just ~/.ssh — confirm this is intentional.
            - mountPath: /root/
              name: ssh-keys-directory
            - mountPath: /mongo-export/
              name: mongo-export
      volumes:
        - name: ssh-keys-directory
          persistentVolumeClaim:
            claimName: ssh-keys-claim
        - name: mongo-export
          persistentVolumeClaim:
            claimName: mongo-export-claim
      restartPolicy: Always
      terminationGracePeriodSeconds: 30
status: {}
---
# Persistent storage for the worker's SSH keys (mounted at /root/ by the
# nitro-db-migration-worker Deployment).
# NOTE(review): the Deployment also references a `mongo-export-claim` PVC,
# which is not defined in this file as seen here — verify it exists elsewhere.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: ssh-keys-claim
  namespace: staging
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      # Way too much for a few SSH keys, but DigitalOcean volumes
      # unfortunately start at 1Gi.
      storage: 1Gi