Use folders for groups of YAML files

This keeps our configuration DRY and helps us to save keystrokes.
This commit is contained in:
Robert Schäfer 2019-01-31 23:24:20 +01:00
parent 5cd0485117
commit bbfe39e076
32 changed files with 93 additions and 193 deletions

View File

@ -19,35 +19,33 @@ There are many Kubernetes distributions, but if you're just getting started, Min
## Create a namespace locally
```shell
kubectl create -f namespace-staging.json
kubectl create -f namespace-staging.yml
```
## Change config maps according to your needs
```shell
cd ./staging
cd config/
cp db-migration-worker-configmap.template.yaml db-migration-worker-configmap.yaml
# edit all variables according to the setup of the remote legacy server
cd ..
```
## Apply the config map to staging namespace
```shell
cd ./staging
kubectl apply -f neo4j-configmap.yaml -f backend-configmap.yaml -f web-configmap.yaml -f db-migration-worker-configmap.yaml
kubectl apply -f config/
```
## Set up secrets and deploy them
```shell
cd ./staging
cp secrets.yaml.template secrets.yaml
# change all vars as needed and deploy it afterwards
kubectl apply -f secrets.yaml
```
## Deploy the app
## Create volumes and deployments
```shell
cd ./staging
kubectl apply -f ./volumes
kubectl apply -f neo4j-deployment.yaml -f backend-deployment.yaml -f web-deployment.yaml -f db-migration-worker-deployment.yaml
kubectl apply -f volumes/
kubectl apply -f deployments/
```
This can take a while.
Sit back and relax and have a look at your minikube dashboard:
@ -59,7 +57,7 @@ Wait until all pods turn green and they don't show a warning `Waiting: Container
## Expose the services
```shell
kubectl create -f services/
kubectl apply -f services/
```
## Access the service
@ -77,11 +75,12 @@ Copy your private ssh key and the `.known-hosts` file of your remote legacy serv
# check the corresponding db-migration-worker pod
kubectl --namespace=staging get pods
# change <POD_ID> below
kubectl cp path/to/your/ssh/keys/folder staging/nitro-db-migration-worker-<POD_ID>:/root/
kubectl cp path/to/your/ssh/keys/.ssh staging/nitro-db-migration-worker-<POD_ID>:/root/
```
Run the migration:
```shell
# change <POD_ID> below
# change the <POD_ID>s below
kubectl --namespace=staging exec -it nitro-db-migration-worker-<POD_ID> ./import.sh
kubectl --namespace=staging exec -it nitro-neo4j-<POD_ID> ./import/import.sh
```

1
config/.gitignore vendored Normal file
View File

@ -0,0 +1 @@
db-migration-worker.yml

View File

@ -1,13 +0,0 @@
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: nitro-backend
name: nitro-backend
namespace: staging
spec:
ports:
- port: 4000
targetPort: 4000
selector:
k8s-app: nitro-backend

View File

@ -1,12 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: sample-load-balancer
namespace: staging
spec:
type: LoadBalancer
ports:
- protocol: TCP
port: 80
targetPort: 80
name: http

View File

@ -1,15 +0,0 @@
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: backend-ingress
namespace: staging
annotations:
nginx.ingress.kubernetes.io/rewrite-target: /
spec:
rules:
- http:
paths:
- path: /
backend:
serviceName: backend
servicePort: 4000

View File

@ -1,22 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: ingress-nginx
namespace: staging
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
spec:
type: NodePort
ports:
- name: http
port: 80
targetPort: 80
protocol: TCP
- name: https
port: 443
targetPort: 443
protocol: TCP
selector:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx

View File

@ -1,13 +0,0 @@
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: nitro-web
name: nitro-web
namespace: staging
spec:
ports:
- port: 3000
targetPort: 3000
selector:
k8s-app: nitro-web

View File

@ -1,10 +0,0 @@
{
"kind": "Namespace",
"apiVersion": "v1",
"metadata": {
"name": "staging",
"labels": {
"name": "staging"
}
}
}

6
namespace-staging.yml Normal file
View File

@ -0,0 +1,6 @@
kind: Namespace
apiVersion: v1
metadata:
name: staging
labels:
name: staging

View File

@ -8,10 +8,8 @@ metadata:
spec:
ports:
- name: web
protocol: TCP
port: 4000
targetPort: 4000
nodePort: 32612
selector:
workload.user.cattle.io/workloadselector: deployment-staging-backend
type: LoadBalancer

View File

@ -10,11 +10,9 @@ spec:
workload.user.cattle.io/workloadselector: deployment-staging-neo4j
ports:
- name: bolt
protocol: TCP
port: 7687
targetPort: 7687
- name: web
protocol: TCP
port: 7474
targetPort: 7474
type: LoadBalancer

View File

@ -8,7 +8,6 @@ metadata:
spec:
ports:
- name: web
protocol: "TCP"
port: 3000
targetPort: 3000
selector:

View File

@ -1,21 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: nitro-web
namespace: staging
labels:
workload.user.cattle.io/workloadselector: deployment-staging-web
spec:
ports:
- name: web
protocol: "TCP"
port: 3000
targetPort: 3000
selector:
workload.user.cattle.io/workloadselector: deployment-staging-web
type: LoadBalancer
sessionAffinity: None
externalTrafficPolicy: Cluster
status:
loadBalancer: {}

1
staging/.gitignore vendored
View File

@ -1 +0,0 @@
db-migration-worker-configmap.yaml

View File

@ -1,11 +0,0 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: mongo-export-claim
namespace: staging
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi

View File

@ -1,12 +0,0 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: mongo-export-volume
namespace: staging
spec:
accessModes:
- ReadWriteMany
capacity:
storage: 1Gi
hostPath:
path: /data/shared/mongo-exports/

View File

@ -1,11 +0,0 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: ssh-keys-claim
namespace: staging
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Mi

View File

@ -1,12 +0,0 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: ssh-keys-volume
namespace: staging
spec:
accessModes:
- ReadWriteOnce
capacity:
storage: 1Mi
hostPath:
path: /data/pv0001/

View File

@ -1,11 +0,0 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: uploads-claim
namespace: staging
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 8Gi

View File

@ -1,12 +0,0 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: uploads-volume
namespace: staging
spec:
accessModes:
- ReadWriteMany
capacity:
storage: 8Gi
hostPath:
path: /data/shared/uploads/

25
volumes/mongo-export.yml Normal file
View File

@ -0,0 +1,25 @@
---
kind: PersistentVolume
apiVersion: v1
metadata:
name: mongo-export-volume
namespace: staging
spec:
accessModes:
- ReadWriteMany
capacity:
storage: 1Gi
hostPath:
path: /data/shared/mongo-exports/
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: mongo-export-claim
namespace: staging
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi

25
volumes/ssh-keys.yml Normal file
View File

@ -0,0 +1,25 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: ssh-keys-volume
namespace: staging
spec:
accessModes:
- ReadWriteOnce
capacity:
storage: 1Mi
hostPath:
path: /data/pv0001/
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: ssh-keys-claim
namespace: staging
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Mi

25
volumes/uploads.yml Normal file
View File

@ -0,0 +1,25 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: uploads-volume
namespace: staging
spec:
accessModes:
- ReadWriteMany
capacity:
storage: 8Gi
hostPath:
path: /data/shared/uploads/
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: uploads-claim
namespace: staging
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 8Gi