mirror of https://github.com/Ocelot-Social-Community/Ocelot-Social.git
synced 2025-12-13 07:46:06 +00:00

Merge pull request #485 from Human-Connection/353-restore_neo4j_backup

Refactor maintenance-worker docker image

commit 3cfa5b0cd4
@@ -21,8 +21,14 @@
* [Backend tests](backend/testing.md)
* [Contributing](CONTRIBUTING.md)
* [Kubernetes Deployment](deployment/README.md)
* [Minikube](deployment/minikube/README.md)
* [Digital Ocean](deployment/digital-ocean/README.md)
* [Kubernetes Dashboard](deployment/digital-ocean/dashboard/README.md)
* [HTTPS](deployment/digital-ocean/https/README.md)
* [Human Connection](deployment/human-connection/README.md)
* [Volumes](deployment/volumes/README.md)
* [Neo4J DB Backup](deployment/backup.md)
* [Maintenance](maintenance/README.md)
* [Legacy Migration](deployment/legacy-migration/README.md)
* [Feature Specification](cypress/features.md)
* [Code of conduct](CODE_OF_CONDUCT.md)
* [License](LICENSE.md)
@@ -15,7 +15,7 @@ node_modules/
scripts/
dist/

db-migration-worker/
maintenance-worker/
neo4j/

public/uploads/*
@@ -1,13 +0,0 @@
FROM mongo:4

RUN apt-get update && apt-get -y install --no-install-recommends wget apt-transport-https \
  && apt-get clean \
  && rm -rf /var/lib/apt/lists/*
RUN wget -O - https://debian.neo4j.org/neotechnology.gpg.key | apt-key add -
RUN echo 'deb https://debian.neo4j.org/repo stable/' | tee /etc/apt/sources.list.d/neo4j.list
RUN apt-get update && apt-get -y install --no-install-recommends openjdk-8-jre openssh-client neo4j rsync \
  && apt-get clean \
  && rm -rf /var/lib/apt/lists/*
COPY migration ./migration
COPY migrate.sh /usr/local/bin/migrate
COPY sync_uploads.sh /usr/local/bin/sync_uploads
@@ -1,40 +0,0 @@
# Legacy Migration

This guide helps you to import data from our legacy servers, which are using FeathersJS and MongoDB.

**You can skip this if you don't plan to migrate any legacy applications!**

## Prerequisites

You need [docker](https://www.docker.com/) installed on your machine. Furthermore you need SSH access to the server and you need to know the following login credentials and server settings:

| Environment variable | Description |
| :--- | :--- |
| SSH\_USERNAME | Your ssh username on the server |
| SSH\_HOST | The IP address of the server |
| MONGODB\_USERNAME | Mongo username on the server |
| MONGODB\_PASSWORD | Mongo password on the server |
| MONGODB\_AUTH\_DB | Mongo authentication database |
| MONGODB\_DATABASE | The name of the mongo database |
| UPLOADS\_DIRECTORY | Path to remote uploads folder |

## Run the database migration

Run `docker-compose` with all environment variables specified:

```bash
SSH_USERNAME=username SSH_HOST=some.server.com MONGODB_USERNAME='hc-api' MONGODB_PASSWORD='secret' MONGODB_DATABASE=hc_api MONGODB_AUTH_DB=hc_api UPLOADS_DIRECTORY=/var/www/api/uploads docker-compose up
```

Download the remote mongo database:

```bash
docker-compose exec db-migration-worker ./import.sh
```

Import the local download into Neo4J:

```bash
docker-compose exec neo4j import/import.sh
```
@@ -1,18 +0,0 @@
version: "3.7"

services:
  neo4j:
    environment:
      - NEO4J_AUTH=none
    ports:
      - 7687:7687
      - 7474:7474
  backend:
    ports:
      - 4001:4001
      - 4123:4123
    image: humanconnection/nitro-backend:builder
    build:
      context: .
      target: builder
    command: yarn run test:cypress
@@ -1,36 +0,0 @@
version: "3.7"

services:
  backend:
    volumes:
      - uploads:/nitro-backend/public/uploads
  neo4j:
    volumes:
      - mongo-export:/mongo-export
    environment:
      - NEO4J_apoc_import_file_enabled=true
  db-migration-worker:
    build:
      context: db-migration-worker
    volumes:
      - mongo-export:/mongo-export
      - uploads:/uploads
      - ./db-migration-worker/migration/:/migration
      - ./db-migration-worker/.ssh/:/root/.ssh/
    networks:
      - hc-network
    depends_on:
      - backend
    environment:
      - NEO4J_URI=bolt://neo4j:7687
      - "SSH_USERNAME=${SSH_USERNAME}"
      - "SSH_HOST=${SSH_HOST}"
      - "MONGODB_USERNAME=${MONGODB_USERNAME}"
      - "MONGODB_PASSWORD=${MONGODB_PASSWORD}"
      - "MONGODB_AUTH_DB=${MONGODB_AUTH_DB}"
      - "MONGODB_DATABASE=${MONGODB_DATABASE}"
      - "UPLOADS_DIRECTORY=${UPLOADS_DIRECTORY}"

volumes:
  mongo-export:
  uploads:
@@ -1,23 +0,0 @@
version: "3.7"

services:
  backend:
    image: humanconnection/nitro-backend:builder
    build:
      context: .
      target: builder
    volumes:
      - .:/nitro-backend
      - /nitro-backend/node_modules
    command: yarn run dev
  neo4j:
    environment:
      - NEO4J_AUTH=none
    ports:
      - 7687:7687
      - 7474:7474
    volumes:
      - neo4j-data:/data

volumes:
  neo4j-data:
@@ -1,14 +0,0 @@
version: "3.7"

services:
  neo4j:
    environment:
      - NEO4J_AUTH=none
    ports:
      - 7687:7687
      - 7474:7474
  backend:
    image: humanconnection/nitro-backend:builder
    build:
      context: .
      target: builder
@@ -1,34 +0,0 @@
version: "3.7"

services:
  backend:
    image: humanconnection/nitro-backend:latest
    build:
      context: .
      target: production
    networks:
      - hc-network
    depends_on:
      - neo4j
    ports:
      - 4000:4000
    environment:
      - NEO4J_URI=bolt://neo4j:7687
      - GRAPHQL_PORT=4000
      - GRAPHQL_URI=http://localhost:4000
      - CLIENT_URI=http://localhost:3000
      - JWT_SECRET=b/&&7b78BF&fv/Vd
      - MOCK=false
      - MAPBOX_TOKEN=pk.eyJ1IjoiaHVtYW4tY29ubmVjdGlvbiIsImEiOiJjajl0cnBubGoweTVlM3VwZ2lzNTNud3ZtIn0.KZ8KK9l70omjXbEkkbHGsQ
      - PRIVATE_KEY_PASSPHRASE=a7dsf78sadg87ad87sfagsadg78

  neo4j:
    image: humanconnection/neo4j:latest
    build:
      context: neo4j
    networks:
      - hc-network

networks:
  hc-network:
    name: hc-network
5 deployment/.gitignore vendored
@@ -1,3 +1,4 @@
secrets.yaml
*/secrets.yaml
kubeconfig.yaml
configmap.yaml
**/secrets.yaml
**/configmap.yaml
@@ -4,223 +4,8 @@ We deploy with [kubernetes](https://kubernetes.io/). In order to deploy your own
network you have to [install kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/)
and get a kubernetes cluster.

We have tested two different kubernetes providers: [Minikube](https://kubernetes.io/docs/tasks/tools/install-minikube/)
and [Digital Ocean](https://www.digitalocean.com/).

## Minikube

There are many Kubernetes providers, but if you're just getting started, Minikube is a tool that you can use to get your feet wet.

[Install Minikube](https://kubernetes.io/docs/tasks/tools/install-minikube/)

Open minikube dashboard:

```text
$ minikube dashboard
```

This will give you an overview. Some of the steps below need some time to make resources available to other dependent deployments. Keeping an eye on the dashboard is a great way to check that.

Follow the [installation instruction](deployment.md#installation-with-kubernetes) below. If all the pods and services have settled and everything looks green in your minikube dashboard, expose the `nitro-web` service on your host system with:

```text
$ minikube service nitro-web --namespace=human-connection
```

## Digital Ocean

1. At first, create a cluster on Digital Ocean.
2. Download the config.yaml if the process has finished.
3. Put the config file where you can find it later \(preferably in your home directory under `~/.kube/`\)
4. In the open terminal you can set the current config for the active session: `export KUBECONFIG=~/.kube/THE-NAME-OF-YOUR-CLUSTER-kubeconfig.yaml`. You could make this change permanent by adding the line to your `.bashrc` or `~/.config/fish/config.fish` depending on your shell.

   Otherwise you would have to always add `--kubeconfig ~/.kube/THE-NAME-OF-YOUR-CLUSTER-kubeconfig.yaml` on every `kubectl` command that you are running.

5. Now check if you can connect to the cluster and if it's your newly created one by running: `kubectl get nodes`

If you followed the steps above and can see your nodes, you can continue.

First, install the kubernetes dashboard:

```bash
$ kubectl apply -f dashboard/
$ kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/master/aio/deploy/recommended/kubernetes-dashboard.yaml
```

Get your token on the command line:

```bash
$ kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')
```

It should print something like:

```text
Name:         admin-user-token-6gl6l
Namespace:    kube-system
Labels:       <none>
Annotations:  kubernetes.io/service-account.name=admin-user
              kubernetes.io/service-account.uid=b16afba9-dfec-11e7-bbb9-901b0e532516

Type:  kubernetes.io/service-account-token

Data
====
ca.crt:     1025 bytes
namespace:  11 bytes
token:      eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLTZnbDZsIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiJiMTZhZmJhOS1kZmVjLTExZTctYmJiOS05MDFiMGU1MzI1MTYiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWRtaW4tdXNlciJ9.M70CU3lbu3PP4OjhFms8PVL5pQKj-jj4RNSLA4YmQfTXpPUuxqXjiTf094_Rzr0fgN_IVX6gC4fiNUL5ynx9KU-lkPfk0HnX8scxfJNzypL039mpGt0bbe1IXKSIRaq_9VW59Xz-yBUhycYcKPO9RM2Qa1Ax29nqNVko4vLn1_1wPqJ6XSq3GYI8anTzV8Fku4jasUwjrws6Cn6_sPEGmL54sq5R4Z5afUtv-mItTmqZZdxnkRqcJLlg2Y8WbCPogErbsaCDJoABQ7ppaqHetwfM_0yMun6ABOQbIwwl8pspJhpplKwyo700OSpvTT9zlBsu-b35lzXGBRHzv5g_RA
```

Proxy localhost to the remote kubernetes dashboard:

```bash
$ kubectl proxy
```

Grab the token from above and paste it into the login screen at [http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/](http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/)
## Installation with kubernetes

You have to complete some prerequisites, e.g. change some secrets according to your own setup.

### Edit secrets

```bash
$ cp secrets.template.yaml human-connection/secrets.yaml
```

Change all secrets as needed.

If you want to edit secrets, you have to `base64` encode them. See [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/secret/#creating-a-secret-manually).

```text
# example how to base64 a string:
$ echo -n 'admin' | base64
YWRtaW4=
```

Those secrets get `base64` decoded in a kubernetes pod.

### Create a namespace

```text
$ kubectl apply -f namespace-human-connection.yaml
```

Switch to the namespace `human-connection` in your kubernetes dashboard.

### Run the configuration

```text
$ kubectl apply -f human-connection/
```

This can take a while because kubernetes will download the docker images. Sit back and relax and have a look into your kubernetes dashboard. Wait until all pods turn green and they don't show a warning `Waiting: ContainerCreating` anymore.

#### Setup Ingress and HTTPS

Follow [this quick start guide](https://docs.cert-manager.io/en/latest/tutorials/acme/quick-start/index.html) and install certmanager via helm and tiller:

```text
$ kubectl create serviceaccount tiller --namespace=kube-system
$ kubectl create clusterrolebinding tiller-admin --serviceaccount=kube-system:tiller --clusterrole=cluster-admin
$ helm init --service-account=tiller
$ helm repo update
$ helm install stable/nginx-ingress
$ kubectl apply -f https://raw.githubusercontent.com/jetstack/cert-manager/release-0.6/deploy/manifests/00-crds.yaml
$ helm install --name cert-manager --namespace cert-manager stable/cert-manager
```

Create letsencrypt issuers. _Change the email address_ in these files before running this command.

```bash
$ kubectl apply -f human-connection/https/
```

Create an ingress service in namespace `human-connection`. _Change the domain name_ according to your needs:

```bash
$ kubectl apply -f human-connection/ingress/
```

Check that the ingress server is working correctly:

```bash
$ curl -kivL -H 'Host: <DOMAIN_NAME>' 'https://<IP_ADDRESS>'
```

If the response looks good, configure your domain registrar for the new IP address and the domain.

Now let's get a valid HTTPS certificate. According to the tutorial above, check your tls certificate for staging:

```bash
$ kubectl describe --namespace=human-connection certificate tls
$ kubectl describe --namespace=human-connection secret tls
```

If everything looks good, update the issuer of your ingress. Change the annotation `certmanager.k8s.io/issuer` from `letsencrypt-staging` to `letsencrypt-prod` in your ingress configuration in `human-connection/ingress/ingress.yaml`.

```bash
$ kubectl apply -f human-connection/ingress/ingress.yaml
```

Delete the former secret to force a refresh:

```text
$ kubectl --namespace=human-connection delete secret tls
```

Now, HTTPS should be configured on your domain. Congrats.
#### Legacy data migration

This setup is completely optional and only required if you have data on a server which is running our legacy code and you want to import that data. It will import the uploads folder and migrate a dump of mongodb into neo4j.

**Prepare migration of Human Connection legacy server**

Create a configmap with the specific connection data of your legacy server:

```bash
$ kubectl create configmap maintenance-worker \
  --namespace=human-connection \
  --from-literal=SSH_USERNAME=someuser \
  --from-literal=SSH_HOST=yourhost \
  --from-literal=MONGODB_USERNAME=hc-api \
  --from-literal=MONGODB_PASSWORD=secretpassword \
  --from-literal=MONGODB_AUTH_DB=hc_api \
  --from-literal=MONGODB_DATABASE=hc_api \
  --from-literal=UPLOADS_DIRECTORY=/var/www/api/uploads \
  --from-literal=NEO4J_URI=bolt://localhost:7687
```

Create a secret with your public and private ssh keys. As the [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/secret/#use-case-pod-with-ssh-keys) points out, you should be careful with your ssh keys. Anyone with access to your cluster will have access to your ssh keys. Better create a new pair with `ssh-keygen` and copy the public key to your legacy server with `ssh-copy-id`:

```bash
$ kubectl create secret generic ssh-keys \
  --namespace=human-connection \
  --from-file=id_rsa=/path/to/.ssh/id_rsa \
  --from-file=id_rsa.pub=/path/to/.ssh/id_rsa.pub \
  --from-file=known_hosts=/path/to/.ssh/known_hosts
```

**Migrate legacy database**

Patch the existing deployments to use a multi-container setup:

```bash
cd legacy-migration
kubectl apply -f volume-claim-mongo-export.yaml
kubectl patch --namespace=human-connection deployment nitro-backend --patch "$(cat deployment-backend.yaml)"
kubectl patch --namespace=human-connection deployment nitro-neo4j --patch "$(cat deployment-neo4j.yaml)"
cd ..
```

Run the migration:

```text
$ kubectl --namespace=human-connection get pods
# change <POD_IDs> below
$ kubectl --namespace=human-connection exec -it nitro-neo4j-65bbdb597c-nc2lv migrate
$ kubectl --namespace=human-connection exec -it nitro-backend-c6cc5ff69-8h96z sync_uploads
```

We have tested two different kubernetes providers: [Minikube](./minikube/README.md)
and [Digital Ocean](./digital-ocean/README.md).

Check out the specific documentation for your provider. After that, learn how
to apply the specific kubernetes configuration for [Human Connection](./human-connection/README.md).
@@ -9,8 +9,6 @@
  NEO4J_USER: "neo4j"
  NEO4J_AUTH: "none"
  CLIENT_URI: "https://nitro-staging.human-connection.org"
  MAPBOX_TOKEN: "pk.eyJ1IjoiaHVtYW4tY29ubmVjdGlvbiIsImEiOiJjajl0cnBubGoweTVlM3VwZ2lzNTNud3ZtIn0.KZ8KK9l70omjXbEkkbHGsQ"
  PRIVATE_KEY_PASSPHRASE: "a7dsf78sadg87ad87sfagsadg78"
metadata:
  name: configmap
  namespace: human-connection
@@ -1,39 +0,0 @@
---
kind: Pod
apiVersion: v1
metadata:
  name: nitro-db-migration-worker
  namespace: human-connection
spec:
  volumes:
    - name: secret-volume
      secret:
        secretName: ssh-keys
        defaultMode: 0400
    - name: mongo-export
      persistentVolumeClaim:
        claimName: mongo-export-claim
  containers:
    - name: nitro-db-migration-worker
      image: humanconnection/db-migration-worker:latest
      envFrom:
        - configMapRef:
            name: db-migration-worker
      volumeMounts:
        - name: secret-volume
          readOnly: false
          mountPath: /root/.ssh
        - name: mongo-export
          mountPath: /mongo-export/
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: mongo-export-claim
  namespace: human-connection
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
26 deployment/digital-ocean/README.md Normal file
@@ -0,0 +1,26 @@
# Digital Ocean

As a start, read the [introduction into kubernetes](https://www.digitalocean.com/community/tutorials/an-introduction-to-kubernetes) by the folks at Digital Ocean. The following section should enable you to deploy Human Connection to your kubernetes cluster.

## Connect to your local cluster

1. Create a cluster at [Digital Ocean](https://www.digitalocean.com/).
2. Download the `***-kubeconfig.yaml` from the Web UI.
3. Move the file to the default location where kubectl expects it: `mv ***-kubeconfig.yaml ~/.kube/config`. Alternatively, you can pass the config on every command with `--kubeconfig ***-kubeconfig.yaml` (see the sketch after this list).
4. Now check that you can connect to the cluster and that it's your newly created one by running: `kubectl get nodes`
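For example, these are the two ways of using the downloaded kubeconfig described in step 3; the file name is a placeholder:

```bash
# Option 1: make it kubectl's default configuration
$ mv ***-kubeconfig.yaml ~/.kube/config

# Option 2: keep the file elsewhere and pass it explicitly on every command
$ kubectl --kubeconfig=***-kubeconfig.yaml get nodes
```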
The output of `kubectl get nodes` should look something like this:
```
$ kubectl get nodes
NAME                  STATUS   ROLES    AGE   VERSION
nifty-driscoll-uu1w   Ready    <none>   69d   v1.13.2
nifty-driscoll-uuiw   Ready    <none>   69d   v1.13.2
nifty-driscoll-uusn   Ready    <none>   69d   v1.13.2
```

If you followed the steps above and can see your nodes, you can continue.

Digital Ocean kubernetes clusters don't have a graphical interface, so we suggest
setting up the [kubernetes dashboard](./dashboard/README.md) as a next step.
Configuring [HTTPS](./https/README.md) is a bit tricky, therefore we suggest
doing this as a last step.
55 deployment/digital-ocean/dashboard/README.md Normal file
@@ -0,0 +1,55 @@
# Install Kubernetes Dashboard

The kubernetes dashboard is optional but very helpful for debugging. If you want to install it, you have to do so only **once** per cluster:

```bash
# in folder deployment/digital-ocean/
$ kubectl apply -f dashboard/
$ kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/master/aio/deploy/recommended/kubernetes-dashboard.yaml
```

### Log in to your dashboard

Proxy the remote kubernetes dashboard to localhost:

```bash
$ kubectl proxy
```

Visit:

[http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/](http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/)

You should see a login screen.

To get your token for the dashboard you can run this command:

```bash
$ kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')
```

It should print something like:

```text
Name:         admin-user-token-6gl6l
Namespace:    kube-system
Labels:       <none>
Annotations:  kubernetes.io/service-account.name=admin-user
              kubernetes.io/service-account.uid=b16afba9-dfec-11e7-bbb9-901b0e532516

Type:  kubernetes.io/service-account-token

Data
====
ca.crt:     1025 bytes
namespace:  11 bytes
token:      eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLTZnbDZsIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiJiMTZhZmJhOS1kZmVjLTExZTctYmJiOS05MDFiMGU1MzI1MTYiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWRtaW4tdXNlciJ9.M70CU3lbu3PP4OjhFms8PVL5pQKj-jj4RNSLA4YmQfTXpPUuxqXjiTf094_Rzr0fgN_IVX6gC4fiNUL5ynx9KU-lkPfk0HnX8scxfJNzypL039mpGt0bbe1IXKSIRaq_9VW59Xz-yBUhycYcKPO9RM2Qa1Ax29nqNVko4vLn1_1wPqJ6XSq3GYI8anTzV8Fku4jasUwjrws6Cn6_sPEGmL54sq5R4Z5afUtv-mItTmqZZdxnkRqcJLlg2Y8WbCPogErbsaCDJoABQ7ppaqHetwfM_0yMun6ABOQbIwwl8pspJhpplKwyo700OSpvTT9zlBsu-b35lzXGBRHzv5g_RA
```

Grab the token from above and paste it into the [login screen](http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/)

When you are logged in, you should see something like:

![kubernetes dashboard](./dashboard-screenshot.png)

Feel free to save the login token from above in your password manager. Unlike the `kubeconfig` file, this token does not expire.
BIN deployment/digital-ocean/dashboard/dashboard-screenshot.png Normal file
Binary file not shown. (new image, 178 KiB)
57 deployment/digital-ocean/https/README.md Normal file
@@ -0,0 +1,57 @@
# Setup Ingress and HTTPS

Follow [this quick start guide](https://docs.cert-manager.io/en/latest/tutorials/acme/quick-start/index.html) and install certmanager via helm and tiller:

```text
$ kubectl create serviceaccount tiller --namespace=kube-system
$ kubectl create clusterrolebinding tiller-admin --serviceaccount=kube-system:tiller --clusterrole=cluster-admin
$ helm init --service-account=tiller
$ helm repo update
$ helm install stable/nginx-ingress
$ kubectl apply -f https://raw.githubusercontent.com/jetstack/cert-manager/release-0.6/deploy/manifests/00-crds.yaml
$ helm install --name cert-manager --namespace cert-manager stable/cert-manager
```

Create letsencrypt issuers. _Change the email address_ in these files before running this command:

```bash
# in folder deployment/digital-ocean/https/
$ kubectl apply -f issuer.yaml
```

Create an ingress service in namespace `human-connection`. _Change the domain name_ according to your needs:

```bash
# in folder deployment/digital-ocean/https/
$ kubectl apply -f ingress.yaml
```

Check that the ingress server is working correctly:

```bash
$ curl -kivL -H 'Host: <DOMAIN_NAME>' 'https://<IP_ADDRESS>'
```

If the response looks good, configure your domain registrar for the new IP address and the domain.
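In case you still need to look up that IP address: it is the external IP that was assigned to the nginx-ingress `LoadBalancer` service installed above, e.g.:

```bash
$ kubectl get services --all-namespaces | grep LoadBalancer
```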
Now let's get a valid HTTPS certificate. According to the tutorial above, check your tls certificate for staging:

```bash
$ kubectl describe --namespace=human-connection certificate tls
$ kubectl describe --namespace=human-connection secret tls
```

If everything looks good, update the issuer of your ingress. Change the annotation `certmanager.k8s.io/issuer` from `letsencrypt-staging` to `letsencrypt-prod` in your ingress configuration in `ingress.yaml`.
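A minimal sketch of locating and checking that edit, assuming the annotation sits directly in `ingress.yaml`:

```bash
# in folder deployment/digital-ocean/https/
$ grep -n 'certmanager.k8s.io/issuer' ingress.yaml
# after the edit, the annotation should read:
#   certmanager.k8s.io/issuer: letsencrypt-prod
```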
```bash
# in folder deployment/digital-ocean/https/
$ kubectl apply -f ingress.yaml
```

Delete the former secret to force a refresh:

```text
$ kubectl --namespace=human-connection delete secret tls
```

Now, HTTPS should be configured on your domain. Congrats.
58 deployment/human-connection/README.md Normal file
@@ -0,0 +1,58 @@
# Kubernetes Configuration for Human Connection

Deploying Human Connection with kubernetes is straightforward. All you have to
do is change certain parameters, such as domain names and API keys, and then
apply our provided configuration files to your cluster.

## Configuration

Copy our provided templates:

```bash
$ cp secrets.template.yaml human-connection/secrets.yaml
$ cp configmap.template.yaml human-connection/configmap.yaml
```

Change the `configmap.yaml` as needed; all variables will be available as
environment variables in your deployed kubernetes pods.

If you want to edit secrets, you have to `base64` encode them. See [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/secret/#creating-a-secret-manually).

```bash
# example how to base64 a string:
$ echo -n 'admin' | base64 --wrap 0
YWRtaW4=
```

Those secrets get `base64` decoded and are available as environment variables in
your deployed kubernetes pods.
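Once a pod is running you can double-check that the values arrived, for example (`<POD_NAME>` is a placeholder for one of your pods):

```bash
$ kubectl --namespace=human-connection get pods
$ kubectl --namespace=human-connection exec -it <POD_NAME> -- env | grep NEO4J
```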
## Create a namespace

```bash
$ kubectl apply -f namespace-human-connection.yaml
```

If you have a [kubernetes dashboard](../digital-ocean/dashboard/README.md)
deployed, you should switch to namespace `human-connection` in order to
monitor the state of your deployments.

## Create persistent volumes

While the deployments and services can easily be restored simply by deleting
and applying the kubernetes configurations again, certain data is not that
easily recovered. Therefore we separated persistent volumes from deployments
and services; there is a [dedicated section](../volumes/README.md) for them. Create those
persistent volumes once before you apply the configuration.

## Apply the configuration

```bash
# in folder deployment/
$ kubectl apply -f human-connection/
```

This can take a while because kubernetes will download the docker images. Sit
back and relax and have a look into your kubernetes dashboard. Wait until all
pods turn green and they don't show a warning `Waiting: ContainerCreating`
anymore.
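If you prefer the terminal over the dashboard, you can also watch the pods come up with, for example:

```bash
$ kubectl --namespace=human-connection get pods --watch
```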
@@ -43,15 +43,3 @@
      restartPolicy: Always
      terminationGracePeriodSeconds: 30
status: {}
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: uploads-claim
  namespace: human-connection
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 2Gi
@@ -6,6 +6,10 @@
  namespace: human-connection
spec:
  replicas: 1
  strategy:
    rollingUpdate:
      maxSurge: 0
      maxUnavailable: "100%"
  selector:
    matchLabels:
      human-connection.org/selector: deployment-human-connection-neo4j
@@ -53,15 +57,3 @@
            claimName: neo4j-data-claim
      restartPolicy: Always
      terminationGracePeriodSeconds: 30
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: neo4j-data-claim
  namespace: human-connection
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
85 deployment/legacy-migration/README.md Normal file
@@ -0,0 +1,85 @@
# Legacy data migration

This setup is **completely optional** and only required if you have data on a
server which is running our legacy code and you want to import that data. It
will import the uploads folder and migrate a dump of the legacy Mongo database
into our new Neo4J graph database.

## Configure Maintenance-Worker Pod

Create a configmap with the specific connection data of your legacy server:

```bash
$ kubectl create configmap maintenance-worker \
  --namespace=human-connection \
  --from-literal=SSH_USERNAME=someuser \
  --from-literal=SSH_HOST=yourhost \
  --from-literal=MONGODB_USERNAME=hc-api \
  --from-literal=MONGODB_PASSWORD=secretpassword \
  --from-literal=MONGODB_AUTH_DB=hc_api \
  --from-literal=MONGODB_DATABASE=hc_api \
  --from-literal=UPLOADS_DIRECTORY=/var/www/api/uploads
```

Create a secret with your public and private ssh keys. As the [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/secret/#use-case-pod-with-ssh-keys) points out, you should be careful with your ssh keys. Anyone with access to your cluster will have access to your ssh keys. Better create a new pair with `ssh-keygen` and copy the public key to your legacy server with `ssh-copy-id`:

```bash
$ kubectl create secret generic ssh-keys \
  --namespace=human-connection \
  --from-file=id_rsa=/path/to/.ssh/id_rsa \
  --from-file=id_rsa.pub=/path/to/.ssh/id_rsa.pub \
  --from-file=known_hosts=/path/to/.ssh/known_hosts
```
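If you still need to create such a dedicated key pair, here is a minimal sketch; user, host and file names are just examples matching the configmap above, and the empty passphrase is only to keep the example short:

```bash
# generate a new key pair used only for the migration
$ ssh-keygen -t rsa -f ./id_rsa -N ""
# authorize the new public key on the legacy server
$ ssh-copy-id -i ./id_rsa.pub someuser@yourhost
# record the server's host key for the known_hosts file referenced above
$ ssh-keyscan yourhost > ./known_hosts
```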
## Deploy a Temporary Maintenance-Worker Pod

Bring the application into maintenance mode.

{% hint style="info" %} TODO: implement maintenance mode {% endhint %}

Then temporarily delete the backend and database deployments:

```bash
$ kubectl --namespace=human-connection get deployments
NAME            READY   UP-TO-DATE   AVAILABLE   AGE
nitro-backend   1/1     1            1           3d11h
nitro-neo4j     1/1     1            1           3d11h
nitro-web       2/2     2            2           73d
$ kubectl --namespace=human-connection delete deployment nitro-neo4j
deployment.extensions "nitro-neo4j" deleted
$ kubectl --namespace=human-connection delete deployment nitro-backend
deployment.extensions "nitro-backend" deleted
```

Deploy the one-time maintenance-worker pod:

```bash
# in deployment/legacy-migration/
$ kubectl apply -f db-migration-worker.yaml
pod/nitro-maintenance-worker created
```

Import the legacy database and uploads:

```bash
$ kubectl --namespace=human-connection exec -it nitro-maintenance-worker bash
$ import_legacy_db
$ import_uploads
$ exit
```

Delete the pod when you're done:

```bash
$ kubectl --namespace=human-connection delete pod nitro-maintenance-worker
```

Oh, and of course you have to get those deleted deployments back. One way of
doing it would be:

```bash
# in folder deployment/
$ kubectl apply -f human-connection/deployment-backend.yaml -f human-connection/deployment-neo4j.yaml
```
@@ -1,27 +0,0 @@
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: nitro-backend
  namespace: human-connection
spec:
  template:
    spec:
      containers:
        - name: nitro-db-migration-worker
          image: humanconnection/db-migration-worker:latest
          imagePullPolicy: Always
          envFrom:
            - configMapRef:
                name: db-migration-worker
          volumeMounts:
            - name: secret-volume
              readOnly: false
              mountPath: /root/.ssh
            - name: uploads
              mountPath: /uploads/
      volumes:
        - name: secret-volume
          secret:
            secretName: ssh-keys
            defaultMode: 0400
@@ -1,39 +0,0 @@
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: nitro-neo4j
  namespace: human-connection
spec:
  template:
    spec:
      containers:
        - name: nitro-db-migration-worker
          image: humanconnection/db-migration-worker:latest
          imagePullPolicy: Always
          envFrom:
            - configMapRef:
                name: db-migration-worker
          env:
            - name: COMMIT
              value: <BACKEND_COMMIT>
            - name: NEO4J_URI
              value: bolt://localhost:7687
          volumeMounts:
            - name: secret-volume
              readOnly: false
              mountPath: /root/.ssh
            - name: mongo-export
              mountPath: /mongo-export/
        - name: nitro-neo4j
          volumeMounts:
            - mountPath: /mongo-export/
              name: mongo-export
      volumes:
        - name: secret-volume
          secret:
            secretName: ssh-keys
            defaultMode: 0400
        - name: mongo-export
          persistentVolumeClaim:
            claimName: mongo-export-claim
34 deployment/legacy-migration/maintenance-worker.yaml Normal file
@@ -0,0 +1,34 @@
---
kind: Pod
apiVersion: v1
metadata:
  name: nitro-maintenance-worker
  namespace: human-connection
spec:
  containers:
    - name: nitro-maintenance-worker
      image: humanconnection/maintenance-worker:latest
      envFrom:
        - configMapRef:
            name: maintenance-worker
        - configMapRef:
            name: configmap
      volumeMounts:
        - name: secret-volume
          readOnly: false
          mountPath: /root/.ssh
        - name: uploads
          mountPath: /nitro-backend/public/uploads
        - name: neo4j-data
          mountPath: /data/
  volumes:
    - name: secret-volume
      secret:
        secretName: ssh-keys
        defaultMode: 0400
    - name: uploads
      persistentVolumeClaim:
        claimName: uploads-claim
    - name: neo4j-data
      persistentVolumeClaim:
        claimName: neo4j-data-claim
11 deployment/legacy-migration/maintenance-worker/Dockerfile Normal file
@@ -0,0 +1,11 @@
FROM humanconnection/neo4j:latest

ENV NODE_ENV=maintenance
EXPOSE 7687 7474

RUN apk upgrade --update
RUN apk add --no-cache mongodb-tools openssh nodejs yarn rsync

COPY known_hosts /root/.ssh/known_hosts
COPY migration ./migration
COPY ./binaries/* /usr/local/bin/
@@ -0,0 +1,6 @@
#!/usr/bin/env bash
set -e

mkdir -p ~/.ssh
echo $SSH_PRIVATE_KEY | base64 -d > ~/.ssh/id_rsa
chmod 600 ~/.ssh/id_rsa
@@ -9,4 +9,5 @@ do
fi
done

[ -z "$SSH_PRIVATE_KEY" ] || create_private_ssh_key_from_env
rsync --archive --update --verbose ${SSH_USERNAME}@${SSH_HOST}:${UPLOADS_DIRECTORY}/* /uploads/
@@ -0,0 +1,42 @@
version: "3.4"

services:
  maintenance:
    image: humanconnection/maintenance-worker:latest
    build:
      context: .
    volumes:
      - uploads:/uploads
      - neo4j-data:/data
      - ./migration/:/migration
    networks:
      - hc-network
    environment:
      - GRAPHQL_PORT=4000
      - GRAPHQL_URI=http://localhost:4000
      - CLIENT_URI=http://localhost:3000
      - JWT_SECRET=b/&&7b78BF&fv/Vd
      - MOCK=false
      - MAPBOX_TOKEN=pk.eyJ1IjoiaHVtYW4tY29ubmVjdGlvbiIsImEiOiJjajl0cnBubGoweTVlM3VwZ2lzNTNud3ZtIn0.KZ8KK9l70omjXbEkkbHGsQ
      - PRIVATE_KEY_PASSPHRASE=a7dsf78sadg87ad87sfagsadg78
      - NEO4J_URI=bolt://localhost:7687
      - NEO4J_apoc_import_file_enabled=true
      - NEO4J_AUTH=none
      - "SSH_USERNAME=${SSH_USERNAME}"
      - "SSH_HOST=${SSH_HOST}"
      - "SSH_PRIVATE_KEY=${SSH_PRIVATE_KEY}"
      - "MONGODB_USERNAME=${MONGODB_USERNAME}"
      - "MONGODB_PASSWORD=${MONGODB_PASSWORD}"
      - "MONGODB_AUTH_DB=${MONGODB_AUTH_DB}"
      - "MONGODB_DATABASE=${MONGODB_DATABASE}"
      - "UPLOADS_DIRECTORY=${UPLOADS_DIRECTORY}"
    ports:
      - 7687:7687
      - 7474:7474

volumes:
  uploads:
  neo4j-data:

networks:
  hc-network:
@@ -9,16 +9,17 @@ echo "MONGODB_DATABASE ${MONGODB_DATABASE}"
echo "MONGODB_AUTH_DB ${MONGODB_AUTH_DB}"
echo "-------------------------------------------------"

mongo ${MONGODB_DATABASE} --eval "db.dropDatabase();"
rm -rf /mongo-export/*
[ -z "$SSH_PRIVATE_KEY" ] || create_private_ssh_key_from_env

rm -rf /tmp/mongo-export/*
mkdir -p /tmp/mongo-export

ssh -4 -M -S my-ctrl-socket -fnNT -L 27018:localhost:27017 -l ${SSH_USERNAME} ${SSH_HOST}
mongodump --host localhost -d ${MONGODB_DATABASE} --port 27018 --username ${MONGODB_USERNAME} --password ${MONGODB_PASSWORD} --authenticationDatabase ${MONGODB_AUTH_DB} --gzip --archive=/tmp/mongodump.archive
mongorestore --gzip --archive=/tmp/mongodump.archive
ssh -S my-ctrl-socket -O check -l ${SSH_USERNAME} ${SSH_HOST}
ssh -S my-ctrl-socket -O exit -l ${SSH_USERNAME} ${SSH_HOST}

for collection in "categories" "badges" "users" "contributions" "comments" "follows" "shouts"
do
  mongoexport --db ${MONGODB_DATABASE} --collection $collection --out "/mongo-export/$collection.json"
  mongoexport --host localhost -d ${MONGODB_DATABASE} --port 27018 --username ${MONGODB_USERNAME} --password ${MONGODB_PASSWORD} --authenticationDatabase ${MONGODB_AUTH_DB} --db ${MONGODB_DATABASE} --collection $collection --out "/tmp/mongo-export/$collection.json"
done

ssh -S my-ctrl-socket -O check -l ${SSH_USERNAME} ${SSH_HOST}
ssh -S my-ctrl-socket -O exit -l ${SSH_USERNAME} ${SSH_HOST}
@@ -1,4 +1,4 @@
CALL apoc.load.json('file:/mongo-export/badges.json') YIELD value as badge
CALL apoc.load.json('file:/tmp/mongo-export/badges.json') YIELD value as badge
MERGE(b:Badge {id: badge._id["$oid"]})
ON CREATE SET
b.key = badge.key,
@@ -1,4 +1,4 @@
CALL apoc.load.json('file:/mongo-export/categories.json') YIELD value as category
CALL apoc.load.json('file:/tmp/mongo-export/categories.json') YIELD value as category
MERGE(c:Category {id: category._id["$oid"]})
ON CREATE SET
c.name = category.title,
@@ -1,4 +1,4 @@
CALL apoc.load.json('file:/mongo-export/comments.json') YIELD value as json
CALL apoc.load.json('file:/tmp/mongo-export/comments.json') YIELD value as json
MERGE (comment:Comment {id: json._id["$oid"]})
ON CREATE SET
comment.content = json.content,
@@ -1,4 +1,4 @@
CALL apoc.load.json('file:/mongo-export/contributions.json') YIELD value as post
CALL apoc.load.json('file:/tmp/mongo-export/contributions.json') YIELD value as post
MERGE (p:Post {id: post._id["$oid"]})
ON CREATE SET
p.title = post.title,
@@ -1,4 +1,4 @@
CALL apoc.load.json('file:/mongo-export/follows.json') YIELD value as follow
CALL apoc.load.json('file:/tmp/mongo-export/follows.json') YIELD value as follow
MATCH (u1:User {id: follow.userId}), (u2:User {id: follow.foreignId})
MERGE (u1)-[:FOLLOWS]->(u2)
;
@@ -1,4 +1,4 @@
CALL apoc.load.json('file:/mongo-export/shouts.json') YIELD value as shout
CALL apoc.load.json('file:/tmp/mongo-export/shouts.json') YIELD value as shout
MATCH (u:User {id: shout.userId}), (p:Post {id: shout.foreignId})
MERGE (u)-[:SHOUTED]->(p)
;
@@ -1,4 +1,4 @@
CALL apoc.load.json('file:/mongo-export/users.json') YIELD value as user
CALL apoc.load.json('file:/tmp/mongo-export/users.json') YIELD value as user
MERGE(u:User {id: user._id["$oid"]})
ON CREATE SET
u.name = user.name,
25 deployment/minikube/README.md Normal file
@@ -0,0 +1,25 @@
# Minikube

There are many Kubernetes providers, but if you're just getting started, Minikube is a tool that you can use to get your feet wet.

After you have [installed Minikube](https://kubernetes.io/docs/tasks/tools/install-minikube/),
open your minikube dashboard:

```text
$ minikube dashboard
```

This will give you an overview. Some of the steps below need some time to make resources available to other dependent deployments. Keeping an eye on the dashboard is a great way to check that.

Follow the installation instructions for [Human Connection](../human-connection/README.md).
If all the pods and services have settled and everything looks green in your
minikube dashboard, expose the services you want on your host system.

For example:

```text
$ minikube service nitro-web --namespace=human-connection
# optionally
$ minikube service nitro-backend --namespace=human-connection
```
@@ -4,6 +4,7 @@ data:
  JWT_SECRET: "Yi8mJjdiNzhCRiZmdi9WZA=="
  MONGODB_PASSWORD: "TU9OR09EQl9QQVNTV09SRA=="
  PRIVATE_KEY_PASSPHRASE: "YTdkc2Y3OHNhZGc4N2FkODdzZmFnc2FkZzc4"
  MAPBOX_TOKEN: "cGsuZXlKMUlqb2lhSFZ0WVc0dFkyOXVibVZqZEdsdmJpSXNJbUVpT2lKamFqbDBjbkJ1Ykdvd2VUVmxNM1Z3WjJsek5UTnVkM1p0SW4wLktaOEtLOWw3MG9talhiRWtrYkhHc1EK"
metadata:
  name: human-connection
  namespace: human-connection
42 deployment/volumes/README.md Normal file
@@ -0,0 +1,42 @@
# Persistent Volumes

At the moment, the application needs two persistent volumes:

* The `/data/` folder where `neo4j` stores its database and
* the folder `/nitro-backend/public/uploads` where the backend stores uploads.

As a matter of precaution, the persistent volume claims that set up these volumes
live in a separate folder. You don't want to accidentally lose all your data in
your database by running `kubectl delete -f human-connection/`, do you?

## Create Persistent Volume Claims

Run the following:
```sh
# in folder deployment/
$ kubectl apply -f volumes
persistentvolumeclaim/neo4j-data-claim created
persistentvolumeclaim/uploads-claim created
```

## Change Reclaim Policy

We recommend changing the `ReclaimPolicy`, so that if you delete the persistent
volume claims, the associated volumes will be released, not deleted:

```sh
$ kubectl --namespace=human-connection get pv

NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                               STORAGECLASS       REASON   AGE
pvc-bd02a715-66d0-11e9-be52-ba9c337f4551   1Gi        RWO            Delete           Bound    human-connection/neo4j-data-claim   do-block-storage            4m24s
pvc-bd208086-66d0-11e9-be52-ba9c337f4551   2Gi        RWO            Delete           Bound    human-connection/uploads-claim      do-block-storage            4m12s
```

Get the volume id from above, then change the `ReclaimPolicy` with:
```sh
kubectl patch pv <VOLUME-ID> -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}'

# in the above example
kubectl patch pv pvc-bd02a715-66d0-11e9-be52-ba9c337f4551 -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}'
kubectl patch pv pvc-bd208086-66d0-11e9-be52-ba9c337f4551 -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}'
```
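To confirm the change you can read the policy back, for example:
```sh
kubectl get pv <VOLUME-ID> -o jsonpath='{.spec.persistentVolumeReclaimPolicy}'
```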
@@ -2,7 +2,7 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: mongo-export-claim
  name: neo4j-data-claim
  namespace: human-connection
spec:
  accessModes:
12 deployment/volumes/uploads.yaml Normal file
@@ -0,0 +1,12 @@
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: uploads-claim
  namespace: human-connection
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 2Gi
@@ -38,7 +38,7 @@ services:
  neo4j:
    image: humanconnection/neo4j:latest
    build:
      context: backend/neo4j
      context: neo4j
    networks:
      - hc-network