diff --git a/SUMMARY.md b/SUMMARY.md index e1cf09126..7c1e41d13 100644 --- a/SUMMARY.md +++ b/SUMMARY.md @@ -21,8 +21,14 @@ * [Backend tests](backend/testing.md) * [Contributing](CONTRIBUTING.md) * [Kubernetes Deployment](deployment/README.md) + * [Minikube](deployment/minikube/README.md) + * [Digital Ocean](deployment/digital-ocean/README.md) + * [Kubernetes Dashboard](deployment/digital-ocean/dashboard/README.md) + * [HTTPS](deployment/digital-ocean/https/README.md) + * [Human Connection](deployment/human-connection/README.md) + * [Volumes](deployment/volumes/README.md) * [Neo4J DB Backup](deployment/backup.md) -* [Maintenance](maintenance/README.md) + * [Legacy Migration](deployment/legacy-migration/README.md) * [Feature Specification](cypress/features.md) * [Code of conduct](CODE_OF_CONDUCT.md) * [License](LICENSE.md) diff --git a/backend/.dockerignore b/backend/.dockerignore index 31f5b28f3..25a941824 100644 --- a/backend/.dockerignore +++ b/backend/.dockerignore @@ -15,7 +15,7 @@ node_modules/ scripts/ dist/ -db-migration-worker/ +maintenance-worker/ neo4j/ public/uploads/* diff --git a/backend/db-migration-worker/Dockerfile b/backend/db-migration-worker/Dockerfile deleted file mode 100644 index 865a4c330..000000000 --- a/backend/db-migration-worker/Dockerfile +++ /dev/null @@ -1,13 +0,0 @@ -FROM mongo:4 - -RUN apt-get update && apt-get -y install --no-install-recommends wget apt-transport-https \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* -RUN wget -O - https://debian.neo4j.org/neotechnology.gpg.key | apt-key add - -RUN echo 'deb https://debian.neo4j.org/repo stable/' | tee /etc/apt/sources.list.d/neo4j.list -RUN apt-get update && apt-get -y install --no-install-recommends openjdk-8-jre openssh-client neo4j rsync \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* -COPY migration ./migration -COPY migrate.sh /usr/local/bin/migrate -COPY sync_uploads.sh /usr/local/bin/sync_uploads diff --git a/backend/db-migration-worker/README.md b/backend/db-migration-worker/README.md deleted file mode 100644 index 3d0f86edd..000000000 --- a/backend/db-migration-worker/README.md +++ /dev/null @@ -1,40 +0,0 @@ -# Legacy Migration - -This guide helps you to import data from our legacy servers, which are using FeathersJS and MongoDB. - -**You can skip this if you don't plan to migrate any legacy applications!** - -## Prerequisites - -You need [docker](https://www.docker.com/) installed on your machine. 
Furthermore you need SSH access to the server and you need to know the following login credentials and server settings: - -| Environment variable | Description | -| :--- | :--- | -| SSH\_USERNAME | Your ssh username on the server | -| SSH\_HOST | The IP address of the server | -| MONGODB\_USERNAME | Mongo username on the server | -| MONGODB\_PASSWORD | Mongo password on the server | -| MONGODB\_AUTH\_DB | Mongo authentication database | -| MONGODB\_DATABASE | The name of the mongo database | -| UPLOADS\_DIRECTORY | Path to remote uploads folder | - -## Run the database migration - -Run `docker-compose` with all environment variables specified: - -```bash -SSH_USERNAME=username SSH_HOST=some.server.com MONGODB_USERNAME='hc-api' MONGODB_PASSWORD='secret' MONGODB_DATABASE=hc_api MONGODB_AUTH_DB=hc_api UPLOADS_DIRECTORY=/var/www/api/uploads docker-compose up -``` - -Download the remote mongo database: - -```bash -docker-compose exec db-migration-worker ./import.sh -``` - -Import the local download into Neo4J: - -```bash -docker-compose exec neo4j import/import.sh -``` - diff --git a/backend/docker-compose.cypress.yml b/backend/docker-compose.cypress.yml deleted file mode 100644 index 3d577e638..000000000 --- a/backend/docker-compose.cypress.yml +++ /dev/null @@ -1,18 +0,0 @@ -version: "3.7" - -services: - neo4j: - environment: - - NEO4J_AUTH=none - ports: - - 7687:7687 - - 7474:7474 - backend: - ports: - - 4001:4001 - - 4123:4123 - image: humanconnection/nitro-backend:builder - build: - context: . - target: builder - command: yarn run test:cypress diff --git a/backend/docker-compose.db-migration.yml b/backend/docker-compose.db-migration.yml deleted file mode 100644 index 02f054d1b..000000000 --- a/backend/docker-compose.db-migration.yml +++ /dev/null @@ -1,36 +0,0 @@ -version: "3.7" - -services: - backend: - volumes: - - uploads:/nitro-backend/public/uploads - neo4j: - volumes: - - mongo-export:/mongo-export - environment: - - NEO4J_apoc_import_file_enabled=true - db-migration-worker: - build: - context: db-migration-worker - volumes: - - mongo-export:/mongo-export - - uploads:/uploads - - ./db-migration-worker/migration/:/migration - - ./db-migration-worker/.ssh/:/root/.ssh/ - networks: - - hc-network - depends_on: - - backend - environment: - - NEO4J_URI=bolt://neo4j:7687 - - "SSH_USERNAME=${SSH_USERNAME}" - - "SSH_HOST=${SSH_HOST}" - - "MONGODB_USERNAME=${MONGODB_USERNAME}" - - "MONGODB_PASSWORD=${MONGODB_PASSWORD}" - - "MONGODB_AUTH_DB=${MONGODB_AUTH_DB}" - - "MONGODB_DATABASE=${MONGODB_DATABASE}" - - "UPLOADS_DIRECTORY=${UPLOADS_DIRECTORY}" - -volumes: - mongo-export: - uploads: diff --git a/backend/docker-compose.override.yml b/backend/docker-compose.override.yml deleted file mode 100644 index b972c31f6..000000000 --- a/backend/docker-compose.override.yml +++ /dev/null @@ -1,23 +0,0 @@ -version: "3.7" - -services: - backend: - image: humanconnection/nitro-backend:builder - build: - context: . 
- target: builder - volumes: - - .:/nitro-backend - - /nitro-backend/node_modules - command: yarn run dev - neo4j: - environment: - - NEO4J_AUTH=none - ports: - - 7687:7687 - - 7474:7474 - volumes: - - neo4j-data:/data - -volumes: - neo4j-data: diff --git a/backend/docker-compose.travis.yml b/backend/docker-compose.travis.yml deleted file mode 100644 index e1998f6dd..000000000 --- a/backend/docker-compose.travis.yml +++ /dev/null @@ -1,14 +0,0 @@ -version: "3.7" - -services: - neo4j: - environment: - - NEO4J_AUTH=none - ports: - - 7687:7687 - - 7474:7474 - backend: - image: humanconnection/nitro-backend:builder - build: - context: . - target: builder diff --git a/backend/docker-compose.yml b/backend/docker-compose.yml deleted file mode 100644 index 30d102f96..000000000 --- a/backend/docker-compose.yml +++ /dev/null @@ -1,34 +0,0 @@ -version: "3.7" - -services: - backend: - image: humanconnection/nitro-backend:latest - build: - context: . - target: production - networks: - - hc-network - depends_on: - - neo4j - ports: - - 4000:4000 - environment: - - NEO4J_URI=bolt://neo4j:7687 - - GRAPHQL_PORT=4000 - - GRAPHQL_URI=http://localhost:4000 - - CLIENT_URI=http://localhost:3000 - - JWT_SECRET=b/&&7b78BF&fv/Vd - - MOCK=false - - MAPBOX_TOKEN=pk.eyJ1IjoiaHVtYW4tY29ubmVjdGlvbiIsImEiOiJjajl0cnBubGoweTVlM3VwZ2lzNTNud3ZtIn0.KZ8KK9l70omjXbEkkbHGsQ - - PRIVATE_KEY_PASSPHRASE=a7dsf78sadg87ad87sfagsadg78 - - neo4j: - image: humanconnection/neo4j:latest - build: - context: neo4j - networks: - - hc-network - -networks: - hc-network: - name: hc-network diff --git a/backend/package.json b/backend/package.json index 942cd8023..a258b1f27 100644 --- a/backend/package.json +++ b/backend/package.json @@ -85,7 +85,7 @@ "eslint": "~5.16.0", "eslint-config-standard": "~12.0.0", "eslint-plugin-import": "~2.17.2", - "eslint-plugin-jest": "~22.4.1", + "eslint-plugin-jest": "~22.5.0", "eslint-plugin-node": "~8.0.1", "eslint-plugin-promise": "~4.1.1", "eslint-plugin-standard": "~4.0.0", diff --git a/backend/yarn.lock b/backend/yarn.lock index b7cf8099b..e66d93297 100644 --- a/backend/yarn.lock +++ b/backend/yarn.lock @@ -3030,10 +3030,10 @@ eslint-plugin-import@~2.17.2: read-pkg-up "^2.0.0" resolve "^1.10.0" -eslint-plugin-jest@~22.4.1: - version "22.4.1" - resolved "https://registry.yarnpkg.com/eslint-plugin-jest/-/eslint-plugin-jest-22.4.1.tgz#a5fd6f7a2a41388d16f527073b778013c5189a9c" - integrity sha512-gcLfn6P2PrFAVx3AobaOzlIEevpAEf9chTpFZz7bYfc7pz8XRv7vuKTIE4hxPKZSha6XWKKplDQ0x9Pq8xX2mg== +eslint-plugin-jest@~22.5.0: + version "22.5.0" + resolved "https://registry.yarnpkg.com/eslint-plugin-jest/-/eslint-plugin-jest-22.5.0.tgz#3a02527a5b08f7232f7bb0a52da98407bf84cdd0" + integrity sha512-YLeCRAuU3qP9lRZMul1/IbxXGg1THVpWFPBEa+VUQkcqEtO3W9GDKZ84MxYxzKTwMChTjj1l2vuNKva8HYtGPg== eslint-plugin-node@~8.0.1: version "8.0.1" diff --git a/deployment/.gitignore b/deployment/.gitignore index aad0daea8..14cfa18ed 100644 --- a/deployment/.gitignore +++ b/deployment/.gitignore @@ -1,3 +1,4 @@ secrets.yaml -*/secrets.yaml -kubeconfig.yaml +configmap.yaml +**/secrets.yaml +**/configmap.yaml diff --git a/deployment/README.md b/deployment/README.md index 84912d2a5..0615ccf9b 100644 --- a/deployment/README.md +++ b/deployment/README.md @@ -4,223 +4,8 @@ We deploy with [kubernetes](https://kubernetes.io/). In order to deploy your own network you have to [install kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) and get a kubernetes cluster. 
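+
+Once `kubectl` is installed and your cluster exists, a quick sanity check could
+look like this (a minimal sketch; the exact output depends on your provider):
+
+```bash
+# verify that kubectl is installed and can reach the cluster
+$ kubectl version
+$ kubectl cluster-info
+```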
-We have tested two different kubernetes providers: [Minikube](https://kubernetes.io/docs/tasks/tools/install-minikube/) -and [Digital Ocean](https://www.digitalocean.com/). - -## Minikube - -There are many Kubernetes providers, but if you're just getting started, Minikube is a tool that you can use to get your feet wet. - -[Install Minikube](https://kubernetes.io/docs/tasks/tools/install-minikube/) - -Open minikube dashboard: - -```text -$ minikube dashboard -``` - -This will give you an overview. Some of the steps below need some timing to make ressources available to other dependent deployments. Keeping an eye on the dashboard is a great way to check that. - -Follow the [installation instruction](deployment.md#installation-with-kubernetes) below. If all the pods and services have settled and everything looks green in your minikube dashboard, expose the `nitro-web` service on your host system with: - -```text -$ minikube service nitro-web --namespace=human-connection -``` - -## Digital Ocean - -1. At first, create a cluster on Digital Ocean. -2. Download the config.yaml if the process has finished. -3. Put the config file where you can find it later \(preferable in your home directory under `~/.kube/`\) -4. In the open terminal you can set the current config for the active session: `export KUBECONFIG=~/.kube/THE-NAME-OF-YOUR-CLUSTER-kubeconfig.yaml`. You could make this change permanent by adding the line to your `.bashrc` or `~/.config/fish/config.fish` depending on your shell. - - Otherwise you would have to always add `--kubeconfig ~/.kube/THE-NAME-OF-YOUR-CLUSTER-kubeconfig.yaml` on every `kubectl` command that you are running. - -5. Now check if you can connect to the cluster and if its your newly created one by running: `kubectl get nodes` - -If you got the steps right above and see your nodes you can continue. 
- -First, install kubernetes dashboard: - -```bash -$ kubectl apply -f dashboard/ -$ kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/master/aio/deploy/recommended/kubernetes-dashboard.yaml -``` - -Get your token on the command line: - -```bash -$ kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}') -``` - -It should print something like: - -```text -Name: admin-user-token-6gl6l -Namespace: kube-system -Labels: -Annotations: kubernetes.io/service-account.name=admin-user - kubernetes.io/service-account.uid=b16afba9-dfec-11e7-bbb9-901b0e532516 - -Type: kubernetes.io/service-account-token - -Data -==== -ca.crt: 1025 bytes -namespace: 11 bytes -token: eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLTZnbDZsIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiJiMTZhZmJhOS1kZmVjLTExZTctYmJiOS05MDFiMGU1MzI1MTYiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWRtaW4tdXNlciJ9.M70CU3lbu3PP4OjhFms8PVL5pQKj-jj4RNSLA4YmQfTXpPUuxqXjiTf094_Rzr0fgN_IVX6gC4fiNUL5ynx9KU-lkPfk0HnX8scxfJNzypL039mpGt0bbe1IXKSIRaq_9VW59Xz-yBUhycYcKPO9RM2Qa1Ax29nqNVko4vLn1_1wPqJ6XSq3GYI8anTzV8Fku4jasUwjrws6Cn6_sPEGmL54sq5R4Z5afUtv-mItTmqZZdxnkRqcJLlg2Y8WbCPogErbsaCDJoABQ7ppaqHetwfM_0yMun6ABOQbIwwl8pspJhpplKwyo700OSpvTT9zlBsu-b35lzXGBRHzv5g_RA -``` - -Proxy localhost to the remote kubernetes dashboard: - -```bash -$ kubectl proxy -``` - -Grab the token from above and paste it into the login screen at [http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/](http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/) - -## Installation with kubernetes - -You have to do some prerequisites e.g. change some secrets according to your own setup. - -### Edit secrets - -```bash -$ cp secrets.template.yaml human-connection/secrets.yaml -``` - -Change all secrets as needed. - -If you want to edit secrets, you have to `base64` encode them. See [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/secret/#creating-a-secret-manually). - -```text -# example how to base64 a string: -$ echo -n 'admin' | base64 -YWRtaW4= -``` - -Those secrets get `base64` decoded in a kubernetes pod. - -### Create a namespace - -```text -$ kubectl apply -f namespace-human-connection.yaml -``` - -Switch to the namespace `human-connection` in your kubernetes dashboard. - -### Run the configuration - -```text -$ kubectl apply -f human-connection/ -``` - -This can take a while because kubernetes will download the docker images. Sit back and relax and have a look into your kubernetes dashboard. Wait until all pods turn green and they don't show a warning `Waiting: ContainerCreating` anymore. 
- -#### Setup Ingress and HTTPS - -Follow [this quick start guide](https://docs.cert-manager.io/en/latest/tutorials/acme/quick-start/index.html) and install certmanager via helm and tiller: - -```text -$ kubectl create serviceaccount tiller --namespace=kube-system -$ kubectl create clusterrolebinding tiller-admin --serviceaccount=kube-system:tiller --clusterrole=cluster-admin -$ helm init --service-account=tiller -$ helm repo update -$ helm install stable/nginx-ingress -$ kubectl apply -f https://raw.githubusercontent.com/jetstack/cert-manager/release-0.6/deploy/manifests/00-crds.yaml -$ helm install --name cert-manager --namespace cert-manager stable/cert-manager -``` - -Create letsencrypt issuers. _Change the email address_ in these files before running this command. - -```bash -$ kubectl apply -f human-connection/https/ -``` - -Create an ingress service in namespace `human-connection`. _Change the domain name_ according to your needs: - -```bash -$ kubectl apply -f human-connection/ingress/ -``` - -Check the ingress server is working correctly: - -```bash -$ curl -kivL -H 'Host: ' 'https://' -``` - -If the response looks good, configure your domain registrar for the new IP address and the domain. - -Now let's get a valid HTTPS certificate. According to the tutorial above, check your tls certificate for staging: - -```bash -$ kubectl describe --namespace=human-connection certificate tls -$ kubectl describe --namespace=human-connection secret tls -``` - -If everything looks good, update the issuer of your ingress. Change the annotation `certmanager.k8s.io/issuer` from `letsencrypt-staging` to `letsencrypt-prod` in your ingress configuration in `human-connection/ingress/ingress.yaml`. - -```bash -$ kubectl apply -f human-connection/ingress/ingress.yaml -``` - -Delete the former secret to force a refresh: - -```text -$ kubectl --namespace=human-connection delete secret tls -``` - -Now, HTTPS should be configured on your domain. Congrats. - -#### Legacy data migration - -This setup is completely optional and only required if you have data on a server which is running our legacy code and you want to import that data. It will import the uploads folder and migrate a dump of mongodb into neo4j. - -**Prepare migration of Human Connection legacy server** - -Create a configmap with the specific connection data of your legacy server: - -```bash -$ kubectl create configmap maintenance-worker \ - --namespace=human-connection \ - --from-literal=SSH_USERNAME=someuser \ - --from-literal=SSH_HOST=yourhost \ - --from-literal=MONGODB_USERNAME=hc-api \ - --from-literal=MONGODB_PASSWORD=secretpassword \ - --from-literal=MONGODB_AUTH_DB=hc_api \ - --from-literal=MONGODB_DATABASE=hc_api \ - --from-literal=UPLOADS_DIRECTORY=/var/www/api/uploads \ - --from-literal=NEO4J_URI=bolt://localhost:7687 -``` - -Create a secret with your public and private ssh keys. As the [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/secret/#use-case-pod-with-ssh-keys) points out, you should be careful with your ssh keys. Anyone with access to your cluster will have access to your ssh keys. 
Better create a new pair with `ssh-keygen` and copy the public key to your legacy server with `ssh-copy-id`: - -```bash -$ kubectl create secret generic ssh-keys \ - --namespace=human-connection \ - --from-file=id_rsa=/path/to/.ssh/id_rsa \ - --from-file=id_rsa.pub=/path/to/.ssh/id_rsa.pub \ - --from-file=known_hosts=/path/to/.ssh/known_hosts -``` - -**Migrate legacy database** - -Patch the existing deployments to use a multi-container setup: - -```bash -cd legacy-migration -kubectl apply -f volume-claim-mongo-export.yaml -kubectl patch --namespace=human-connection deployment nitro-backend --patch "$(cat deployment-backend.yaml)" -kubectl patch --namespace=human-connection deployment nitro-neo4j --patch "$(cat deployment-neo4j.yaml)" -cd .. -``` - -Run the migration: - -```text -$ kubectl --namespace=human-connection get pods -# change below -$ kubectl --namespace=human-connection exec -it nitro-neo4j-65bbdb597c-nc2lv migrate -$ kubectl --namespace=human-connection exec -it nitro-backend-c6cc5ff69-8h96z sync_uploads -``` +We have tested two different kubernetes providers: [Minikube](./minikube/README.md) +and [Digital Ocean](./digital-ocean/README.md). +Check out the specific documentation for your provider. After that, learn how +to apply the specific kubernetes configuration for [Human Connection](./human-connection/README.md). diff --git a/deployment/human-connection/configmap.yaml b/deployment/configmap.template.yaml similarity index 67% rename from deployment/human-connection/configmap.yaml rename to deployment/configmap.template.yaml index 5e4d6ba89..baf41661a 100644 --- a/deployment/human-connection/configmap.yaml +++ b/deployment/configmap.template.yaml @@ -9,8 +9,6 @@ NEO4J_USER: "neo4j" NEO4J_AUTH: "none" CLIENT_URI: "https://nitro-staging.human-connection.org" - MAPBOX_TOKEN: "pk.eyJ1IjoiaHVtYW4tY29ubmVjdGlvbiIsImEiOiJjajl0cnBubGoweTVlM3VwZ2lzNTNud3ZtIn0.KZ8KK9l70omjXbEkkbHGsQ" - PRIVATE_KEY_PASSPHRASE: "a7dsf78sadg87ad87sfagsadg78" metadata: name: configmap namespace: human-connection diff --git a/deployment/db-migration-worker.yaml b/deployment/db-migration-worker.yaml deleted file mode 100644 index 55743e360..000000000 --- a/deployment/db-migration-worker.yaml +++ /dev/null @@ -1,39 +0,0 @@ ---- - kind: Pod - apiVersion: v1 - metadata: - name: nitro-db-migration-worker - namespace: human-connection - spec: - volumes: - - name: secret-volume - secret: - secretName: ssh-keys - defaultMode: 0400 - - name: mongo-export - persistentVolumeClaim: - claimName: mongo-export-claim - containers: - - name: nitro-db-migration-worker - image: humanconnection/db-migration-worker:latest - envFrom: - - configMapRef: - name: db-migration-worker - volumeMounts: - - name: secret-volume - readOnly: false - mountPath: /root/.ssh - - name: mongo-export - mountPath: /mongo-export/ ---- - kind: PersistentVolumeClaim - apiVersion: v1 - metadata: - name: mongo-export-claim - namespace: human-connection - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi diff --git a/deployment/digital-ocean/README.md b/deployment/digital-ocean/README.md new file mode 100644 index 000000000..12c272691 --- /dev/null +++ b/deployment/digital-ocean/README.md @@ -0,0 +1,26 @@ +# Digital Ocean + +As a start, read the [introduction into kubernetes](https://www.digitalocean.com/community/tutorials/an-introduction-to-kubernetes) by the folks at Digital Ocean. The following section should enable you to deploy Human Connection to your kubernetes cluster. + +## Connect to your local cluster + +1. 
Create a cluster at [Digital Ocean](https://www.digitalocean.com/).
+2. Download the `***-kubeconfig.yaml` from the Web UI.
+3. Move the file to the default location where kubectl expects it to be: `mv ***-kubeconfig.yaml ~/.kube/config`. Alternatively, you can pass the config file on every command: `--kubeconfig ***-kubeconfig.yaml`
+4. Now check that you can connect to the cluster and that it's your newly created one by running: `kubectl get nodes`
+
+The output should look something like this:
+```
+$ kubectl get nodes
+NAME                  STATUS   ROLES    AGE   VERSION
+nifty-driscoll-uu1w   Ready    <none>   69d   v1.13.2
+nifty-driscoll-uuiw   Ready    <none>   69d   v1.13.2
+nifty-driscoll-uusn   Ready    <none>   69d   v1.13.2
+```
+
+If you followed the steps above and can see your nodes, you can continue.
+
+Digital Ocean kubernetes clusters don't have a graphical interface, so we
+suggest setting up the [kubernetes dashboard](./dashboard/README.md) as a next step.
+Configuring [HTTPS](./https/README.md) is a bit tricky, so we suggest doing
+that as a last step.
diff --git a/deployment/digital-ocean/dashboard/README.md b/deployment/digital-ocean/dashboard/README.md
new file mode 100644
index 000000000..3ae6378bf
--- /dev/null
+++ b/deployment/digital-ocean/dashboard/README.md
@@ -0,0 +1,55 @@
+# Install Kubernetes Dashboard
+
+The kubernetes dashboard is optional but very helpful for debugging. If you want to install it, you have to do so only **once** per cluster:
+
+```bash
+# in folder deployment/digital-ocean/
+$ kubectl apply -f dashboard/
+$ kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/master/aio/deploy/recommended/kubernetes-dashboard.yaml
+```
+
+## Login to your dashboard
+
+Proxy the remote kubernetes dashboard to localhost:
+
+```bash
+$ kubectl proxy
+```
+
+Visit:
+
+[http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/](http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/)
+
+You should see a login screen.
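+
+If you want to verify from the terminal that the proxy is serving the
+dashboard before opening the browser, a probe like the following should
+return an HTTP status code (assuming the default `kubectl proxy` port 8001):
+
+```bash
+# expect a 200 once the proxy and the dashboard are up
+$ curl -s -o /dev/null -w "%{http_code}\n" \
+  http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/
+```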
+
+To get your token for the dashboard, you can run this command:
+
+```bash
+$ kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')
+```
+
+It should print something like:
+
+```text
+Name:         admin-user-token-6gl6l
+Namespace:    kube-system
+Labels:       <none>
+Annotations:  kubernetes.io/service-account.name=admin-user
+              kubernetes.io/service-account.uid=b16afba9-dfec-11e7-bbb9-901b0e532516
+
+Type:  kubernetes.io/service-account-token
+
+Data
+====
+ca.crt:     1025 bytes
+namespace:  11 bytes
+token:      eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLTZnbDZsIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiJiMTZhZmJhOS1kZmVjLTExZTctYmJiOS05MDFiMGU1MzI1MTYiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWRtaW4tdXNlciJ9.M70CU3lbu3PP4OjhFms8PVL5pQKj-jj4RNSLA4YmQfTXpPUuxqXjiTf094_Rzr0fgN_IVX6gC4fiNUL5ynx9KU-lkPfk0HnX8scxfJNzypL039mpGt0bbe1IXKSIRaq_9VW59Xz-yBUhycYcKPO9RM2Qa1Ax29nqNVko4vLn1_1wPqJ6XSq3GYI8anTzV8Fku4jasUwjrws6Cn6_sPEGmL54sq5R4Z5afUtv-mItTmqZZdxnkRqcJLlg2Y8WbCPogErbsaCDJoABQ7ppaqHetwfM_0yMun6ABOQbIwwl8pspJhpplKwyo700OSpvTT9zlBsu-b35lzXGBRHzv5g_RA
+```
+
+Grab the token from above and paste it into the [login screen](http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/).
+
+When you are logged in, you should see something like:
+
+![Dashboard](./dashboard-screenshot.png)
+
+Feel free to save the login token from above in your password manager. Unlike the `kubeconfig` file, this token does not expire.
diff --git a/deployment/dashboard/admin-user.yaml b/deployment/digital-ocean/dashboard/admin-user.yaml
similarity index 100%
rename from deployment/dashboard/admin-user.yaml
rename to deployment/digital-ocean/dashboard/admin-user.yaml
diff --git a/deployment/digital-ocean/dashboard/dashboard-screenshot.png b/deployment/digital-ocean/dashboard/dashboard-screenshot.png
new file mode 100644
index 000000000..6aefb5414
Binary files /dev/null and b/deployment/digital-ocean/dashboard/dashboard-screenshot.png differ
diff --git a/deployment/dashboard/role-binding.yaml b/deployment/digital-ocean/dashboard/role-binding.yaml
similarity index 100%
rename from deployment/dashboard/role-binding.yaml
rename to deployment/digital-ocean/dashboard/role-binding.yaml
diff --git a/deployment/digital-ocean/https/README.md b/deployment/digital-ocean/https/README.md
new file mode 100644
index 000000000..398601e78
--- /dev/null
+++ b/deployment/digital-ocean/https/README.md
@@ -0,0 +1,57 @@
+# Setup Ingress and HTTPS
+
+Follow [this quick start guide](https://docs.cert-manager.io/en/latest/tutorials/acme/quick-start/index.html) and install certmanager via helm and tiller:
+
+```text
+$ kubectl create serviceaccount tiller --namespace=kube-system
+$ kubectl create clusterrolebinding tiller-admin --serviceaccount=kube-system:tiller --clusterrole=cluster-admin
+$ helm init --service-account=tiller
+$ helm repo update
+$ helm install stable/nginx-ingress
+$ kubectl apply -f https://raw.githubusercontent.com/jetstack/cert-manager/release-0.6/deploy/manifests/00-crds.yaml
+$ helm install --name cert-manager --namespace cert-manager stable/cert-manager
+```
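+
+A quick way to check that the cert-manager pods came up before you continue
+(a hedged check; the pod names differ across chart versions):
+
+```bash
+$ kubectl get pods --namespace cert-manager
+```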
+
+Create letsencrypt issuers. _Change the email address_ in `issuer.yaml` before running this command:
+
+```bash
+# in folder deployment/digital-ocean/https/
+$ kubectl apply -f issuer.yaml
+```
+
+Create an ingress service in namespace `human-connection`. _Change the domain name_ according to your needs:
+
+```bash
+# in folder deployment/digital-ocean/https/
+$ kubectl apply -f ingress.yaml
+```
+
+Check that the ingress server is working correctly:
+
+```bash
+$ curl -kivL -H 'Host: <your-domain>' 'https://<ip-address>'
+```
+
+If the response looks good, configure your domain registrar for the new IP address and the domain.
+
+Now let's get a valid HTTPS certificate. According to the tutorial above, check your tls certificate for staging:
+
+```bash
+$ kubectl describe --namespace=human-connection certificate tls
+$ kubectl describe --namespace=human-connection secret tls
+```
+
+If everything looks good, update the issuer of your ingress. Change the annotation `certmanager.k8s.io/issuer` from `letsencrypt-staging` to `letsencrypt-prod` in your ingress configuration in `ingress.yaml`.
+
+```bash
+# in folder deployment/digital-ocean/https/
+$ kubectl apply -f ingress.yaml
+```
+
+Delete the former secret to force a refresh:
+
+```text
+$ kubectl --namespace=human-connection delete secret tls
+```
+
+Now, HTTPS should be configured on your domain. Congrats.
diff --git a/deployment/human-connection/ingress/ingress.yaml b/deployment/digital-ocean/https/ingress.yaml
similarity index 100%
rename from deployment/human-connection/ingress/ingress.yaml
rename to deployment/digital-ocean/https/ingress.yaml
diff --git a/deployment/human-connection/https/issuer.yaml b/deployment/digital-ocean/https/issuer.yaml
similarity index 100%
rename from deployment/human-connection/https/issuer.yaml
rename to deployment/digital-ocean/https/issuer.yaml
diff --git a/deployment/human-connection/README.md b/deployment/human-connection/README.md
new file mode 100644
index 000000000..d6a2dd989
--- /dev/null
+++ b/deployment/human-connection/README.md
@@ -0,0 +1,58 @@
+# Kubernetes Configuration for Human Connection
+
+Deploying Human Connection with kubernetes is straightforward. All you have to
+do is change certain parameters, like domain names and API keys, then you
+just apply our provided configuration files to your cluster.
+
+## Configuration
+
+Copy our provided templates:
+
+```bash
+$ cp secrets.template.yaml human-connection/secrets.yaml
+$ cp configmap.template.yaml human-connection/configmap.yaml
+```
+
+Change the `configmap.yaml` as needed; all variables will be available as
+environment variables in your deployed kubernetes pods.
+
+If you want to edit secrets, you have to `base64` encode them. See the [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/secret/#creating-a-secret-manually).
+
+```bash
+# example of how to base64-encode a string:
+$ echo -n 'admin' | base64 --wrap 0
+YWRtaW4=
+```
+
+Those secrets get `base64` decoded and are available as environment variables in
+your deployed kubernetes pods.
+
+## Create a namespace
+
+```bash
+$ kubectl apply -f namespace-human-connection.yaml
+```
+
+If you have a [kubernetes dashboard](../digital-ocean/dashboard/README.md)
+deployed, you should switch to the namespace `human-connection` in order to
+monitor the state of your deployments.
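+
+If you work with this cluster a lot, you can also make `human-connection` the
+default namespace of your current context, so you don't have to pass
+`--namespace` on every command. This is just a convenience, not a requirement:
+
+```bash
+# set the default namespace for the current kubectl context
+$ kubectl config set-context --current --namespace=human-connection
+```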
+
+## Create persistent volumes
+
+While the deployments and services can easily be restored, simply by deleting
+and re-applying the kubernetes configurations, certain data is not that easily
+recovered. Therefore we keep the persistent volumes separate from the
+deployments and services; see the [dedicated section](../volumes/README.md)
+for details. Create those persistent volumes once before you apply the
+configuration.
+
+## Apply the configuration
+
+```bash
+# in folder deployment/
+$ kubectl apply -f human-connection/
+```
+
+This can take a while because kubernetes will download the docker images. Sit
+back and relax and have a look at your kubernetes dashboard. Wait until all
+pods turn green and they don't show a warning `Waiting: ContainerCreating`
+anymore.
diff --git a/deployment/human-connection/deployment-backend.yaml b/deployment/human-connection/deployment-backend.yaml
index 29992ef7e..a873b7bb2 100644
--- a/deployment/human-connection/deployment-backend.yaml
+++ b/deployment/human-connection/deployment-backend.yaml
@@ -43,15 +43,3 @@
       restartPolicy: Always
       terminationGracePeriodSeconds: 30
 status: {}
----
- kind: PersistentVolumeClaim
- apiVersion: v1
- metadata:
-   name: uploads-claim
-   namespace: human-connection
- spec:
-   accessModes:
-     - ReadWriteOnce
-   resources:
-     requests:
-       storage: 2Gi
diff --git a/deployment/human-connection/deployment-neo4j.yaml b/deployment/human-connection/deployment-neo4j.yaml
index 2c76a3322..4a715da76 100644
--- a/deployment/human-connection/deployment-neo4j.yaml
+++ b/deployment/human-connection/deployment-neo4j.yaml
@@ -6,6 +6,10 @@
   namespace: human-connection
 spec:
   replicas: 1
+  strategy:
+    rollingUpdate:
+      maxSurge: 0
+      maxUnavailable: "100%"
   selector:
     matchLabels:
       human-connection.org/selector: deployment-human-connection-neo4j
@@ -53,15 +57,3 @@
             claimName: neo4j-data-claim
       restartPolicy: Always
       terminationGracePeriodSeconds: 30
----
- kind: PersistentVolumeClaim
- apiVersion: v1
- metadata:
-   name: neo4j-data-claim
-   namespace: human-connection
- spec:
-   accessModes:
-     - ReadWriteOnce
-   resources:
-     requests:
-       storage: 1Gi
diff --git a/deployment/legacy-migration/README.md b/deployment/legacy-migration/README.md
new file mode 100644
index 000000000..8cc7bd746
--- /dev/null
+++ b/deployment/legacy-migration/README.md
@@ -0,0 +1,85 @@
+# Legacy data migration
+
+This setup is **completely optional** and only required if you have data on a
+server that is running our legacy code and you want to import that data. It
+will import the uploads folder and migrate a dump of the legacy Mongo database
+into our new Neo4J graph database.
+
+## Configure Maintenance-Worker Pod
+
+Create a configmap with the specific connection data of your legacy server:
+
+```bash
+$ kubectl create configmap maintenance-worker \
+  --namespace=human-connection \
+  --from-literal=SSH_USERNAME=someuser \
+  --from-literal=SSH_HOST=yourhost \
+  --from-literal=MONGODB_USERNAME=hc-api \
+  --from-literal=MONGODB_PASSWORD=secretpassword \
+  --from-literal=MONGODB_AUTH_DB=hc_api \
+  --from-literal=MONGODB_DATABASE=hc_api \
+  --from-literal=UPLOADS_DIRECTORY=/var/www/api/uploads
+```
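+
+To double-check that the values landed in the cluster, you can inspect the
+configmap (an optional sanity check):
+
+```bash
+$ kubectl describe configmap maintenance-worker --namespace=human-connection
+```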
+
+Create a secret with your public and private ssh keys. As the [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/secret/#use-case-pod-with-ssh-keys) points out, you should be careful with your ssh keys. Anyone with access to your cluster will have access to your ssh keys. It is better to create a new pair with `ssh-keygen` and copy the public key to your legacy server with `ssh-copy-id`:
+
+```bash
+$ kubectl create secret generic ssh-keys \
+  --namespace=human-connection \
+  --from-file=id_rsa=/path/to/.ssh/id_rsa \
+  --from-file=id_rsa.pub=/path/to/.ssh/id_rsa.pub \
+  --from-file=known_hosts=/path/to/.ssh/known_hosts
+```
+
+## Deploy a Temporary Maintenance-Worker Pod
+
+Bring the application into maintenance mode.
+
+{% hint style="info" %} TODO: implement maintenance mode {% endhint %}
+
+Then temporarily delete the backend and database deployments:
+
+```bash
+$ kubectl --namespace=human-connection get deployments
+NAME            READY   UP-TO-DATE   AVAILABLE   AGE
+nitro-backend   1/1     1            1           3d11h
+nitro-neo4j     1/1     1            1           3d11h
+nitro-web       2/2     2            2           73d
+$ kubectl --namespace=human-connection delete deployment nitro-neo4j
+deployment.extensions "nitro-neo4j" deleted
+$ kubectl --namespace=human-connection delete deployment nitro-backend
+deployment.extensions "nitro-backend" deleted
+```
+
+Deploy the one-time maintenance-worker pod:
+
+```bash
+# in deployment/legacy-migration/
+$ kubectl apply -f maintenance-worker.yaml
+pod/nitro-maintenance-worker created
+```
+
+Import the legacy database and uploads:
+
+```bash
+$ kubectl --namespace=human-connection exec -it nitro-maintenance-worker bash
+$ import_legacy_db
+$ import_legacy_uploads
+$ exit
+```
+
+Delete the pod when you're done:
+
+```bash
+$ kubectl --namespace=human-connection delete pod nitro-maintenance-worker
+```
+
+Oh, and of course you have to get those deleted deployments back. One way of
+doing it would be:
+
+```bash
+# in folder deployment/
+$ kubectl apply -f human-connection/deployment-backend.yaml -f human-connection/deployment-neo4j.yaml
+```
+
diff --git a/deployment/legacy-migration/deployment-backend.yaml b/deployment/legacy-migration/deployment-backend.yaml
deleted file mode 100644
index 1adeb0665..000000000
--- a/deployment/legacy-migration/deployment-backend.yaml
+++ /dev/null
@@ -1,27 +0,0 @@
----
- apiVersion: extensions/v1beta1
- kind: Deployment
- metadata:
-   name: nitro-backend
-   namespace: human-connection
- spec:
-   template:
-     spec:
-       containers:
-         - name: nitro-db-migration-worker
-           image: humanconnection/db-migration-worker:latest
-           imagePullPolicy: Always
-           envFrom:
-             - configMapRef:
-                 name: db-migration-worker
-           volumeMounts:
-             - name: secret-volume
-               readOnly: false
-               mountPath: /root/.ssh
-             - name: uploads
-               mountPath: /uploads/
-       volumes:
-         - name: secret-volume
-           secret:
-             secretName: ssh-keys
-             defaultMode: 0400
diff --git a/deployment/legacy-migration/deployment-neo4j.yaml b/deployment/legacy-migration/deployment-neo4j.yaml
deleted file mode 100644
index 2852b90cb..000000000
--- a/deployment/legacy-migration/deployment-neo4j.yaml
+++ /dev/null
@@ -1,39 +0,0 @@
----
- apiVersion: extensions/v1beta1
- kind: Deployment
- metadata:
-   name: nitro-neo4j
-   namespace: human-connection
- spec:
-   template:
-     spec:
-       containers:
-         - name: nitro-db-migration-worker
-           image: humanconnection/db-migration-worker:latest
-           imagePullPolicy: Always
-           envFrom:
-             - configMapRef:
-                 name: db-migration-worker
-           env:
-             - name: COMMIT
-               value:
-             - name: NEO4J_URI
-               value: bolt://localhost:7687
-           volumeMounts:
-             - name: secret-volume
-               readOnly: false
-               mountPath: /root/.ssh
-             - name: mongo-export
-               mountPath: /mongo-export/
-         - name: nitro-neo4j
-           volumeMounts:
-             - mountPath: /mongo-export/
-               name: mongo-export
-       volumes:
-         - name: secret-volume
-           secret:
-             secretName: ssh-keys
-             defaultMode: 0400
-         - name: mongo-export
- 
persistentVolumeClaim: - claimName: mongo-export-claim diff --git a/deployment/legacy-migration/maintenance-worker.yaml b/deployment/legacy-migration/maintenance-worker.yaml new file mode 100644 index 000000000..cda17400a --- /dev/null +++ b/deployment/legacy-migration/maintenance-worker.yaml @@ -0,0 +1,34 @@ +--- + kind: Pod + apiVersion: v1 + metadata: + name: nitro-maintenance-worker + namespace: human-connection + spec: + containers: + - name: nitro-maintenance-worker + image: humanconnection/maintenance-worker:latest + envFrom: + - configMapRef: + name: maintenance-worker + - configMapRef: + name: configmap + volumeMounts: + - name: secret-volume + readOnly: false + mountPath: /root/.ssh + - name: uploads + mountPath: /nitro-backend/public/uploads + - name: neo4j-data + mountPath: /data/ + volumes: + - name: secret-volume + secret: + secretName: ssh-keys + defaultMode: 0400 + - name: uploads + persistentVolumeClaim: + claimName: uploads-claim + - name: neo4j-data + persistentVolumeClaim: + claimName: neo4j-data-claim diff --git a/backend/db-migration-worker/.dockerignore b/deployment/legacy-migration/maintenance-worker/.dockerignore similarity index 100% rename from backend/db-migration-worker/.dockerignore rename to deployment/legacy-migration/maintenance-worker/.dockerignore diff --git a/backend/db-migration-worker/.gitignore b/deployment/legacy-migration/maintenance-worker/.gitignore similarity index 100% rename from backend/db-migration-worker/.gitignore rename to deployment/legacy-migration/maintenance-worker/.gitignore diff --git a/deployment/legacy-migration/maintenance-worker/Dockerfile b/deployment/legacy-migration/maintenance-worker/Dockerfile new file mode 100644 index 000000000..1fafce5e8 --- /dev/null +++ b/deployment/legacy-migration/maintenance-worker/Dockerfile @@ -0,0 +1,11 @@ +FROM humanconnection/neo4j:latest + +ENV NODE_ENV=maintenance +EXPOSE 7687 7474 + +RUN apk upgrade --update +RUN apk add --no-cache mongodb-tools openssh nodejs yarn rsync + +COPY known_hosts /root/.ssh/known_hosts +COPY migration ./migration +COPY ./binaries/* /usr/local/bin/ diff --git a/deployment/legacy-migration/maintenance-worker/binaries/create_private_ssh_key_from_env b/deployment/legacy-migration/maintenance-worker/binaries/create_private_ssh_key_from_env new file mode 100755 index 000000000..f44671978 --- /dev/null +++ b/deployment/legacy-migration/maintenance-worker/binaries/create_private_ssh_key_from_env @@ -0,0 +1,6 @@ +#!/usr/bin/env bash +set -e + +mkdir -p ~/.ssh +echo $SSH_PRIVATE_KEY | base64 -d > ~/.ssh/id_rsa +chmod 600 ~/.ssh/id_rsa diff --git a/backend/db-migration-worker/migrate.sh b/deployment/legacy-migration/maintenance-worker/binaries/import_legacy_db similarity index 100% rename from backend/db-migration-worker/migrate.sh rename to deployment/legacy-migration/maintenance-worker/binaries/import_legacy_db diff --git a/backend/db-migration-worker/sync_uploads.sh b/deployment/legacy-migration/maintenance-worker/binaries/import_legacy_uploads similarity index 81% rename from backend/db-migration-worker/sync_uploads.sh rename to deployment/legacy-migration/maintenance-worker/binaries/import_legacy_uploads index d24936e3b..24ae0fca5 100755 --- a/backend/db-migration-worker/sync_uploads.sh +++ b/deployment/legacy-migration/maintenance-worker/binaries/import_legacy_uploads @@ -9,4 +9,5 @@ do fi done +[ -z "$SSH_PRIVATE_KEY" ] || create_private_ssh_key_from_env rsync --archive --update --verbose ${SSH_USERNAME}@${SSH_HOST}:${UPLOADS_DIRECTORY}/* /uploads/ diff --git 
a/deployment/legacy-migration/maintenance-worker/docker-compose.yml b/deployment/legacy-migration/maintenance-worker/docker-compose.yml
new file mode 100644
index 000000000..a45a5163a
--- /dev/null
+++ b/deployment/legacy-migration/maintenance-worker/docker-compose.yml
@@ -0,0 +1,42 @@
+version: "3.4"
+
+services:
+  maintenance:
+    image: humanconnection/maintenance-worker:latest
+    build:
+      context: .
+    volumes:
+      - uploads:/uploads
+      - neo4j-data:/data
+      - ./migration/:/migration
+    networks:
+      - hc-network
+    environment:
+      - GRAPHQL_PORT=4000
+      - GRAPHQL_URI=http://localhost:4000
+      - CLIENT_URI=http://localhost:3000
+      - JWT_SECRET=b/&&7b78BF&fv/Vd
+      - MOCK=false
+      - MAPBOX_TOKEN=pk.eyJ1IjoiaHVtYW4tY29ubmVjdGlvbiIsImEiOiJjajl0cnBubGoweTVlM3VwZ2lzNTNud3ZtIn0.KZ8KK9l70omjXbEkkbHGsQ
+      - PRIVATE_KEY_PASSPHRASE=a7dsf78sadg87ad87sfagsadg78
+      - NEO4J_URI=bolt://localhost:7687
+      - NEO4J_apoc_import_file_enabled=true
+      - NEO4J_AUTH=none
+      - "SSH_USERNAME=${SSH_USERNAME}"
+      - "SSH_HOST=${SSH_HOST}"
+      - "SSH_PRIVATE_KEY=${SSH_PRIVATE_KEY}"
+      - "MONGODB_USERNAME=${MONGODB_USERNAME}"
+      - "MONGODB_PASSWORD=${MONGODB_PASSWORD}"
+      - "MONGODB_AUTH_DB=${MONGODB_AUTH_DB}"
+      - "MONGODB_DATABASE=${MONGODB_DATABASE}"
+      - "UPLOADS_DIRECTORY=${UPLOADS_DIRECTORY}"
+    ports:
+      - 7687:7687
+      - 7474:7474
+
+volumes:
+  uploads:
+  neo4j-data:
+
+networks:
+  hc-network:
diff --git a/backend/db-migration-worker/.ssh/known_hosts b/deployment/legacy-migration/maintenance-worker/known_hosts
similarity index 100%
rename from backend/db-migration-worker/.ssh/known_hosts
rename to deployment/legacy-migration/maintenance-worker/known_hosts
diff --git a/backend/db-migration-worker/migration/mongo/import.sh b/deployment/legacy-migration/maintenance-worker/migration/mongo/import.sh
similarity index 61%
rename from backend/db-migration-worker/migration/mongo/import.sh
rename to deployment/legacy-migration/maintenance-worker/migration/mongo/import.sh
index 7cf3e91e4..328560bfc 100755
--- a/backend/db-migration-worker/migration/mongo/import.sh
+++ b/deployment/legacy-migration/maintenance-worker/migration/mongo/import.sh
@@ -9,16 +9,17 @@
 echo "MONGODB_DATABASE    ${MONGODB_DATABASE}"
 echo "MONGODB_AUTH_DB     ${MONGODB_AUTH_DB}"
 echo "-------------------------------------------------"
-mongo ${MONGODB_DATABASE} --eval "db.dropDatabase();"
-rm -rf /mongo-export/*
+[ -z "$SSH_PRIVATE_KEY" ] || create_private_ssh_key_from_env
+
+rm -rf /tmp/mongo-export/*
+mkdir -p /tmp/mongo-export
 ssh -4 -M -S my-ctrl-socket -fnNT -L 27018:localhost:27017 -l ${SSH_USERNAME} ${SSH_HOST}
-mongodump --host localhost -d ${MONGODB_DATABASE} --port 27018 --username ${MONGODB_USERNAME} --password ${MONGODB_PASSWORD} --authenticationDatabase ${MONGODB_AUTH_DB} --gzip --archive=/tmp/mongodump.archive
-mongorestore --gzip --archive=/tmp/mongodump.archive
-ssh -S my-ctrl-socket -O check -l ${SSH_USERNAME} ${SSH_HOST}
-ssh -S my-ctrl-socket -O exit -l ${SSH_USERNAME} ${SSH_HOST}
 
 for collection in "categories" "badges" "users" "contributions" "comments" "follows" "shouts"
 do
-  mongoexport --db ${MONGODB_DATABASE} --collection $collection --out "/mongo-export/$collection.json"
+  mongoexport --host localhost --port 27018 --username ${MONGODB_USERNAME} --password ${MONGODB_PASSWORD} --authenticationDatabase ${MONGODB_AUTH_DB} --db ${MONGODB_DATABASE} --collection $collection --out "/tmp/mongo-export/$collection.json"
 done
+
+ssh -S my-ctrl-socket -O check -l ${SSH_USERNAME} ${SSH_HOST}
+ssh -S my-ctrl-socket -O exit -l ${SSH_USERNAME} ${SSH_HOST}
diff 
--git a/backend/db-migration-worker/migration/neo4j/badges.cql b/deployment/legacy-migration/maintenance-worker/migration/neo4j/badges.cql similarity index 75% rename from backend/db-migration-worker/migration/neo4j/badges.cql rename to deployment/legacy-migration/maintenance-worker/migration/neo4j/badges.cql index 90e4755b4..f4bf67dda 100644 --- a/backend/db-migration-worker/migration/neo4j/badges.cql +++ b/deployment/legacy-migration/maintenance-worker/migration/neo4j/badges.cql @@ -1,4 +1,4 @@ -CALL apoc.load.json('file:/mongo-export/badges.json') YIELD value as badge +CALL apoc.load.json('file:/tmp/mongo-export/badges.json') YIELD value as badge MERGE(b:Badge {id: badge._id["$oid"]}) ON CREATE SET b.key = badge.key, diff --git a/backend/db-migration-worker/migration/neo4j/categories.cql b/deployment/legacy-migration/maintenance-worker/migration/neo4j/categories.cql similarity index 94% rename from backend/db-migration-worker/migration/neo4j/categories.cql rename to deployment/legacy-migration/maintenance-worker/migration/neo4j/categories.cql index a2bf6a352..c22354cbe 100644 --- a/backend/db-migration-worker/migration/neo4j/categories.cql +++ b/deployment/legacy-migration/maintenance-worker/migration/neo4j/categories.cql @@ -1,4 +1,4 @@ -CALL apoc.load.json('file:/mongo-export/categories.json') YIELD value as category +CALL apoc.load.json('file:/tmp/mongo-export/categories.json') YIELD value as category MERGE(c:Category {id: category._id["$oid"]}) ON CREATE SET c.name = category.title, diff --git a/backend/db-migration-worker/migration/neo4j/comments.cql b/deployment/legacy-migration/maintenance-worker/migration/neo4j/comments.cql similarity index 84% rename from backend/db-migration-worker/migration/neo4j/comments.cql rename to deployment/legacy-migration/maintenance-worker/migration/neo4j/comments.cql index 6709acbc8..eb645108a 100644 --- a/backend/db-migration-worker/migration/neo4j/comments.cql +++ b/deployment/legacy-migration/maintenance-worker/migration/neo4j/comments.cql @@ -1,4 +1,4 @@ -CALL apoc.load.json('file:/mongo-export/comments.json') YIELD value as json +CALL apoc.load.json('file:/tmp/mongo-export/comments.json') YIELD value as json MERGE (comment:Comment {id: json._id["$oid"]}) ON CREATE SET comment.content = json.content, diff --git a/backend/db-migration-worker/migration/neo4j/contributions.cql b/deployment/legacy-migration/maintenance-worker/migration/neo4j/contributions.cql similarity index 89% rename from backend/db-migration-worker/migration/neo4j/contributions.cql rename to deployment/legacy-migration/maintenance-worker/migration/neo4j/contributions.cql index 0c7b18959..134c276cf 100644 --- a/backend/db-migration-worker/migration/neo4j/contributions.cql +++ b/deployment/legacy-migration/maintenance-worker/migration/neo4j/contributions.cql @@ -1,4 +1,4 @@ -CALL apoc.load.json('file:/mongo-export/contributions.json') YIELD value as post +CALL apoc.load.json('file:/tmp/mongo-export/contributions.json') YIELD value as post MERGE (p:Post {id: post._id["$oid"]}) ON CREATE SET p.title = post.title, diff --git a/backend/db-migration-worker/migration/neo4j/follows.cql b/deployment/legacy-migration/maintenance-worker/migration/neo4j/follows.cql similarity index 55% rename from backend/db-migration-worker/migration/neo4j/follows.cql rename to deployment/legacy-migration/maintenance-worker/migration/neo4j/follows.cql index 0dad6a435..6f5416723 100644 --- a/backend/db-migration-worker/migration/neo4j/follows.cql +++ 
b/deployment/legacy-migration/maintenance-worker/migration/neo4j/follows.cql
@@ -1,4 +1,4 @@
-CALL apoc.load.json('file:/mongo-export/follows.json') YIELD value as follow
+CALL apoc.load.json('file:/tmp/mongo-export/follows.json') YIELD value as follow
 MATCH (u1:User {id: follow.userId}), (u2:User {id: follow.foreignId})
 MERGE (u1)-[:FOLLOWS]->(u2)
 ;
diff --git a/backend/db-migration-worker/migration/neo4j/import.sh b/deployment/legacy-migration/maintenance-worker/migration/neo4j/import.sh
similarity index 100%
rename from backend/db-migration-worker/migration/neo4j/import.sh
rename to deployment/legacy-migration/maintenance-worker/migration/neo4j/import.sh
diff --git a/backend/db-migration-worker/migration/neo4j/shouts.cql b/deployment/legacy-migration/maintenance-worker/migration/neo4j/shouts.cql
similarity index 54%
rename from backend/db-migration-worker/migration/neo4j/shouts.cql
rename to deployment/legacy-migration/maintenance-worker/migration/neo4j/shouts.cql
index 60aca50c9..cd72ab66b 100644
--- a/backend/db-migration-worker/migration/neo4j/shouts.cql
+++ b/deployment/legacy-migration/maintenance-worker/migration/neo4j/shouts.cql
@@ -1,4 +1,4 @@
-CALL apoc.load.json('file:/mongo-export/shouts.json') YIELD value as shout
+CALL apoc.load.json('file:/tmp/mongo-export/shouts.json') YIELD value as shout
 MATCH (u:User {id: shout.userId}), (p:Post {id: shout.foreignId})
 MERGE (u)-[:SHOUTED]->(p)
 ;
diff --git a/backend/db-migration-worker/migration/neo4j/users.cql b/deployment/legacy-migration/maintenance-worker/migration/neo4j/users.cql
similarity index 87%
rename from backend/db-migration-worker/migration/neo4j/users.cql
rename to deployment/legacy-migration/maintenance-worker/migration/neo4j/users.cql
index 5f87bb273..22eb46882 100644
--- a/backend/db-migration-worker/migration/neo4j/users.cql
+++ b/deployment/legacy-migration/maintenance-worker/migration/neo4j/users.cql
@@ -1,4 +1,4 @@
-CALL apoc.load.json('file:/mongo-export/users.json') YIELD value as user
+CALL apoc.load.json('file:/tmp/mongo-export/users.json') YIELD value as user
 MERGE(u:User {id: user._id["$oid"]})
 ON CREATE SET
 u.name = user.name,
diff --git a/deployment/minikube/README.md b/deployment/minikube/README.md
new file mode 100644
index 000000000..e77ddd667
--- /dev/null
+++ b/deployment/minikube/README.md
@@ -0,0 +1,25 @@
+# Minikube
+
+There are many Kubernetes providers, but if you're just getting started, Minikube is a tool that you can use to get your feet wet.
+
+After you have [installed Minikube](https://kubernetes.io/docs/tasks/tools/install-minikube/),
+open your minikube dashboard:
+
+```text
+$ minikube dashboard
+```
+
+This will give you an overview. Some of the steps below need some time for resources to become available to other dependent deployments. Keeping an eye on the dashboard is a great way to check that.
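+
+If you prefer the terminal over the dashboard, you can watch the pods come up
+with plain `kubectl` as well (assuming the `human-connection` namespace from
+the configuration steps below):
+
+```bash
+$ kubectl get pods --namespace=human-connection --watch
+```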
+
+Follow the installation instructions for [Human Connection](../human-connection/README.md).
+If all the pods and services have settled and everything looks green in your
+minikube dashboard, expose the services you want on your host system.
+
+For example:
+
+```text
+$ minikube service nitro-web --namespace=human-connection
+# optionally
+$ minikube service nitro-backend --namespace=human-connection
+```
+
diff --git a/deployment/secrets.template.yaml b/deployment/secrets.template.yaml
index ac56b7aa1..8f18dbf46 100644
--- a/deployment/secrets.template.yaml
+++ b/deployment/secrets.template.yaml
@@ -4,6 +4,7 @@ data:
   JWT_SECRET: "Yi8mJjdiNzhCRiZmdi9WZA=="
   MONGODB_PASSWORD: "TU9OR09EQl9QQVNTV09SRA=="
   PRIVATE_KEY_PASSPHRASE: "YTdkc2Y3OHNhZGc4N2FkODdzZmFnc2FkZzc4"
+  MAPBOX_TOKEN: "cGsuZXlKMUlqb2lhSFZ0WVc0dFkyOXVibVZqZEdsdmJpSXNJbUVpT2lKamFqbDBjbkJ1Ykdvd2VUVmxNM1Z3WjJsek5UTnVkM1p0SW4wLktaOEtLOWw3MG9talhiRWtrYkhHc1EK"
 metadata:
   name: human-connection
   namespace: human-connection
diff --git a/deployment/volumes/README.md b/deployment/volumes/README.md
new file mode 100644
index 000000000..b838794d5
--- /dev/null
+++ b/deployment/volumes/README.md
@@ -0,0 +1,42 @@
+# Persistent Volumes
+
+At the moment, the application needs two persistent volumes:
+
+* The `/data/` folder where `neo4j` stores its database and
+* the folder `/nitro-backend/public/uploads` where the backend stores uploads.
+
+As a precaution, the persistent volume claims that set up these volumes
+live in a separate folder. You don't want to accidentally lose all the data in
+your database by running `kubectl delete -f human-connection/`, do you?
+
+## Create Persistent Volume Claims
+
+Run the following:
+
+```sh
+# in folder deployment/
+$ kubectl apply -f volumes
+persistentvolumeclaim/neo4j-data-claim created
+persistentvolumeclaim/uploads-claim created
+```
+
+## Change Reclaim Policy
+
+We recommend changing the `ReclaimPolicy`, so that if you delete the persistent
+volume claims, the associated volumes will be released, not deleted:
+
+```sh
+$ kubectl --namespace=human-connection get pv
+
+NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                               STORAGECLASS       REASON   AGE
+pvc-bd02a715-66d0-11e9-be52-ba9c337f4551   1Gi        RWO            Delete           Bound    human-connection/neo4j-data-claim   do-block-storage            4m24s
+pvc-bd208086-66d0-11e9-be52-ba9c337f4551   2Gi        RWO            Delete           Bound    human-connection/uploads-claim      do-block-storage            4m12s
+```
+
+Get the volume id from above, then change the `ReclaimPolicy` with:
+
+```sh
+kubectl patch pv <volume-id> -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}'
+
+# in the above example
+kubectl patch pv pvc-bd02a715-66d0-11e9-be52-ba9c337f4551 -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}'
+kubectl patch pv pvc-bd208086-66d0-11e9-be52-ba9c337f4551 -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}'
+```
diff --git a/deployment/legacy-migration/volume-claim-mongo-export.yaml b/deployment/volumes/neo4j-data.yaml
similarity index 86%
rename from deployment/legacy-migration/volume-claim-mongo-export.yaml
rename to deployment/volumes/neo4j-data.yaml
index 106ef4736..f077be933 100644
--- a/deployment/legacy-migration/volume-claim-mongo-export.yaml
+++ b/deployment/volumes/neo4j-data.yaml
@@ -2,7 +2,7 @@
  kind: PersistentVolumeClaim
  apiVersion: v1
  metadata:
-   name: mongo-export-claim
+   name: neo4j-data-claim
    namespace: human-connection
  spec:
    accessModes:
diff --git a/deployment/volumes/uploads.yaml b/deployment/volumes/uploads.yaml
new file mode 100644
index 000000000..11a8027e9
--- /dev/null
+++ b/deployment/volumes/uploads.yaml
@@ -0,0 +1,12 @@
+---
+ kind: PersistentVolumeClaim
+ apiVersion: v1
+ metadata:
+   name: uploads-claim
+   namespace: human-connection
+ spec:
+   accessModes:
+     - ReadWriteOnce
+   resources:
+     requests:
+       storage: 2Gi
diff --git 
a/docker-compose.yml b/docker-compose.yml index a7e7c0802..896d1bef9 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -38,7 +38,7 @@ services: neo4j: image: humanconnection/neo4j:latest build: - context: backend/neo4j + context: neo4j networks: - hc-network diff --git a/backend/neo4j/Dockerfile b/neo4j/Dockerfile similarity index 100% rename from backend/neo4j/Dockerfile rename to neo4j/Dockerfile diff --git a/backend/neo4j/migrate.sh b/neo4j/migrate.sh similarity index 100% rename from backend/neo4j/migrate.sh rename to neo4j/migrate.sh diff --git a/webapp/package.json b/webapp/package.json index 58adc74a2..80a0ff427 100644 --- a/webapp/package.json +++ b/webapp/package.json @@ -80,7 +80,7 @@ "nodemon": "~1.18.11", "prettier": "~1.14.3", "sass-loader": "~7.1.0", - "tippy.js": "^4.2.1", + "tippy.js": "^4.3.0", "vue-jest": "~3.0.4", "vue-svg-loader": "~0.12.0" } diff --git a/webapp/yarn.lock b/webapp/yarn.lock index 1059cfb53..5b7364d2f 100644 --- a/webapp/yarn.lock +++ b/webapp/yarn.lock @@ -10355,10 +10355,10 @@ timsort@^0.3.0: resolved "https://registry.yarnpkg.com/timsort/-/timsort-0.3.0.tgz#405411a8e7e6339fe64db9a234de11dc31e02bd4" integrity sha1-QFQRqOfmM5/mTbmiNN4R3DHgK9Q= -tippy.js@^4.2.1: - version "4.2.1" - resolved "https://registry.yarnpkg.com/tippy.js/-/tippy.js-4.2.1.tgz#9e4939d976465f77229b05a3cb233b5dc28cf850" - integrity sha512-xEE7zYNgQxCDdPcuT6T04f0frPh0wO7CcIqJKMFazU/NqusyjCgYSkLRosIHoiRkZMRzSPOudC8wRN5GjvAyOQ== +tippy.js@^4.3.0: + version "4.3.0" + resolved "https://registry.yarnpkg.com/tippy.js/-/tippy.js-4.3.0.tgz#5f661fed7fa30c90609eb87f6657005dd041ede3" + integrity sha512-SjctzIfkx3+waue+Ew58MMTuzYD4SK9wJOnCEdrCmwZiKJ7chZSxOguFmBm11tmTlZuGbxncUC/5Qu6GqzD2qQ== dependencies: popper.js "^1.14.7"