Merge pull request #3930 from Ocelot-Social-Community/3921-set-up-travis-builds

chore: 🍰 Set Up Travis Builds
This commit is contained in:
Alexander Friedland 2020-11-04 08:20:32 +01:00 committed by GitHub
commit 612b9fdc40
25 changed files with 114 additions and 102 deletions

View File

@ -69,14 +69,16 @@ after_failure:
before_deploy:
- go get -u github.com/tcnksm/ghr
- ./scripts/setup_kubernetes.sh
# stop deployment to kubernetes until we have set it up
# - ./scripts/setup_kubernetes.sh
deploy:
- provider: script
script: bash scripts/docker_push.sh
on:
branch: master
- provider: script
script: bash scripts/deploy.sh
on:
branch: master
# stop deployment to kubernetes until we have set it up
# - provider: script
# script: bash scripts/deploy.sh
# on:
# branch: master

View File

@ -1,11 +1,11 @@
FROM node:lts-alpine as base
FROM node:12.19.0-alpine3.10 as base
LABEL Description="Backend of the Social Network Human-Connection.org" Vendor="Human Connection gGmbH" Version="0.0.1" Maintainer="Human Connection gGmbH (developer@human-connection.org)"
EXPOSE 4000
CMD ["yarn", "run", "start"]
ARG BUILD_COMMIT
ENV BUILD_COMMIT=$BUILD_COMMIT
ARG WORKDIR=/nitro-backend
ARG WORKDIR=/develop-backend
RUN mkdir -p $WORKDIR
WORKDIR $WORKDIR
@ -22,7 +22,7 @@ RUN NODE_ENV=production yarn run build
# reduce image size with a multistage build
FROM base as production
ENV NODE_ENV=production
COPY --from=build-and-test /nitro-backend/dist ./dist
COPY --from=build-and-test /develop-backend/dist ./dist
COPY ./public/img/ ./public/img/
COPY ./public/providers.json ./public/providers.json
RUN yarn install --production=true --frozen-lockfile --non-interactive --no-cache
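For a local check, the same production stage can be built directly; a sketch that mirrors the build commands commented out in `scripts/docker_push.sh` further down:
```sh
# Build the production stage of the backend image (BUILD_COMMIT taken from the current checkout)
docker build --build-arg BUILD_COMMIT=$(git rev-parse HEAD) \
  --target production -t ocelotsocialnetwork/develop-backend:latest ./backend
```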

View File

@ -44,7 +44,7 @@ spec:
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /nitro-backend/public/uploads
- mountPath: /develop-backend/public/uploads
name: uploads
dnsPolicy: ClusterFirst
restartPolicy: Always

View File

@ -7,13 +7,13 @@ dbInitializion: "yarn prod:migrate init"
# dbMigrations runs the database migrations in a post-upgrade hook.
dbMigrations: "yarn prod:migrate up"
# backendImage is the docker image for the backend deployment
backendImage: humanconnection/nitro-backend
backendImage: ocelotsocialnetwork/develop-backend
# maintenanceImage is the docker image for the maintenance deployment
maintenanceImage: humanconnection/maintenance
maintenanceImage: ocelotsocialnetwork/develop-maintenance
# neo4jImage is the docker image for the neo4j deployment
neo4jImage: humanconnection/neo4j
neo4jImage: ocelotsocialnetwork/develop-neo4j
# webappImage is the docker image for the webapp deployment
webappImage: humanconnection/nitro-web
webappImage: ocelotsocialnetwork/develop-webapp
# image configures pullPolicy related to the docker images
image:
# pullPolicy indicates when, if ever, pods pull a new image from docker hub.
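These values appear to feed a Helm chart (the post-upgrade hook comment above suggests as much), so a single image can presumably be overridden at upgrade time; a sketch with assumed release and chart names:
```sh
# Hypothetical: override only the backend image for the "human-connection" release
helm upgrade human-connection ./human-connection \
  --set backendImage=ocelotsocialnetwork/develop-backend
```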

View File

@ -36,7 +36,7 @@ spec:
name: configmap
- secretRef:
name: human-connection
image: humanconnection/nitro-backend:latest
image: ocelotsocialnetwork/develop-backend:latest
imagePullPolicy: Always
name: backend
ports:
@ -46,7 +46,7 @@ spec:
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /nitro-backend/public/uploads
- mountPath: /develop-backend/public/uploads
name: uploads
dnsPolicy: ClusterFirst
restartPolicy: Always

View File

@ -31,7 +31,7 @@ spec:
- envFrom:
- configMapRef:
name: configmap
image: humanconnection/neo4j:latest
image: ocelotsocialnetwork/develop-neo4j:latest
imagePullPolicy: Always
name: neo4j
ports:

View File

@ -37,7 +37,7 @@ spec:
name: configmap
- secretRef:
name: human-connection
image: humanconnection/nitro-web:latest
image: ocelotsocialnetwork/develop-webapp:latest
imagePullPolicy: Always
name: web
ports:

View File

@ -10,9 +10,10 @@ bring the database into maintenance mode for manual database migrations.
## Deploy the service
We prepared a sample configuration, so you can simply run:
```sh
# in folder deployment/
kubectl apply -f human-connection/maintenance
$ kubectl apply -f ocelotsocialnetwork/develop-maintenance
```
This will fire up a maintenance service.
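To confirm it is up, a quick check (a sketch, assuming the `human-connection` namespace used throughout these docs):
```sh
# The maintenance pod and service should show up here
kubectl --namespace=human-connection get pods,services | grep maintenance
```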
@ -23,6 +24,7 @@ Now, if you want a controlled downtime and need to bring your
application into maintenance mode, you can edit your global ingress server.
E.g. in file `deployment/digital-ocean/https/ingress.yaml` change the following:
```yaml
...
@ -31,13 +33,13 @@ E.g. in file `deployment/digital-ocean/https/ingress.yaml` change the following:
paths:
- path: /
backend:
# serviceName: nitro-web
serviceName: maintenance
# serviceName: develop-webapp
serviceName: develop-maintenance
# servicePort: 3000
servicePort: 80
```
Then run ` kubectl apply -f deployment/digital-ocean/https/ingress.yaml`. If you
Then run `$ kubectl apply -f deployment/digital-ocean/https/ingress.yaml`. If you
want to deactivate the maintenance server, just undo the edit and apply the
configuration again.

View File

@ -19,7 +19,7 @@ spec:
env:
- name: HOST
value: 0.0.0.0
image: humanconnection/maintenance:latest
image: ocelotsocialnetwork/develop-maintenance:latest
ports:
- containerPort: 80
imagePullPolicy: Always

View File

@ -4,10 +4,10 @@
data:
SMTP_HOST: "mailserver.human-connection"
SMTP_PORT: "25"
GRAPHQL_URI: "http://nitro-backend.human-connection:4000"
NEO4J_URI: "bolt://nitro-neo4j.human-connection:7687"
GRAPHQL_URI: "http://backend.human-connection:4000"
NEO4J_URI: "bolt://neo4j.human-connection:7687"
NEO4J_AUTH: "none"
CLIENT_URI: "https://nitro-staging.human-connection.org"
CLIENT_URI: "https://staging.human-connection.org"
NEO4J_apoc_import_file_enabled: "true"
NEO4J_dbms_memory_pagecache_size: "490M"
NEO4J_dbms_memory_heap_max__size: "500M"

View File

@ -43,27 +43,27 @@ Then temporarily delete backend and database deployments
```bash
$ kubectl --namespace=human-connection get deployments
NAME READY UP-TO-DATE AVAILABLE AGE
nitro-backend 1/1 1 1 3d11h
nitro-neo4j 1/1 1 1 3d11h
nitro-web 2/2 2 2 73d
$ kubectl --namespace=human-connection delete deployment nitro-neo4j
deployment.extensions "nitro-neo4j" deleted
$ kubectl --namespace=human-connection delete deployment nitro-backend
deployment.extensions "nitro-backend" deleted
develop-backend 1/1 1 1 3d11h
develop-neo4j 1/1 1 1 3d11h
develop-webapp 2/2 2 2 73d
$ kubectl --namespace=human-connection delete deployment develop-neo4j
deployment.extensions "develop-neo4j" deleted
$ kubectl --namespace=human-connection delete deployment develop-backend
deployment.extensions "develop-backend" deleted
```
Deploy the one-time maintenance-worker pod:
Deploy the one-time develop-maintenance-worker pod:
```bash
# in deployment/legacy-migration/
$ kubectl apply -f maintenance-worker.yaml
pod/nitro-maintenance-worker created
pod/develop-maintenance-worker created
```
Import legacy database and uploads:
```bash
$ kubectl --namespace=human-connection exec -it nitro-maintenance-worker bash
$ kubectl --namespace=human-connection exec -it develop-maintenance-worker bash
$ import_legacy_db
$ import_legacy_uploads
$ exit
@ -72,7 +72,7 @@ $ exit
Delete the pod when you're done:
```bash
$ kubectl --namespace=human-connection delete pod nitro-maintenance-worker
$ kubectl --namespace=human-connection delete pod develop-maintenance-worker
```
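Should anything look off, the worker's logs can be inspected while the pod still exists; a sketch:
```sh
# Review the one-time worker's output
kubectl --namespace=human-connection logs develop-maintenance-worker
```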
Oh, and of course you have to get those deleted deployments back. One way of

View File

@ -2,12 +2,12 @@
kind: Pod
apiVersion: v1
metadata:
name: nitro-maintenance-worker
name: develop-maintenance-worker
namespace: human-connection
spec:
containers:
- name: nitro-maintenance-worker
image: humanconnection/maintenance-worker:latest
- name: develop-maintenance-worker
image: ocelotsocialnetwork/develop-maintenance-worker:latest
imagePullPolicy: Always
resources:
requests:

View File

@ -1,4 +1,4 @@
FROM humanconnection/neo4j:latest
FROM ocelotsocialnetwork/develop-neo4j:latest
ENV NODE_ENV=maintenance
EXPOSE 7687 7474

View File

@ -18,8 +18,8 @@ minikube dashboard, expose the services you want on your host system.
For example:
```text
$ minikube service nitro-web --namespace=human-connection
$ minikube service develop-webapp --namespace=human-connection
# optionally
$ minikube service nitro-backend --namespace=human-connection
$ minikube service develop-backend --namespace=human-connection
```
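Alternatively, `minikube service list` prints the URLs of all exposed services in one table; a sketch:
```sh
# One overview instead of exposing services one by one
minikube service list
```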

View File

@ -3,7 +3,7 @@
At the moment, the application needs two persistent volumes:
* The `/data/` folder where `neo4j` stores its database and
* the folder `/nitro-backend/public/uploads` where the backend stores uploads.
* the folder `/develop-backend/public/uploads` where the backend stores uploads.
As a matter of precaution, the persistent volume claims that set up these volumes
live in a separate folder. You don't want to accidentally lose all your data in

View File

@ -29,13 +29,15 @@ database connections left and nobody can access the application.
Run the following:
```sh
kubectl --namespace=human-connection edit deployment nitro-neo4j
$ kubectl --namespace=human-connection edit deployment develop-neo4j
```
Add the following to `spec.template.spec.containers`:
```
```sh
["tail", "-f", "/dev/null"]
```
and write the file, which will update the deployment.
The command `tail -f /dev/null` is the equivalent of *sleep forever*. It is a
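For the non-interactive equivalent, the same override can be injected with `kubectl patch`; a sketch, not the documented procedure:
```sh
# Same effect as the manual edit: make the container sleep forever
kubectl --namespace=human-connection patch deployment develop-neo4j --type=json \
  -p='[{"op": "add", "path": "/spec/template/spec/containers/0/command", "value": ["tail", "-f", "/dev/null"]}]'
```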
@ -51,32 +53,36 @@ file and trigger an update of the deployment.
## Create a Backup in Kubernetes
First stop your Neo4J database, see above. Then:
```sh
kubectl --namespace=human-connection get pods
$ kubectl --namespace=human-connection get pods
# Copy the ID of the pod running Neo4J.
kubectl --namespace=human-connection exec -it <POD-ID> bash
$ kubectl --namespace=human-connection exec -it <POD-ID> bash
# Once you're in the pod, dump the db to a file e.g. `/root/neo4j-backup`.
neo4j-admin dump --to=/root/neo4j-backup
exit
> neo4j-admin dump --to=/root/neo4j-backup
> exit
# Download the file from the pod to your computer.
kubectl cp human-connection/<POD-ID>:/root/neo4j-backup ./neo4j-backup
$ kubectl cp human-connection/<POD-ID>:/root/neo4j-backup ./neo4j-backup
```
Revert your changes to deployment `nitro-neo4j` which will restart the database.
Revert your changes to deployment `develop-neo4j` which will restart the database.
## Restore a Backup in Kubernetes
First stop your Neo4J database. Then:
```sh
kubectl --namespace=human-connection get pods
$ kubectl --namespace=human-connection get pods
# Copy the ID of the pod running Neo4J.
# Then upload your local backup to the pod. Note that once the pod gets deleted
# e.g. if you change the deployment, the backup file is gone with it.
kubectl cp ./neo4j-backup human-connection/<POD-ID>:/root/
kubectl --namespace=human-connection exec -it <POD-ID> bash
$ kubectl cp ./neo4j-backup human-connection/<POD-ID>:/root/
$ kubectl --namespace=human-connection exec -it <POD-ID> bash
# Once you're in the pod, restore the backup and overwrite the default database
# called `graph.db` with `--force`.
# This will delete all existing data in database `graph.db`!
neo4j-admin load --from=/root/neo4j-backup --force
exit
> neo4j-admin load --from=/root/neo4j-backup --force
> exit
```
Revert your changes to deployment `nitro-neo4j` which will restart the database.
Revert your changes to deployment `develop-neo4j` which will restart the database.

View File

@ -7,32 +7,33 @@ database in a kubernetes cluster.
One of the benefits of doing an online backup is that the Neo4j database does not need to be stopped, so there is no downtime. Read [the docs](https://neo4j.com/docs/operations-manual/current/backup/performing/).
To use Neo4j Enterprise you must add this line to your configmap (if you use one) or to your deployment `nitro-neo4j` env.
To use Neo4j Enterprise you must add this line to your configmap (if you use one) or to your deployment `develop-neo4j` env.
```
```sh
NEO4J_ACCEPT_LICENSE_AGREEMENT: "yes"
```
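One way to add it without opening an editor (a sketch; `configmap` is the object name used by the deployments above):
```sh
# Merge the license flag into the existing configmap
kubectl --namespace=human-connection patch configmap configmap \
  --type=merge -p='{"data":{"NEO4J_ACCEPT_LICENSE_AGREEMENT":"yes"}}'
```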
## Create a Backup in Kubernetes
```sh
# Back up the database with one command: this will get the nitro-neo4j pod, exec into it, and run the backup command
kubectl -n=human-connection exec -it $(kubectl -n=human-connection get pods | grep nitro-neo4j | awk '{ print $1 }') -- neo4j-admin backup --backup-dir=/var/lib/neo4j --name=neo4j-backup
# Back up the database with one command: this will get the develop-neo4j pod, exec into it, and run the backup command
$ kubectl -n=human-connection exec -it $(kubectl -n=human-connection get pods | grep develop-neo4j | awk '{ print $1 }') -- neo4j-admin backup --backup-dir=/var/lib/neo4j --name=neo4j-backup
# Download the file from the pod to your computer.
kubectl cp human-connection/$(kubectl -n=human-connection get pods | grep nitro-neo4j | awk '{ print $1 }'):/var/lib/neo4j/neo4j-backup ./neo4j-backup/
$ kubectl cp human-connection/$(kubectl -n=human-connection get pods | grep develop-neo4j | awk '{ print $1 }'):/var/lib/neo4j/neo4j-backup ./neo4j-backup/
```
You should now have a backup of the database locally. If you want, you can simulate disaster recovery by exec-ing into the nitro-neo4j pod, deleting all data, and restoring from the backup.
You should now have a backup of the database locally. If you want, you can simulate disaster recovery by exec-ing into the develop-neo4j pod, deleting all data, and restoring from the backup.
## Disaster where database data is gone somehow
```sh
kubectl -n=human-connection exec -it $(kubectl -n=human-connection get pods | grep nitro-neo4j |awk '{ print $1 }') bash
$ kubectl -n=human-connection exec -it $(kubectl -n=human-connection get pods | grep develop-neo4j |awk '{ print $1 }') bash
# Enter cypher-shell
cypher-shell
$ cypher-shell
# Delete all data
> MATCH (n) DETACH DELETE (n);
exit
> exit
```
## Restore a backup in Kubernetes
@ -42,16 +43,17 @@ Restoration must be done while the database is not running, see [our docs](https
After you have stopped the database and have the pod running, you can restore it by running these commands:
```sh
kubectl --namespace=human-connection get pods
$ kubectl --namespace=human-connection get pods
# Copy the ID of the pod running Neo4J.
# Then upload your local backup to the pod. Note that once the pod gets deleted
# e.g. if you change the deployment, the backup file is gone with it.
kubectl cp ./neo4j-backup/ human-connection/<POD-ID>:/root/
kubectl --namespace=human-connection exec -it <POD-ID> bash
$ kubectl cp ./neo4j-backup/ human-connection/<POD-ID>:/root/
$ kubectl --namespace=human-connection exec -it <POD-ID> bash
# Once you're in the pod, restore the backup and overwrite the default database
# called `graph.db` with `--force`.
# This will delete all existing data in database `graph.db`!
neo4j-admin restore --from=/root/neo4j-backup --force
exit
> neo4j-admin restore --from=/root/neo4j-backup --force
> exit
```
Revert your changes to deployment `nitro-neo4j` which will restart the database.
Revert your changes to deployment `develop-neo4j` which will restart the database.

View File

@ -54,19 +54,19 @@ $ velero backup create hc-backup --include-namespaces=human-connection
That should back up your persistent volumes, too. When you enter:
```
```sh
$ velero backup describe hc-backup --details
```
You should see the persistent volumes at the end of the log:
```
```sh
....
Restic Backups:
Completed:
human-connection/nitro-backend-5b6dd96d6b-q77n6: uploads
human-connection/nitro-neo4j-686d768598-z2vhh: neo4j-data
human-connection/develop-backend-5b6dd96d6b-q77n6: uploads
human-connection/develop-neo4j-686d768598-z2vhh: neo4j-data
```
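Restoring is the mirror operation; a sketch (see the Velero docs for flags and caveats):
```sh
# Recreate the namespace, including restic-backed volume data, from the backup
velero restore create --from-backup hc-backup
```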
## Simulate a Disaster

View File

@ -4,14 +4,14 @@ services:
webapp:
environment:
- "CI=${CI}"
image: humanconnection/nitro-web:build-and-test
image: ocelotsocialnetwork/develop-webapp:build-and-test
build:
context: webapp
target: build-and-test
backend:
environment:
- "CI=${CI}"
image: humanconnection/nitro-backend:build-and-test
image: ocelotsocialnetwork/develop-backend:build-and-test
build:
context: backend
target: build-and-test
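In CI this override is combined with the base compose file; a sketch, where the override's filename is an assumption:
```sh
# Build the build-and-test images (override filename is hypothetical)
docker-compose -f docker-compose.yml -f docker-compose.build.yml build webapp backend
```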

View File

@ -2,7 +2,7 @@ version: "3.4"
services:
maintenance-worker:
image: humanconnection/maintenance-worker:latest
image: ocelotsocialnetwork/develop-maintenance-worker:latest
build:
context: deployment/legacy-migration/maintenance-worker
volumes:

View File

@ -12,7 +12,7 @@ services:
context: webapp
target: build-and-test
volumes:
- ./webapp:/nitro-web
- ./webapp:/develop-webapp
environment:
- NUXT_BUILD=/tmp/nuxt # avoid file permission issues when `rm -rf .nuxt/`
- PUBLIC_REGISTRATION=false
@ -22,7 +22,7 @@ services:
context: backend
target: build-and-test
volumes:
- ./backend:/nitro-backend
- ./backend:/develop-backend
command: yarn run dev
environment:
- SMTP_HOST=mailserver
@ -31,7 +31,7 @@ services:
- "DEBUG=${DEBUG}"
- PUBLIC_REGISTRATION=false
maintenance:
image: humanconnection/maintenance:latest
image: ocelotsocialnetwork/develop-maintenance:latest
build:
context: webapp
dockerfile: Dockerfile.maintenance

View File

@ -2,7 +2,7 @@ version: "3.4"
services:
webapp:
image: humanconnection/nitro-web:latest
image: ocelotsocialnetwork/develop-webapp:latest
build:
context: webapp
target: production
@ -16,13 +16,13 @@ services:
depends_on:
- backend
volumes:
- webapp_node_modules:/nitro-web/node_modules
- webapp_node_modules:/develop-webapp/node_modules
environment:
- HOST=0.0.0.0
- GRAPHQL_URI=http://backend:4000
- MAPBOX_TOKEN="pk.eyJ1IjoiaHVtYW4tY29ubmVjdGlvbiIsImEiOiJjajl0cnBubGoweTVlM3VwZ2lzNTNud3ZtIn0.bZ8KK9l70omjXbEkkbHGsQ"
backend:
image: humanconnection/nitro-backend:latest
image: ocelotsocialnetwork/develop-backend:latest
build:
context: backend
target: production
@ -35,8 +35,8 @@ services:
ports:
- 4000:4000
volumes:
- backend_node_modules:/nitro-backend/node_modules
- uploads:/nitro-backend/public/uploads
- backend_node_modules:/develop-backend/node_modules
- uploads:/develop-backend/public/uploads
environment:
- NEO4J_URI=bolt://neo4j:7687
- GRAPHQL_URI=http://backend:4000
@ -46,7 +46,7 @@ services:
- PRIVATE_KEY_PASSPHRASE=a7dsf78sadg87ad87sfagsadg78
- "DEBUG=${DEBUG}"
neo4j:
image: humanconnection/neo4j:latest
image: ocelotsocialnetwork/develop-neo4j:latest
build:
context: neo4j
args:

View File

@ -4,26 +4,26 @@ ROOT_DIR=$(dirname "$0")/..
VERSION=$(jq -r '.version' $ROOT_DIR/package.json)
IFS='.' read -r major minor patch <<< $VERSION
apps=(nitro-web nitro-backend neo4j maintenance)
apps=(develop-webapp develop-backend develop-neo4j develop-maintenance)
tags=($major $major.$minor $major.$minor.$patch)
# These three docker images have already been built by now:
# docker build --build-arg BUILD_COMMIT=$BUILD_COMMIT --target production -t humanconnection/nitro-backend:latest $ROOT_DIR/backend
# docker build --build-arg BUILD_COMMIT=$BUILD_COMMIT --target production -t humanconnection/nitro-web:latest $ROOT_DIR/webapp
# docker build --build-arg BUILD_COMMIT=$BUILD_COMMIT -t humanconnection/neo4j:latest $ROOT_DIR/neo4j
docker build -t humanconnection/maintenance:latest $ROOT_DIR/webapp/ -f $ROOT_DIR/webapp/Dockerfile.maintenance
# docker build --build-arg BUILD_COMMIT=$BUILD_COMMIT --target production -t ocelotsocialnetwork/develop-backend:latest $ROOT_DIR/backend
# docker build --build-arg BUILD_COMMIT=$BUILD_COMMIT --target production -t ocelotsocialnetwork/develop-webapp:latest $ROOT_DIR/webapp
# docker build --build-arg BUILD_COMMIT=$BUILD_COMMIT -t ocelotsocialnetwork/develop-neo4j:latest $ROOT_DIR/neo4j
docker build -t ocelotsocialnetwork/develop-maintenance:latest $ROOT_DIR/webapp/ -f $ROOT_DIR/webapp/Dockerfile.maintenance
echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin
for app in "${apps[@]}"
do
SOURCE="humanconnection/${app}:latest"
SOURCE="ocelotsocialnetwork/${app}:latest"
echo "docker push $SOURCE"
docker push $SOURCE
for tag in "${tags[@]}"
do
TARGET="humanconnection/${app}:${tag}"
TARGET="ocelotsocialnetwork/${app}:${tag}"
if DOCKER_CLI_EXPERIMENTAL=enabled docker manifest inspect $TARGET >/dev/null; then
echo "docker image ${TARGET} already present, skipping ..."
else

View File

@ -1,4 +1,4 @@
FROM node:lts-alpine as base
FROM node:12.19.0-alpine3.10 as base
LABEL Description="Web Frontend of the Social Network Human-Connection.org" Vendor="Human-Connection gGmbH" Version="0.0.1" Maintainer="Human-Connection gGmbH (developer@human-connection.org)"
EXPOSE 3000
@ -7,7 +7,7 @@ CMD ["yarn", "run", "start"]
# Expose the app port
ARG BUILD_COMMIT
ENV BUILD_COMMIT=$BUILD_COMMIT
ARG WORKDIR=/nitro-web
ARG WORKDIR=/develop-webapp
RUN mkdir -p $WORKDIR
WORKDIR $WORKDIR
@ -25,7 +25,7 @@ RUN NODE_ENV=production yarn run build
FROM base as production
RUN yarn install --production=true --frozen-lockfile --non-interactive --no-cache
COPY --from=build-and-test ./nitro-web/.nuxt ./.nuxt
COPY --from=build-and-test ./nitro-web/static ./static
COPY --from=build-and-test ./develop-webapp/.nuxt ./.nuxt
COPY --from=build-and-test ./develop-webapp/static ./static
COPY nuxt.config.js .
COPY locales locales

View File

@ -1,4 +1,4 @@
FROM node:lts-alpine as build
FROM node:12.19.0-alpine3.10 as build
LABEL Description="Maintenance page of the Social Network Human-Connection.org" Vendor="Human-Connection gGmbH" Version="0.0.1" Maintainer="Human-Connection gGmbH (developer@human-connection.org)"
EXPOSE 3000
@ -7,7 +7,7 @@ CMD ["yarn", "run", "start"]
# Expose the app port
ARG BUILD_COMMIT
ENV BUILD_COMMIT=$BUILD_COMMIT
ARG WORKDIR=/nitro-web
ARG WORKDIR=/develop-webapp
RUN mkdir -p $WORKDIR
WORKDIR $WORKDIR
@ -35,6 +35,6 @@ RUN yarn run generate
FROM nginx:alpine
COPY --from=build ./nitro-web/dist/ /usr/share/nginx/html/
COPY --from=build ./develop-webapp/dist/ /usr/share/nginx/html/
RUN rm /etc/nginx/conf.d/default.conf
COPY maintenance/nginx/custom.conf /etc/nginx/conf.d/
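The resulting image can be tried locally; a sketch that mirrors the build command in `scripts/docker_push.sh` (the host port is arbitrary):
```sh
# Build and serve the static maintenance page at http://localhost:8080
docker build -t ocelotsocialnetwork/develop-maintenance:latest ./webapp/ -f ./webapp/Dockerfile.maintenance
docker run --rm -p 8080:80 ocelotsocialnetwork/develop-maintenance:latest
```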