diff --git a/.travis.yml b/.travis.yml
index 693b93a1d..3cd77a905 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -69,14 +69,16 @@ after_failure:
 before_deploy:
   - go get -u github.com/tcnksm/ghr
-  - ./scripts/setup_kubernetes.sh
+  # stop deployment to kubernetes until we have set it up
+  # - ./scripts/setup_kubernetes.sh
 
 deploy:
   - provider: script
     script: bash scripts/docker_push.sh
     on:
       branch: master
-  - provider: script
-    script: bash scripts/deploy.sh
-    on:
-      branch: master
+  # stop deployment to kubernetes until we have set it up
+  # - provider: script
+  #   script: bash scripts/deploy.sh
+  #   on:
+  #     branch: master
diff --git a/backend/Dockerfile b/backend/Dockerfile
index 957bc6ab5..9c32e83d5 100644
--- a/backend/Dockerfile
+++ b/backend/Dockerfile
@@ -1,11 +1,11 @@
-FROM node:lts-alpine as base
+FROM node:12.19.0-alpine3.10 as base
 LABEL Description="Backend of the Social Network Human-Connection.org" Vendor="Human Connection gGmbH" Version="0.0.1" Maintainer="Human Connection gGmbH (developer@human-connection.org)"
 
 EXPOSE 4000
 CMD ["yarn", "run", "start"]
 
 ARG BUILD_COMMIT
 ENV BUILD_COMMIT=$BUILD_COMMIT
-ARG WORKDIR=/nitro-backend
+ARG WORKDIR=/develop-backend
 RUN mkdir -p $WORKDIR
 WORKDIR $WORKDIR
@@ -22,7 +22,7 @@ RUN NODE_ENV=production yarn run build
 # reduce image size with a multistage build
 FROM base as production
 ENV NODE_ENV=production
-COPY --from=build-and-test /nitro-backend/dist ./dist
+COPY --from=build-and-test /develop-backend/dist ./dist
 COPY ./public/img/ ./public/img/
 COPY ./public/providers.json ./public/providers.json
 RUN yarn install --production=true --frozen-lockfile --non-interactive --no-cache
diff --git a/deployment/helm/human-connection/templates/deployments/deployment-backend.yaml b/deployment/helm/human-connection/templates/deployments/deployment-backend.yaml
index 33aa8a0e0..e3f75a44c 100644
--- a/deployment/helm/human-connection/templates/deployments/deployment-backend.yaml
+++ b/deployment/helm/human-connection/templates/deployments/deployment-backend.yaml
@@ -44,7 +44,7 @@ spec:
         terminationMessagePath: /dev/termination-log
         terminationMessagePolicy: File
         volumeMounts:
-          - mountPath: /nitro-backend/public/uploads
+          - mountPath: /develop-backend/public/uploads
            name: uploads
       dnsPolicy: ClusterFirst
       restartPolicy: Always
diff --git a/deployment/helm/human-connection/values.yaml b/deployment/helm/human-connection/values.yaml
index f30704f89..792e22084 100644
--- a/deployment/helm/human-connection/values.yaml
+++ b/deployment/helm/human-connection/values.yaml
@@ -7,13 +7,13 @@ dbInitializion: "yarn prod:migrate init"
 # dbMigrations runs the database migrations in a post-upgrade hook.
 dbMigrations: "yarn prod:migrate up"
 # bakendImage is the docker image for the backend deployment
-backendImage: humanconnection/nitro-backend
+backendImage: ocelotsocialnetwork/develop-backend
 # maintenanceImage is the docker image for the maintenance deployment
-maintenanceImage: humanconnection/maintenance
+maintenanceImage: ocelotsocialnetwork/develop-maintenance
 # neo4jImage is the docker image for the neo4j deployment
-neo4jImage: humanconnection/neo4j
+neo4jImage: ocelotsocialnetwork/develop-neo4j
 # webappImage is the docker image for the webapp deployment
-webappImage: humanconnection/nitro-web
+webappImage: ocelotsocialnetwork/develop-webapp
 # image configures pullPolicy related to the docker images
 image:
   # pullPolicy indicates when, if ever, pods pull a new image from docker hub.
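Since the base image is now pinned (`node:lts-alpine` → `node:12.19.0-alpine3.10`), a quick local check that the renamed backend image still builds and carries the expected Node version can't hurt — a minimal sketch; the build command mirrors the ones quoted in `scripts/docker_push.sh` further down:

```sh
# Build the production stage of the renamed backend image from the repo root.
docker build --target production -t ocelotsocialnetwork/develop-backend:latest backend
# Print the Node version baked into the image; expect v12.19.0.
docker run --rm ocelotsocialnetwork/develop-backend:latest node --version
```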
diff --git a/deployment/human-connection/deployment-backend.yaml b/deployment/human-connection/deployment-backend.yaml
index 00aab9ffd..733fb2865 100644
--- a/deployment/human-connection/deployment-backend.yaml
+++ b/deployment/human-connection/deployment-backend.yaml
@@ -36,7 +36,7 @@ spec:
             name: configmap
         - secretRef:
             name: human-connection
-        image: humanconnection/nitro-backend:latest
+        image: ocelotsocialnetwork/develop-backend:latest
         imagePullPolicy: Always
         name: backend
         ports:
@@ -46,7 +46,7 @@ spec:
         terminationMessagePath: /dev/termination-log
         terminationMessagePolicy: File
         volumeMounts:
-          - mountPath: /nitro-backend/public/uploads
+          - mountPath: /develop-backend/public/uploads
            name: uploads
       dnsPolicy: ClusterFirst
       restartPolicy: Always
diff --git a/deployment/human-connection/deployment-neo4j.yaml b/deployment/human-connection/deployment-neo4j.yaml
index 5ff67b1a6..e296677b1 100644
--- a/deployment/human-connection/deployment-neo4j.yaml
+++ b/deployment/human-connection/deployment-neo4j.yaml
@@ -31,7 +31,7 @@ spec:
       - envFrom:
         - configMapRef:
             name: configmap
-        image: humanconnection/neo4j:latest
+        image: ocelotsocialnetwork/develop-neo4j:latest
         imagePullPolicy: Always
         name: neo4j
         ports:
diff --git a/deployment/human-connection/deployment-web.yaml b/deployment/human-connection/deployment-web.yaml
index db9c22a1f..242fa33d9 100644
--- a/deployment/human-connection/deployment-web.yaml
+++ b/deployment/human-connection/deployment-web.yaml
@@ -37,7 +37,7 @@ spec:
             name: configmap
         - secretRef:
             name: human-connection
-        image: humanconnection/nitro-web:latest
+        image: ocelotsocialnetwork/develop-webapp:latest
         imagePullPolicy: Always
         name: web
         ports:
diff --git a/deployment/human-connection/maintenance/README.md b/deployment/human-connection/maintenance/README.md
index 02bcb44e2..eb87535f0 100644
--- a/deployment/human-connection/maintenance/README.md
+++ b/deployment/human-connection/maintenance/README.md
@@ -10,9 +10,10 @@ bring the database into maintenance mode for manual database migrations.
 ## Deploy the service
 
 We prepared sample configuration, so you can simply run:
+
 ```sh
 # in folder deployment/
-kubectl apply -f human-connection/maintenance
+$ kubectl apply -f ocelotsocialnetwork/develop-maintenance
 ```
 
 This will fire up a maintenance service.
@@ -23,6 +24,7 @@ Now if you want to have a controlled downtime and you want to
 bring your application into maintenance mode, you can edit your global ingress
 server. E.g. in file `deployment/digital-ocean/https/ingress.yaml` change the
 following:
+
 ```yaml
 ...
@@ -31,13 +33,13 @@ E.g. in file `deployment/digital-ocean/https/ingress.yaml` change the following:
       paths:
         - path: /
           backend:
-            # serviceName: nitro-web
-            serviceName: maintenance
+            # serviceName: develop-webapp
+            serviceName: develop-maintenance
             # servicePort: 3000
             servicePort: 80
 ```
 
-Then run ` kubectl apply -f deployment/digital-ocean/https/ingress.yaml`. If you
+Then run `$ kubectl apply -f deployment/digital-ocean/https/ingress.yaml`. If you
 want to deactivate the maintenance server, just undo the edit and apply the
 configuration again.
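As an alternative to hand-editing `ingress.yaml`, the same switch can be scripted with `kubectl patch`. A hedged sketch: it assumes an ingress object named `ingress` and the rule/path layout shown in the README above — verify with `kubectl --namespace=human-connection get ingress` before relying on the indices:

```sh
# Point path / at the maintenance service; undo by re-applying the
# original ingress.yaml as described in the README above.
kubectl --namespace=human-connection patch ingress ingress --type=json -p='[
  {"op": "replace", "path": "/spec/rules/0/http/paths/0/backend/serviceName", "value": "develop-maintenance"},
  {"op": "replace", "path": "/spec/rules/0/http/paths/0/backend/servicePort", "value": 80}
]'
```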
diff --git a/deployment/human-connection/maintenance/deployment-maintenance.yaml b/deployment/human-connection/maintenance/deployment-maintenance.yaml
index fbbeec639..f84d9bd77 100644
--- a/deployment/human-connection/maintenance/deployment-maintenance.yaml
+++ b/deployment/human-connection/maintenance/deployment-maintenance.yaml
@@ -19,7 +19,7 @@ spec:
         env:
           - name: HOST
             value: 0.0.0.0
-        image: humanconnection/maintenance:latest
+        image: ocelotsocialnetwork/develop-maintenance:latest
         ports:
           - containerPort: 80
         imagePullPolicy: Always
diff --git a/deployment/human-connection/templates/configmap.template.yaml b/deployment/human-connection/templates/configmap.template.yaml
index ae093e4bd..87556b4c7 100644
--- a/deployment/human-connection/templates/configmap.template.yaml
+++ b/deployment/human-connection/templates/configmap.template.yaml
@@ -4,10 +4,10 @@
 data:
   SMTP_HOST: "mailserver.human-connection"
   SMTP_PORT: "25"
-  GRAPHQL_URI: "http://nitro-backend.human-connection:4000"
-  NEO4J_URI: "bolt://nitro-neo4j.human-connection:7687"
+  GRAPHQL_URI: "http://backend.human-connection:4000"
+  NEO4J_URI: "bolt://neo4j.human-connection:7687"
   NEO4J_AUTH: "none"
-  CLIENT_URI: "https://nitro-staging.human-connection.org"
+  CLIENT_URI: "https://staging.human-connection.org"
   NEO4J_apoc_import_file_enabled: "true"
   NEO4J_dbms_memory_pagecache_size: "490M"
   NEO4J_dbms_memory_heap_max__size: "500M"
diff --git a/deployment/legacy-migration/README.md b/deployment/legacy-migration/README.md
index 7e8b6a205..8dd91287f 100644
--- a/deployment/legacy-migration/README.md
+++ b/deployment/legacy-migration/README.md
@@ -43,27 +43,27 @@ Then temporarily delete backend and database deployments
 ```bash
 $ kubectl --namespace=human-connection get deployments
 NAME              READY   UP-TO-DATE   AVAILABLE   AGE
-nitro-backend     1/1     1            1           3d11h
-nitro-neo4j       1/1     1            1           3d11h
-nitro-web         2/2     2            2           73d
-$ kubectl --namespace=human-connection delete deployment nitro-neo4j
-deployment.extensions "nitro-neo4j" deleted
-$ kubectl --namespace=human-connection delete deployment nitro-backend
-deployment.extensions "nitro-backend" deleted
+develop-backend   1/1     1            1           3d11h
+develop-neo4j     1/1     1            1           3d11h
+develop-webapp    2/2     2            2           73d
+$ kubectl --namespace=human-connection delete deployment develop-neo4j
+deployment.extensions "develop-neo4j" deleted
+$ kubectl --namespace=human-connection delete deployment develop-backend
+deployment.extensions "develop-backend" deleted
 ```
 
-Deploy one-time maintenance-worker pod:
+Deploy one-time develop-maintenance-worker pod:
 
 ```bash
 # in deployment/legacy-migration/
 $ kubectl apply -f maintenance-worker.yaml
-pod/nitro-maintenance-worker created
+pod/develop-maintenance-worker created
 ```
 
 Import legacy database and uploads:
 
 ```bash
-$ kubectl --namespace=human-connection exec -it nitro-maintenance-worker bash
+$ kubectl --namespace=human-connection exec -it develop-maintenance-worker bash
 $ import_legacy_db
 $ import_legacy_uploads
 $ exit
@@ -72,7 +72,7 @@ $ exit
 Delete the pod when you're done:
 
 ```bash
-$ kubectl --namespace=human-connection delete pod nitro-maintenance-worker
+$ kubectl --namespace=human-connection delete pod develop-maintenance-worker
 ```
 
 Oh, and of course you have to get those deleted deployments back. One way of
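Condensed, the migration flow from `deployment/legacy-migration/README.md` above comes down to the sketch below. The final step is an assumption — the README text breaks off after "One way of" — re-applying the manifest folder is one plausible way to recreate the deleted deployments:

```sh
# Run from deployment/legacy-migration/.
kubectl --namespace=human-connection delete deployment develop-neo4j
kubectl --namespace=human-connection delete deployment develop-backend
kubectl apply -f maintenance-worker.yaml
kubectl --namespace=human-connection exec -it develop-maintenance-worker -- \
  bash -c 'import_legacy_db && import_legacy_uploads'
kubectl --namespace=human-connection delete pod develop-maintenance-worker
kubectl apply -f ../human-connection/   # assumption: recreate the deployments
```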
diff --git a/deployment/legacy-migration/maintenance-worker.yaml b/deployment/legacy-migration/maintenance-worker.yaml
index 37c46ab45..8a4d930ea 100644
--- a/deployment/legacy-migration/maintenance-worker.yaml
+++ b/deployment/legacy-migration/maintenance-worker.yaml
@@ -2,12 +2,12 @@
 kind: Pod
 apiVersion: v1
 metadata:
-  name: nitro-maintenance-worker
+  name: develop-maintenance-worker
   namespace: human-connection
 spec:
   containers:
-    - name: nitro-maintenance-worker
-      image: humanconnection/maintenance-worker:latest
+    - name: develop-maintenance-worker
+      image: ocelotsocialnetwork/develop-maintenance-worker:latest
       imagePullPolicy: Always
       resources:
         requests:
diff --git a/deployment/legacy-migration/maintenance-worker/Dockerfile b/deployment/legacy-migration/maintenance-worker/Dockerfile
index 4502d8d69..760cc06c8 100644
--- a/deployment/legacy-migration/maintenance-worker/Dockerfile
+++ b/deployment/legacy-migration/maintenance-worker/Dockerfile
@@ -1,4 +1,4 @@
-FROM humanconnection/neo4j:latest
+FROM ocelotsocialnetwork/develop-neo4j:latest
 ENV NODE_ENV=maintenance
 
 EXPOSE 7687 7474
diff --git a/deployment/minikube/README.md b/deployment/minikube/README.md
index 342675b1b..499fc8290 100644
--- a/deployment/minikube/README.md
+++ b/deployment/minikube/README.md
@@ -18,8 +18,8 @@ minikube dashboard, expose the services you want on your host system.
 For example:
 
 ```text
-$ minikube service nitro-web --namespace=human-connection
+$ minikube service develop-webapp --namespace=human-connection
 # optionally
-$ minikube service nitro-backend --namespace=human-connection
+$ minikube service develop-backend --namespace=human-connection
 ```
diff --git a/deployment/volumes/README.md b/deployment/volumes/README.md
index 2d08a34cb..10f0fd226 100644
--- a/deployment/volumes/README.md
+++ b/deployment/volumes/README.md
@@ -3,7 +3,7 @@
 At the moment, the application needs two persistent volumes:
 
 * The `/data/` folder where `neo4j` stores its database and
-* the folder `/nitro-backend/public/uploads` where the backend stores uploads.
+* the folder `/develop-backend/public/uploads` where the backend stores uploads.
 
 As a matter of precaution, the persistent volume claims that setup these volumes
 live in a separate folder. You don't want to accidently loose all your data in
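Before touching any of the backup procedures below, it is worth confirming that both of these volumes are actually provisioned on the cluster. A minimal sketch — claim names vary by setup, so list rather than assume:

```sh
# Persistent volume claims in the application namespace.
kubectl --namespace=human-connection get pvc
# Cluster-wide persistent volumes backing them.
kubectl get pv
```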
diff --git a/deployment/volumes/neo4j-offline-backup/README.md b/deployment/volumes/neo4j-offline-backup/README.md
index 5d773714b..12e6029e9 100644
--- a/deployment/volumes/neo4j-offline-backup/README.md
+++ b/deployment/volumes/neo4j-offline-backup/README.md
@@ -29,13 +29,15 @@ database connections left and nobody can access the application.
 Run the following:
 
 ```sh
-kubectl --namespace=human-connection edit deployment nitro-neo4j
+$ kubectl --namespace=human-connection edit deployment develop-neo4j
 ```
 
 Add the following to `spec.template.spec.containers`:
-```
+
+```sh
 ["tail", "-f", "/dev/null"]
 ```
+
 and write the file which will update the deployment.
 
 The command `tail -f /dev/null` is the equivalent of *sleep forever*. It is a
@@ -51,32 +53,36 @@ file and trigger an update of the deployment.
 ## Create a Backup in Kubernetes
 
 First stop your Neo4J database, see above. Then:
+
 ```sh
-kubectl --namespace=human-connection get pods
+$ kubectl --namespace=human-connection get pods
 # Copy the ID of the pod running Neo4J.
-kubectl --namespace=human-connection exec -it <pod-id> bash
+$ kubectl --namespace=human-connection exec -it <pod-id> bash
 # Once you're in the pod, dump the db to a file e.g. `/root/neo4j-backup`.
-neo4j-admin dump --to=/root/neo4j-backup
-exit
+> neo4j-admin dump --to=/root/neo4j-backup
+> exit
 # Download the file from the pod to your computer.
- kubectl cp human-connection/<pod-id>:/root/neo4j-backup ./neo4j-backup
+$ kubectl cp human-connection/<pod-id>:/root/neo4j-backup ./neo4j-backup
 ```
+
-Revert your changes to deployment `nitro-neo4j` which will restart the database.
+Revert your changes to deployment `develop-neo4j` which will restart the database.
 
 ## Restore a Backup in Kubernetes
 
 First stop your Neo4J database. Then:
+
 ```sh
-kubectl --namespace=human-connection get pods
+$ kubectl --namespace=human-connection get pods
 # Copy the ID of the pod running Neo4J.
 # Then upload your local backup to the pod. Note that once the pod gets deleted
 # e.g. if you change the deployment, the backup file is gone with it.
-kubectl cp ./neo4j-backup human-connection/<pod-id>:/root/
-kubectl --namespace=human-connection exec -it <pod-id> bash
+$ kubectl cp ./neo4j-backup human-connection/<pod-id>:/root/
+$ kubectl --namespace=human-connection exec -it <pod-id> bash
 # Once you're in the pod restore the backup and overwrite the default database
 # called `graph.db` with `--force`.
 # This will delete all existing data in database `graph.db`!
-neo4j-admin load --from=/root/neo4j-backup --force
-exit
+> neo4j-admin load --from=/root/neo4j-backup --force
+> exit
 ```
+
-Revert your changes to deployment `nitro-neo4j` which will restart the database.
+Revert your changes to deployment `develop-neo4j` which will restart the database.
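For reference, the interactive dump walked through in `neo4j-offline-backup/README.md` above also fits into three non-interactive commands once the pod ID is known — a condensed sketch, reusing the `<pod-id>` placeholder from the README:

```sh
# Note the ID of the neo4j pod, then dump and download in one go.
kubectl --namespace=human-connection get pods
kubectl --namespace=human-connection exec -it <pod-id> -- \
  neo4j-admin dump --to=/root/neo4j-backup
kubectl cp human-connection/<pod-id>:/root/neo4j-backup ./neo4j-backup
```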
diff --git a/deployment/volumes/neo4j-online-backup/README.md b/deployment/volumes/neo4j-online-backup/README.md
index f096c769f..4eaa511ef 100644
--- a/deployment/volumes/neo4j-online-backup/README.md
+++ b/deployment/volumes/neo4j-online-backup/README.md
@@ -7,32 +7,33 @@ database in a kubernetes cluster.
 One of the benefits of doing an online backup is that the Neo4j database does
 not need to be stopped, so there is no downtime.
 Read [the docs](https://neo4j.com/docs/operations-manual/current/backup/performing/)
 
-To use Neo4j Enterprise you must add this line to your configmap, if using, or your deployment `nitro-neo4j` env.
+To use Neo4j Enterprise you must add this line to your configmap, if using, or your deployment `develop-neo4j` env.
 
-```
+```sh
 NEO4J_ACCEPT_LICENSE_AGREEMENT: "yes"
 ```
+
 ## Create a Backup in Kubernetes
 
 ```sh
-# Backup the database with one command, this will get the nitro-neo4j pod, ssh into it, and run the backup command
-kubectl -n=human-connection exec -it $(kubectl -n=human-connection get pods | grep nitro-neo4j | awk '{ print $1 }') -- neo4j-admin backup --backup-dir=/var/lib/neo4j --name=neo4j-backup
+# Backup the database with one command, this will get the develop-neo4j pod, ssh into it, and run the backup command
+$ kubectl -n=human-connection exec -it $(kubectl -n=human-connection get pods | grep develop-neo4j | awk '{ print $1 }') -- neo4j-admin backup --backup-dir=/var/lib/neo4j --name=neo4j-backup
 
 # Download the file from the pod to your computer.
-kubectl cp human-connection/$(kubectl -n=human-connection get pods | grep nitro-neo4j | awk '{ print $1 }'):/var/lib/neo4j/neo4j-backup ./neo4j-backup/
+$ kubectl cp human-connection/$(kubectl -n=human-connection get pods | grep develop-neo4j | awk '{ print $1 }'):/var/lib/neo4j/neo4j-backup ./neo4j-backup/
 ```
 
-You should now have a backup of the database locally. If you want, you can simulate disaster recovery by sshing into the nitro-neo4j pod, deleting all data and restoring from backup
+You should now have a backup of the database locally. If you want, you can simulate disaster recovery by sshing into the develop-neo4j pod, deleting all data and restoring from backup
 
 ## Disaster where database data is gone somehow
 
 ```sh
-kubectl -n=human-connection exec -it $(kubectl -n=human-connection get pods | grep nitro-neo4j |awk '{ print $1 }') bash
+$ kubectl -n=human-connection exec -it $(kubectl -n=human-connection get pods | grep develop-neo4j |awk '{ print $1 }') bash
 # Enter cypher-shell
-cypher-shell
+$ cypher-shell
 # Delete all data
 > MATCH (n) DETACH DELETE (n);
-exit
+> exit
 ```
 
 ## Restore a backup in Kubernetes
 
@@ -42,16 +43,17 @@ Restoration must be done while the database is not running, see [our docs](https
 After, you have stopped the database, and have the pod running, you can restore the database by running these commands:
 
 ```sh
-kubectl --namespace=human-connection get pods
+$ kubectl --namespace=human-connection get pods
 # Copy the ID of the pod running Neo4J.
 # Then upload your local backup to the pod. Note that once the pod gets deleted
 # e.g. if you change the deployment, the backup file is gone with it.
-kubectl cp ./neo4j-backup/ human-connection/<pod-id>:/root/
-kubectl --namespace=human-connection exec -it <pod-id> bash
+$ kubectl cp ./neo4j-backup/ human-connection/<pod-id>:/root/
+$ kubectl --namespace=human-connection exec -it <pod-id> bash
 # Once you're in the pod restore the backup and overwrite the default database
 # called `graph.db` with `--force`.
 # This will delete all existing data in database `graph.db`!
-neo4j-admin restore --from=/root/neo4j-backup --force
-exit
+> neo4j-admin restore --from=/root/neo4j-backup --force
+> exit
 ```
-Revert your changes to deployment `nitro-neo4j` which will restart the database.
\ No newline at end of file
+
+Revert your changes to deployment `develop-neo4j` which will restart the database.
\ No newline at end of file
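The `$(kubectl ... | grep develop-neo4j | awk '{ print $1 }')` idiom appears in every command of this README; a small hypothetical helper (not part of the repo) keeps the lines readable:

```sh
# Resolve the develop-neo4j pod name once, reuse everywhere.
neo4j_pod() {
  kubectl -n=human-connection get pods | grep develop-neo4j | awk '{ print $1 }'
}

kubectl -n=human-connection exec -it "$(neo4j_pod)" -- \
  neo4j-admin backup --backup-dir=/var/lib/neo4j --name=neo4j-backup
```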
diff --git a/deployment/volumes/velero/README.md b/deployment/volumes/velero/README.md
index e469ad117..5b8fc9d2e 100644
--- a/deployment/volumes/velero/README.md
+++ b/deployment/volumes/velero/README.md
@@ -54,19 +54,19 @@ $ velero backup create hc-backup --include-namespaces=human-connection
 That should backup your persistent volumes, too. When you enter:
 
-```
+```sh
 $ velero backup describe hc-backup --details
 ```
 
 You should see the persistent volumes at the end of the log:
 
-```
+```sh
 ....
 
 Restic Backups:
   Completed:
-    human-connection/nitro-backend-5b6dd96d6b-q77n6: uploads
-    human-connection/nitro-neo4j-686d768598-z2vhh: neo4j-data
+    human-connection/develop-backend-5b6dd96d6b-q77n6: uploads
+    human-connection/develop-neo4j-686d768598-z2vhh: neo4j-data
 ```
 
 ## Simulate a Disaster
diff --git a/docker-compose.build-and-test.yml b/docker-compose.build-and-test.yml
index 27aa9fc6b..dbbb16d9b 100644
--- a/docker-compose.build-and-test.yml
+++ b/docker-compose.build-and-test.yml
@@ -4,14 +4,14 @@ services:
   webapp:
     environment:
       - "CI=${CI}"
-    image: humanconnection/nitro-web:build-and-test
+    image: ocelotsocialnetwork/develop-webapp:build-and-test
     build:
       context: webapp
      target: build-and-test
   backend:
     environment:
       - "CI=${CI}"
-    image: humanconnection/nitro-backend:build-and-test
+    image: ocelotsocialnetwork/develop-backend:build-and-test
     build:
       context: backend
       target: build-and-test
diff --git a/docker-compose.maintenance.yml b/docker-compose.maintenance.yml
index b6a869c57..e6b280826 100644
--- a/docker-compose.maintenance.yml
+++ b/docker-compose.maintenance.yml
@@ -2,7 +2,7 @@ version: "3.4"
 services:
   maintenance-worker:
-    image: humanconnection/maintenance-worker:latest
+    image: ocelotsocialnetwork/develop-maintenance-worker:latest
     build:
       context: deployment/legacy-migration/maintenance-worker
     volumes:
diff --git a/docker-compose.override.yml b/docker-compose.override.yml
index 56f13939e..7ca6debeb 100644
--- a/docker-compose.override.yml
+++ b/docker-compose.override.yml
@@ -12,7 +12,7 @@ services:
       context: webapp
       target: build-and-test
     volumes:
-      - ./webapp:/nitro-web
+      - ./webapp:/develop-webapp
     environment:
       - NUXT_BUILD=/tmp/nuxt # avoid file permission issues when `rm -rf .nuxt/`
       - PUBLIC_REGISTRATION=false
@@ -22,7 +22,7 @@ services:
       context: backend
       target: build-and-test
     volumes:
-      - ./backend:/nitro-backend
+      - ./backend:/develop-backend
     command: yarn run dev
     environment:
       - SMTP_HOST=mailserver
@@ -31,7 +31,7 @@ services:
       - "DEBUG=${DEBUG}"
       - PUBLIC_REGISTRATION=false
   maintenance:
-    image: humanconnection/maintenance:latest
+    image: ocelotsocialnetwork/develop-maintenance:latest
     build:
       context: webapp
       dockerfile: Dockerfile.maintenance
diff --git a/docker-compose.yml b/docker-compose.yml
index 7b0c00163..7ab71aa83 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -2,7 +2,7 @@ version: "3.4"
 services:
   webapp:
-    image: humanconnection/nitro-web:latest
+    image: ocelotsocialnetwork/develop-webapp:latest
     build:
       context: webapp
       target: production
@@ -16,13 +16,13 @@ services:
     depends_on:
       - backend
     volumes:
-      - webapp_node_modules:/nitro-web/node_modules
+      - webapp_node_modules:/develop-webapp/node_modules
     environment:
       - HOST=0.0.0.0
       - GRAPHQL_URI=http://backend:4000
       - MAPBOX_TOKEN="pk.eyJ1IjoiaHVtYW4tY29ubmVjdGlvbiIsImEiOiJjajl0cnBubGoweTVlM3VwZ2lzNTNud3ZtIn0.bZ8KK9l70omjXbEkkbHGsQ"
   backend:
-    image: humanconnection/nitro-backend:latest
+    image: ocelotsocialnetwork/develop-backend:latest
     build:
       context: backend
       target: production
@@ -35,8 +35,8 @@ services:
     ports:
       - 4000:4000
     volumes:
-      - backend_node_modules:/nitro-backend/node_modules
-      - uploads:/nitro-backend/public/uploads
+      - backend_node_modules:/develop-backend/node_modules
+      - uploads:/develop-backend/public/uploads
     environment:
       - NEO4J_URI=bolt://neo4j:7687
       - GRAPHQL_URI=http://backend:4000
@@ -46,7 +46,7 @@ services:
      - PRIVATE_KEY_PASSPHRASE=a7dsf78sadg87ad87sfagsadg78
      - "DEBUG=${DEBUG}"
   neo4j:
-    image: humanconnection/neo4j:latest
+    image: ocelotsocialnetwork/develop-neo4j:latest
     build:
       context: neo4j
       args:
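A rename touching this many compose files is easy to get wrong; `docker-compose config` resolves and prints the merged configuration, so grepping its output is a cheap way to catch any stale `humanconnection/nitro-*` reference. A sketch, run from the repo root (`docker-compose.override.yml` is merged in automatically):

```sh
docker-compose config | grep 'image:'
docker-compose -f docker-compose.build-and-test.yml config | grep 'image:'
docker-compose -f docker-compose.maintenance.yml config | grep 'image:'
```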
diff --git a/scripts/docker_push.sh b/scripts/docker_push.sh
index 3c746af92..b1ae8fbc0 100755
--- a/scripts/docker_push.sh
+++ b/scripts/docker_push.sh
@@ -4,26 +4,26 @@ ROOT_DIR=$(dirname "$0")/..
 VERSION=$(jq -r '.version' $ROOT_DIR/package.json)
 IFS='.' read -r major minor patch <<< $VERSION
 
-apps=(nitro-web nitro-backend neo4j maintenance)
+apps=(develop-webapp develop-backend develop-neo4j develop-maintenance)
 tags=($major $major.$minor $major.$minor.$patch)
 
 # These three docker images have already been built by now:
-# docker build --build-arg BUILD_COMMIT=$BUILD_COMMIT --target production -t humanconnection/nitro-backend:latest $ROOT_DIR/backend
-# docker build --build-arg BUILD_COMMIT=$BUILD_COMMIT --target production -t humanconnection/nitro-web:latest $ROOT_DIR/webapp
-# docker build --build-arg BUILD_COMMIT=$BUILD_COMMIT -t humanconnection/neo4j:latest $ROOT_DIR/neo4j
-docker build -t humanconnection/maintenance:latest $ROOT_DIR/webapp/ -f $ROOT_DIR/webapp/Dockerfile.maintenance
+# docker build --build-arg BUILD_COMMIT=$BUILD_COMMIT --target production -t ocelotsocialnetwork/develop-backend:latest $ROOT_DIR/backend
+# docker build --build-arg BUILD_COMMIT=$BUILD_COMMIT --target production -t ocelotsocialnetwork/develop-webapp:latest $ROOT_DIR/webapp
+# docker build --build-arg BUILD_COMMIT=$BUILD_COMMIT -t ocelotsocialnetwork/develop-neo4j:latest $ROOT_DIR/neo4j
+docker build -t ocelotsocialnetwork/develop-maintenance:latest $ROOT_DIR/webapp/ -f $ROOT_DIR/webapp/Dockerfile.maintenance
 
 echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin
 
 for app in "${apps[@]}"
 do
-  SOURCE="humanconnection/${app}:latest"
+  SOURCE="ocelotsocialnetwork/${app}:latest"
   echo "docker push $SOURCE"
   docker push $SOURCE
   for tag in "${tags[@]}"
   do
-    TARGET="humanconnection/${app}:${tag}"
+    TARGET="ocelotsocialnetwork/${app}:${tag}"
     if DOCKER_CLI_EXPERIMENTAL=enabled docker manifest inspect $TARGET >/dev/null; then
       echo "docker image ${TARGET} already present, skipping ..."
     else
diff --git a/webapp/Dockerfile b/webapp/Dockerfile
index c5025949b..fb0489476 100644
--- a/webapp/Dockerfile
+++ b/webapp/Dockerfile
@@ -1,4 +1,4 @@
-FROM node:lts-alpine as base
+FROM node:12.19.0-alpine3.10 as base
 LABEL Description="Web Frontend of the Social Network Human-Connection.org" Vendor="Human-Connection gGmbH" Version="0.0.1" Maintainer="Human-Connection gGmbH (developer@human-connection.org)"
 
 EXPOSE 3000
@@ -7,7 +7,7 @@ CMD ["yarn", "run", "start"]
 # Expose the app port
 ARG BUILD_COMMIT
 ENV BUILD_COMMIT=$BUILD_COMMIT
-ARG WORKDIR=/nitro-web
+ARG WORKDIR=/develop-webapp
 RUN mkdir -p $WORKDIR
 WORKDIR $WORKDIR
 
@@ -25,7 +25,7 @@ RUN NODE_ENV=production yarn run build
 
 FROM base as production
 RUN yarn install --production=true --frozen-lockfile --non-interactive --no-cache
-COPY --from=build-and-test ./nitro-web/.nuxt ./.nuxt
-COPY --from=build-and-test ./nitro-web/static ./static
+COPY --from=build-and-test ./develop-webapp/.nuxt ./.nuxt
+COPY --from=build-and-test ./develop-webapp/static ./static
 COPY nuxt.config.js .
 COPY locales locales
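The tag fan-out in `scripts/docker_push.sh` above is easiest to see with a concrete version. For `"version": "1.2.3"` in `package.json`, the loop pushes `:latest` plus three version tags per app — a sketch of just that expansion:

```sh
# Same splitting logic as the script, with a fixed example version.
VERSION=1.2.3
IFS='.' read -r major minor patch <<< $VERSION
tags=($major $major.$minor $major.$minor.$patch)
for tag in "${tags[@]}"; do
  echo "ocelotsocialnetwork/develop-webapp:${tag}"
done
# ocelotsocialnetwork/develop-webapp:1
# ocelotsocialnetwork/develop-webapp:1.2
# ocelotsocialnetwork/develop-webapp:1.2.3
```

Each versioned tag is pushed only if `docker manifest inspect` shows it does not already exist on the registry, so re-running the script for an unchanged version is a no-op.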
diff --git a/webapp/Dockerfile.maintenance b/webapp/Dockerfile.maintenance
index ca4ba37bc..dfa392b24 100644
--- a/webapp/Dockerfile.maintenance
+++ b/webapp/Dockerfile.maintenance
@@ -1,4 +1,4 @@
-FROM node:lts-alpine as build
+FROM node:12.19.0-alpine3.10 as build
 LABEL Description="Maintenance page of the Social Network Human-Connection.org" Vendor="Human-Connection gGmbH" Version="0.0.1" Maintainer="Human-Connection gGmbH (developer@human-connection.org)"
 
 EXPOSE 3000
@@ -7,7 +7,7 @@ CMD ["yarn", "run", "start"]
 # Expose the app port
 ARG BUILD_COMMIT
 ENV BUILD_COMMIT=$BUILD_COMMIT
-ARG WORKDIR=/nitro-web
+ARG WORKDIR=/develop-webapp
 RUN mkdir -p $WORKDIR
 WORKDIR $WORKDIR
 
@@ -35,6 +35,6 @@ RUN yarn run generate
 
 FROM nginx:alpine
-COPY --from=build ./nitro-web/dist/ /usr/share/nginx/html/
+COPY --from=build ./develop-webapp/dist/ /usr/share/nginx/html/
 RUN rm /etc/nginx/conf.d/default.conf
 COPY maintenance/nginx/custom.conf /etc/nginx/conf.d/
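To smoke-test the resulting nginx maintenance image locally — the build command is taken from `scripts/docker_push.sh` above, the container port from `deployment-maintenance.yaml`, and the host port is an arbitrary choice:

```sh
docker build -t ocelotsocialnetwork/develop-maintenance:latest webapp/ \
  -f webapp/Dockerfile.maintenance
docker run --rm -p 8080:80 ocelotsocialnetwork/develop-maintenance:latest
# The maintenance page is now served at http://localhost:8080
```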