Rename dockerhub organizations

..plus get rid of obsolete prefix `nitro-`.
This commit is contained in:
roschaefer 2020-06-04 13:03:49 +02:00
parent c9dc7d9b5e
commit 23ae569509
25 changed files with 70 additions and 70 deletions

View File

@ -5,7 +5,7 @@ EXPOSE 4000
CMD ["yarn", "run", "start"] CMD ["yarn", "run", "start"]
ARG BUILD_COMMIT ARG BUILD_COMMIT
ENV BUILD_COMMIT=$BUILD_COMMIT ENV BUILD_COMMIT=$BUILD_COMMIT
ARG WORKDIR=/nitro-backend ARG WORKDIR=/backend
RUN mkdir -p $WORKDIR RUN mkdir -p $WORKDIR
WORKDIR $WORKDIR WORKDIR $WORKDIR
@ -22,7 +22,7 @@ RUN NODE_ENV=production yarn run build
# reduce image size with a multistage build # reduce image size with a multistage build
FROM base as production FROM base as production
ENV NODE_ENV=production ENV NODE_ENV=production
COPY --from=build-and-test /nitro-backend/dist ./dist COPY --from=build-and-test /backend/dist ./dist
COPY ./public/img/ ./public/img/ COPY ./public/img/ ./public/img/
COPY ./public/providers.json ./public/providers.json COPY ./public/providers.json ./public/providers.json
RUN yarn install --production=true --frozen-lockfile --non-interactive --no-cache RUN yarn install --production=true --frozen-lockfile --non-interactive --no-cache

View File

@ -14,7 +14,7 @@ import { InMemoryCache } from 'apollo-cache-inmemory'
import fetch from 'node-fetch' import fetch from 'node-fetch'
import { ApolloClient } from 'apollo-client' import { ApolloClient } from 'apollo-client'
import trunc from 'trunc-html' import trunc from 'trunc-html'
const debug = require('debug')('ea:nitro-datasource') const debug = require('debug')('ea:datasource')
export default class NitroDataSource { export default class NitroDataSource {
constructor(uri) { constructor(uri) {

View File

@ -44,7 +44,7 @@ spec:
terminationMessagePath: /dev/termination-log terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File terminationMessagePolicy: File
volumeMounts: volumeMounts:
- mountPath: /nitro-backend/public/uploads - mountPath: /backend/public/uploads
name: uploads name: uploads
dnsPolicy: ClusterFirst dnsPolicy: ClusterFirst
restartPolicy: Always restartPolicy: Always

View File

@ -7,13 +7,13 @@ dbInitializion: "yarn prod:migrate init"
# dbMigrations runs the database migrations in a post-upgrade hook. # dbMigrations runs the database migrations in a post-upgrade hook.
dbMigrations: "yarn prod:migrate up" dbMigrations: "yarn prod:migrate up"
# backendImage is the docker image for the backend deployment # backendImage is the docker image for the backend deployment
backendImage: humanconnection/nitro-backend backendImage: schoolsinmotion/backend
# maintenanceImage is the docker image for the maintenance deployment # maintenanceImage is the docker image for the maintenance deployment
maintenanceImage: humanconnection/maintenance maintenanceImage: schoolsinmotion/maintenance
# neo4jImage is the docker image for the neo4j deployment # neo4jImage is the docker image for the neo4j deployment
neo4jImage: humanconnection/neo4j neo4jImage: schoolsinmotion/neo4j
# webappImage is the docker image for the webapp deployment # webappImage is the docker image for the webapp deployment
webappImage: humanconnection/nitro-web webappImage: schoolsinmotion/webapp
# image configures pullPolicy related to the docker images # image configures pullPolicy related to the docker images
image: image:
# pullPolicy indicates when, if ever, pods pull a new image from docker hub. # pullPolicy indicates when, if ever, pods pull a new image from docker hub.
@ -50,4 +50,4 @@ privateKeyPassphrase: "YTdkc2Y3OHNhZGc4N2FkODdzZmFnc2FkZzc4"
mapboxToken: "cGsuZXlKMUlqb2lhSFZ0WVc0dFkyOXVibVZqZEdsdmJpSXNJbUVpT2lKamFqbDBjbkJ1Ykdvd2VUVmxNM1Z3WjJsek5UTnVkM1p0SW4wLktaOEtLOWw3MG9talhiRWtrYkhHc1E=" mapboxToken: "cGsuZXlKMUlqb2lhSFZ0WVc0dFkyOXVibVZqZEdsdmJpSXNJbUVpT2lKamFqbDBjbkJ1Ykdvd2VUVmxNM1Z3WjJsek5UTnVkM1p0SW4wLktaOEtLOWw3MG9talhiRWtrYkhHc1E="
uploadsStorage: "25Gi" uploadsStorage: "25Gi"
neo4jStorage: "5Gi" neo4jStorage: "5Gi"
developmentMailserverDomain: nitro-mailserver.human-connection.org developmentMailserverDomain: nitro-mailserver.human-connection.org

View File

@ -36,7 +36,7 @@ spec:
name: configmap name: configmap
- secretRef: - secretRef:
name: human-connection name: human-connection
image: humanconnection/nitro-backend:latest image: schoolsinmotion/backend:latest
imagePullPolicy: Always imagePullPolicy: Always
name: backend name: backend
ports: ports:
@ -46,7 +46,7 @@ spec:
terminationMessagePath: /dev/termination-log terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File terminationMessagePolicy: File
volumeMounts: volumeMounts:
- mountPath: /nitro-backend/public/uploads - mountPath: /backend/public/uploads
name: uploads name: uploads
dnsPolicy: ClusterFirst dnsPolicy: ClusterFirst
restartPolicy: Always restartPolicy: Always

View File

@ -31,7 +31,7 @@ spec:
- envFrom: - envFrom:
- configMapRef: - configMapRef:
name: configmap name: configmap
image: humanconnection/neo4j:latest image: schoolsinmotion/neo4j:latest
imagePullPolicy: Always imagePullPolicy: Always
name: neo4j name: neo4j
ports: ports:

View File

@ -37,7 +37,7 @@ spec:
name: configmap name: configmap
- secretRef: - secretRef:
name: human-connection name: human-connection
image: humanconnection/nitro-web:latest image: schoolsinmotion/webapp:latest
imagePullPolicy: Always imagePullPolicy: Always
name: web name: web
ports: ports:

View File

@ -31,7 +31,7 @@ E.g. in file `deployment/digital-ocean/https/ingress.yaml` change the following:
paths: paths:
- path: / - path: /
backend: backend:
# serviceName: nitro-web # serviceName: webapp
serviceName: maintenance serviceName: maintenance
# servicePort: 3000 # servicePort: 3000
servicePort: 80 servicePort: 80

View File

@ -19,7 +19,7 @@ spec:
env: env:
- name: HOST - name: HOST
value: 0.0.0.0 value: 0.0.0.0
image: humanconnection/maintenance:latest image: schoolsinmotion/maintenance:latest
ports: ports:
- containerPort: 80 - containerPort: 80
imagePullPolicy: Always imagePullPolicy: Always

View File

@ -4,8 +4,8 @@
data: data:
SMTP_HOST: "mailserver.human-connection" SMTP_HOST: "mailserver.human-connection"
SMTP_PORT: "25" SMTP_PORT: "25"
GRAPHQL_URI: "http://nitro-backend.human-connection:4000" GRAPHQL_URI: "http://backend.human-connection:4000"
NEO4J_URI: "bolt://nitro-neo4j.human-connection:7687" NEO4J_URI: "bolt://neo4j.human-connection:7687"
NEO4J_AUTH: "none" NEO4J_AUTH: "none"
CLIENT_URI: "https://nitro-staging.human-connection.org" CLIENT_URI: "https://nitro-staging.human-connection.org"
NEO4J_apoc_import_file_enabled: "true" NEO4J_apoc_import_file_enabled: "true"

View File

@ -43,13 +43,13 @@ Then temporarily delete backend and database deployments
```bash ```bash
$ kubectl --namespace=human-connection get deployments $ kubectl --namespace=human-connection get deployments
NAME READY UP-TO-DATE AVAILABLE AGE NAME READY UP-TO-DATE AVAILABLE AGE
nitro-backend 1/1 1 1 3d11h backend 1/1 1 1 3d11h
nitro-neo4j 1/1 1 1 3d11h neo4j 1/1 1 1 3d11h
nitro-web 2/2 2 2 73d webapp 2/2 2 2 73d
$ kubectl --namespace=human-connection delete deployment nitro-neo4j $ kubectl --namespace=human-connection delete deployment neo4j
deployment.extensions "nitro-neo4j" deleted deployment.extensions "neo4j" deleted
$ kubectl --namespace=human-connection delete deployment nitro-backend $ kubectl --namespace=human-connection delete deployment backend
deployment.extensions "nitro-backend" deleted deployment.extensions "backend" deleted
``` ```
Deploy one-time maintenance-worker pod: Deploy one-time maintenance-worker pod:
@ -57,13 +57,13 @@ Deploy one-time maintenance-worker pod:
```bash ```bash
# in deployment/legacy-migration/ # in deployment/legacy-migration/
$ kubectl apply -f maintenance-worker.yaml $ kubectl apply -f maintenance-worker.yaml
pod/nitro-maintenance-worker created pod/maintenance-worker created
``` ```
Import legacy database and uploads: Import legacy database and uploads:
```bash ```bash
$ kubectl --namespace=human-connection exec -it nitro-maintenance-worker bash $ kubectl --namespace=human-connection exec -it maintenance-worker bash
$ import_legacy_db $ import_legacy_db
$ import_legacy_uploads $ import_legacy_uploads
$ exit $ exit
@ -72,7 +72,7 @@ $ exit
Delete the pod when you're done: Delete the pod when you're done:
```bash ```bash
$ kubectl --namespace=human-connection delete pod nitro-maintenance-worker $ kubectl --namespace=human-connection delete pod maintenance-worker
``` ```
Oh, and of course you have to get those deleted deployments back. One way of Oh, and of course you have to get those deleted deployments back. One way of

View File

@ -2,12 +2,12 @@
kind: Pod kind: Pod
apiVersion: v1 apiVersion: v1
metadata: metadata:
name: nitro-maintenance-worker name: maintenance-worker
namespace: human-connection namespace: human-connection
spec: spec:
containers: containers:
- name: nitro-maintenance-worker - name: maintenance-worker
image: humanconnection/maintenance-worker:latest image: schoolsinmotion/maintenance-worker:latest
imagePullPolicy: Always imagePullPolicy: Always
resources: resources:
requests: requests:

View File

@ -1,4 +1,4 @@
FROM humanconnection/neo4j:latest FROM schoolsinmotion/neo4j:latest
ENV NODE_ENV=maintenance ENV NODE_ENV=maintenance
EXPOSE 7687 7474 EXPOSE 7687 7474

View File

@ -18,8 +18,8 @@ minikube dashboard, expose the services you want on your host system.
For example: For example:
```text ```text
$ minikube service nitro-web --namespace=human-connection $ minikube service webapp --namespace=human-connection
# optionally # optionally
$ minikube service nitro-backend --namespace=human-connection $ minikube service backend --namespace=human-connection
``` ```

View File

@ -3,7 +3,7 @@
At the moment, the application needs two persistent volumes: At the moment, the application needs two persistent volumes:
* The `/data/` folder where `neo4j` stores its database and * The `/data/` folder where `neo4j` stores its database and
* the folder `/nitro-backend/public/uploads` where the backend stores uploads. * the folder `/backend/public/uploads` where the backend stores uploads.
As a matter of precaution, the persistent volume claims that setup these volumes As a matter of precaution, the persistent volume claims that setup these volumes
live in a separate folder. You don't want to accidentally lose all your data in live in a separate folder. You don't want to accidentally lose all your data in

View File

@ -29,7 +29,7 @@ database connections left and nobody can access the application.
Run the following: Run the following:
```sh ```sh
kubectl --namespace=human-connection edit deployment nitro-neo4j kubectl --namespace=human-connection edit deployment neo4j
``` ```
Add the following to `spec.template.spec.containers`: Add the following to `spec.template.spec.containers`:
@ -61,7 +61,7 @@ exit
# Download the file from the pod to your computer. # Download the file from the pod to your computer.
kubectl cp human-connection/<POD-ID>:/root/neo4j-backup ./neo4j-backup kubectl cp human-connection/<POD-ID>:/root/neo4j-backup ./neo4j-backup
``` ```
Revert your changes to deployment `nitro-neo4j` which will restart the database. Revert your changes to deployment `neo4j` which will restart the database.
## Restore a Backup in Kubernetes ## Restore a Backup in Kubernetes
@ -79,4 +79,4 @@ kubectl --namespace=human-connection exec -it <POD-ID> bash
neo4j-admin load --from=/root/neo4j-backup --force neo4j-admin load --from=/root/neo4j-backup --force
exit exit
``` ```
Revert your changes to deployment `nitro-neo4j` which will restart the database. Revert your changes to deployment `neo4j` which will restart the database.

View File

@ -7,7 +7,7 @@ database in a kubernetes cluster.
One of the benefits of doing an online backup is that the Neo4j database does not need to be stopped, so there is no downtime. Read [the docs](https://neo4j.com/docs/operations-manual/current/backup/performing/) One of the benefits of doing an online backup is that the Neo4j database does not need to be stopped, so there is no downtime. Read [the docs](https://neo4j.com/docs/operations-manual/current/backup/performing/)
To use Neo4j Enterprise you must add this line to your configmap, if using, or your deployment `nitro-neo4j` env. To use Neo4j Enterprise you must add this line to your configmap, if using, or your deployment `neo4j` env.
``` ```
NEO4J_ACCEPT_LICENSE_AGREEMENT: "yes" NEO4J_ACCEPT_LICENSE_AGREEMENT: "yes"
@ -15,18 +15,18 @@ NEO4J_ACCEPT_LICENSE_AGREEMENT: "yes"
## Create a Backup in Kubernetes ## Create a Backup in Kubernetes
```sh ```sh
# Backup the database with one command, this will get the nitro-neo4j pod, ssh into it, and run the backup command # Backup the database with one command, this will get the neo4j pod, ssh into it, and run the backup command
kubectl -n=human-connection exec -it $(kubectl -n=human-connection get pods | grep nitro-neo4j | awk '{ print $1 }') -- neo4j-admin backup --backup-dir=/var/lib/neo4j --name=neo4j-backup kubectl -n=human-connection exec -it $(kubectl -n=human-connection get pods | grep neo4j | awk '{ print $1 }') -- neo4j-admin backup --backup-dir=/var/lib/neo4j --name=neo4j-backup
# Download the file from the pod to your computer. # Download the file from the pod to your computer.
kubectl cp human-connection/$(kubectl -n=human-connection get pods | grep nitro-neo4j | awk '{ print $1 }'):/var/lib/neo4j/neo4j-backup ./neo4j-backup/ kubectl cp human-connection/$(kubectl -n=human-connection get pods | grep neo4j | awk '{ print $1 }'):/var/lib/neo4j/neo4j-backup ./neo4j-backup/
``` ```
You should now have a backup of the database locally. If you want, you can simulate disaster recovery by sshing into the nitro-neo4j pod, deleting all data and restoring from backup You should now have a backup of the database locally. If you want, you can simulate disaster recovery by sshing into the neo4j pod, deleting all data and restoring from backup
## Disaster where database data is gone somehow ## Disaster where database data is gone somehow
```sh ```sh
kubectl -n=human-connection exec -it $(kubectl -n=human-connection get pods | grep nitro-neo4j |awk '{ print $1 }') bash kubectl -n=human-connection exec -it $(kubectl -n=human-connection get pods | grep neo4j |awk '{ print $1 }') bash
# Enter cypher-shell # Enter cypher-shell
cypher-shell cypher-shell
# Delete all data # Delete all data
@ -54,4 +54,4 @@ kubectl --namespace=human-connection exec -it <POD-ID> bash
neo4j-admin restore --from=/root/neo4j-backup --force neo4j-admin restore --from=/root/neo4j-backup --force
exit exit
``` ```
Revert your changes to deployment `nitro-neo4j` which will restart the database. Revert your changes to deployment `neo4j` which will restart the database.

View File

@ -65,8 +65,8 @@ You should see the persistent volumes at the end of the log:
Restic Backups: Restic Backups:
Completed: Completed:
human-connection/nitro-backend-5b6dd96d6b-q77n6: uploads human-connection/backend-5b6dd96d6b-q77n6: uploads
human-connection/nitro-neo4j-686d768598-z2vhh: neo4j-data human-connection/neo4j-686d768598-z2vhh: neo4j-data
``` ```
## Simulate a Disaster ## Simulate a Disaster

View File

@ -4,14 +4,14 @@ services:
webapp: webapp:
environment: environment:
- "CI=${CI}" - "CI=${CI}"
image: humanconnection/nitro-web:build-and-test image: schoolsinmotion/webapp:build-and-test
build: build:
context: webapp context: webapp
target: build-and-test target: build-and-test
backend: backend:
environment: environment:
- "CI=${CI}" - "CI=${CI}"
image: humanconnection/nitro-backend:build-and-test image: schoolsinmotion/backend:build-and-test
build: build:
context: backend context: backend
target: build-and-test target: build-and-test

View File

@ -2,7 +2,7 @@ version: "3.4"
services: services:
maintenance-worker: maintenance-worker:
image: humanconnection/maintenance-worker:latest image: schoolsinmotion/maintenance-worker:latest
build: build:
context: deployment/legacy-migration/maintenance-worker context: deployment/legacy-migration/maintenance-worker
volumes: volumes:

View File

@ -12,7 +12,7 @@ services:
context: webapp context: webapp
target: build-and-test target: build-and-test
volumes: volumes:
- ./webapp:/nitro-web - ./webapp:/webapp
environment: environment:
- NUXT_BUILD=/tmp/nuxt # avoid file permission issues when `rm -rf .nuxt/` - NUXT_BUILD=/tmp/nuxt # avoid file permission issues when `rm -rf .nuxt/`
- PUBLIC_REGISTRATION=false - PUBLIC_REGISTRATION=false
@ -22,7 +22,7 @@ services:
context: backend context: backend
target: build-and-test target: build-and-test
volumes: volumes:
- ./backend:/nitro-backend - ./backend:/backend
command: yarn run dev command: yarn run dev
environment: environment:
- SMTP_HOST=mailserver - SMTP_HOST=mailserver
@ -31,7 +31,7 @@ services:
- "DEBUG=${DEBUG}" - "DEBUG=${DEBUG}"
- PUBLIC_REGISTRATION=false - PUBLIC_REGISTRATION=false
maintenance: maintenance:
image: humanconnection/maintenance:latest image: schoolsinmotion/maintenance:latest
build: build:
context: webapp context: webapp
dockerfile: Dockerfile.maintenance dockerfile: Dockerfile.maintenance

View File

@ -2,7 +2,7 @@ version: "3.4"
services: services:
webapp: webapp:
image: humanconnection/nitro-web:latest image: schoolsinmotion/webapp:latest
build: build:
context: webapp context: webapp
target: production target: production
@ -16,13 +16,13 @@ services:
depends_on: depends_on:
- backend - backend
volumes: volumes:
- webapp_node_modules:/nitro-web/node_modules - webapp_node_modules:/webapp/node_modules
environment: environment:
- HOST=0.0.0.0 - HOST=0.0.0.0
- GRAPHQL_URI=http://backend:4000 - GRAPHQL_URI=http://backend:4000
- MAPBOX_TOKEN="pk.eyJ1IjoiaHVtYW4tY29ubmVjdGlvbiIsImEiOiJjajl0cnBubGoweTVlM3VwZ2lzNTNud3ZtIn0.bZ8KK9l70omjXbEkkbHGsQ" - MAPBOX_TOKEN="pk.eyJ1IjoiaHVtYW4tY29ubmVjdGlvbiIsImEiOiJjajl0cnBubGoweTVlM3VwZ2lzNTNud3ZtIn0.bZ8KK9l70omjXbEkkbHGsQ"
backend: backend:
image: humanconnection/nitro-backend:latest image: schoolsinmotion/backend:latest
build: build:
context: backend context: backend
target: production target: production
@ -35,8 +35,8 @@ services:
ports: ports:
- 4000:4000 - 4000:4000
volumes: volumes:
- backend_node_modules:/nitro-backend/node_modules - backend_node_modules:/backend/node_modules
- uploads:/nitro-backend/public/uploads - uploads:/backend/public/uploads
environment: environment:
- NEO4J_URI=bolt://neo4j:7687 - NEO4J_URI=bolt://neo4j:7687
- GRAPHQL_URI=http://backend:4000 - GRAPHQL_URI=http://backend:4000
@ -46,7 +46,7 @@ services:
- PRIVATE_KEY_PASSPHRASE=a7dsf78sadg87ad87sfagsadg78 - PRIVATE_KEY_PASSPHRASE=a7dsf78sadg87ad87sfagsadg78
- "DEBUG=${DEBUG}" - "DEBUG=${DEBUG}"
neo4j: neo4j:
image: humanconnection/neo4j:latest image: schoolsinmotion/neo4j:latest
build: build:
context: neo4j context: neo4j
args: args:

View File

@ -4,26 +4,26 @@ ROOT_DIR=$(dirname "$0")/..
VERSION=$(jq -r '.version' $ROOT_DIR/package.json) VERSION=$(jq -r '.version' $ROOT_DIR/package.json)
IFS='.' read -r major minor patch <<< $VERSION IFS='.' read -r major minor patch <<< $VERSION
apps=(nitro-web nitro-backend neo4j maintenance) apps=(webapp backend neo4j maintenance)
tags=($major $major.$minor $major.$minor.$patch) tags=($major $major.$minor $major.$minor.$patch)
# These three docker images have already been built by now: # These three docker images have already been built by now:
# docker build --build-arg BUILD_COMMIT=$BUILD_COMMIT --target production -t humanconnection/nitro-backend:latest $ROOT_DIR/backend # docker build --build-arg BUILD_COMMIT=$BUILD_COMMIT --target production -t schoolsinmotion/backend:latest $ROOT_DIR/backend
# docker build --build-arg BUILD_COMMIT=$BUILD_COMMIT --target production -t humanconnection/nitro-web:latest $ROOT_DIR/webapp # docker build --build-arg BUILD_COMMIT=$BUILD_COMMIT --target production -t schoolsinmotion/webapp:latest $ROOT_DIR/webapp
# docker build --build-arg BUILD_COMMIT=$BUILD_COMMIT -t humanconnection/neo4j:latest $ROOT_DIR/neo4j # docker build --build-arg BUILD_COMMIT=$BUILD_COMMIT -t schoolsinmotion/neo4j:latest $ROOT_DIR/neo4j
docker build -t humanconnection/maintenance:latest $ROOT_DIR/webapp/ -f $ROOT_DIR/webapp/Dockerfile.maintenance docker build -t schoolsinmotion/maintenance:latest $ROOT_DIR/webapp/ -f $ROOT_DIR/webapp/Dockerfile.maintenance
echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin
for app in "${apps[@]}" for app in "${apps[@]}"
do do
SOURCE="humanconnection/${app}:latest" SOURCE="schoolsinmotion/${app}:latest"
echo "docker push $SOURCE" echo "docker push $SOURCE"
docker push $SOURCE docker push $SOURCE
for tag in "${tags[@]}" for tag in "${tags[@]}"
do do
TARGET="humanconnection/${app}:${tag}" TARGET="schoolsinmotion/${app}:${tag}"
if DOCKER_CLI_EXPERIMENTAL=enabled docker manifest inspect $TARGET >/dev/null; then if DOCKER_CLI_EXPERIMENTAL=enabled docker manifest inspect $TARGET >/dev/null; then
echo "docker image ${TARGET} already present, skipping ..." echo "docker image ${TARGET} already present, skipping ..."
else else

View File

@ -7,7 +7,7 @@ CMD ["yarn", "run", "start"]
# Expose the app port # Expose the app port
ARG BUILD_COMMIT ARG BUILD_COMMIT
ENV BUILD_COMMIT=$BUILD_COMMIT ENV BUILD_COMMIT=$BUILD_COMMIT
ARG WORKDIR=/nitro-web ARG WORKDIR=/webapp
RUN mkdir -p $WORKDIR RUN mkdir -p $WORKDIR
WORKDIR $WORKDIR WORKDIR $WORKDIR
@ -25,7 +25,7 @@ RUN NODE_ENV=production yarn run build
FROM base as production FROM base as production
RUN yarn install --production=true --frozen-lockfile --non-interactive --no-cache RUN yarn install --production=true --frozen-lockfile --non-interactive --no-cache
COPY --from=build-and-test ./nitro-web/.nuxt ./.nuxt COPY --from=build-and-test ./webapp/.nuxt ./.nuxt
COPY --from=build-and-test ./nitro-web/static ./static COPY --from=build-and-test ./webapp/static ./static
COPY nuxt.config.js . COPY nuxt.config.js .
COPY locales locales COPY locales locales

View File

@ -7,7 +7,7 @@ CMD ["yarn", "run", "start"]
# Expose the app port # Expose the app port
ARG BUILD_COMMIT ARG BUILD_COMMIT
ENV BUILD_COMMIT=$BUILD_COMMIT ENV BUILD_COMMIT=$BUILD_COMMIT
ARG WORKDIR=/nitro-web ARG WORKDIR=/webapp
RUN mkdir -p $WORKDIR RUN mkdir -p $WORKDIR
WORKDIR $WORKDIR WORKDIR $WORKDIR
@ -35,6 +35,6 @@ RUN yarn run generate
FROM nginx:alpine FROM nginx:alpine
COPY --from=build ./nitro-web/dist/ /usr/share/nginx/html/ COPY --from=build ./webapp/dist/ /usr/share/nginx/html/
RUN rm /etc/nginx/conf.d/default.conf RUN rm /etc/nginx/conf.d/default.conf
COPY maintenance/nginx/custom.conf /etc/nginx/conf.d/ COPY maintenance/nginx/custom.conf /etc/nginx/conf.d/