Mirror of https://github.com/Ocelot-Social-Community/Ocelot-Social.git
Rename dockerhub organizations

...plus get rid of the obsolete `nitro-` prefix.

commit 23ae569509
parent c9dc7d9b5e
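The rename touches every Dockerfile, docker-compose file, Kubernetes manifest and the deploy script below. If the images already published on Docker Hub should survive under the new names, they can be retagged once by hand; a hedged sketch (nothing in this commit does this for you, and the tag set you need to carry over may differ):

```bash
# One-off migration of already-published images to the renamed organization.
for app in backend webapp neo4j maintenance; do
  case $app in
    backend) old=humanconnection/nitro-backend ;;   # old names carried the nitro- prefix
    webapp)  old=humanconnection/nitro-web ;;
    *)       old=humanconnection/$app ;;
  esac
  docker pull $old:latest
  docker tag  $old:latest schoolsinmotion/$app:latest
  docker push schoolsinmotion/$app:latest
done
```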
@@ -5,7 +5,7 @@ EXPOSE 4000
 CMD ["yarn", "run", "start"]
 ARG BUILD_COMMIT
 ENV BUILD_COMMIT=$BUILD_COMMIT
-ARG WORKDIR=/nitro-backend
+ARG WORKDIR=/backend
 RUN mkdir -p $WORKDIR
 WORKDIR $WORKDIR
@@ -22,7 +22,7 @@ RUN NODE_ENV=production yarn run build
 # reduce image size with a multistage build
 FROM base as production
 ENV NODE_ENV=production
-COPY --from=build-and-test /nitro-backend/dist ./dist
+COPY --from=build-and-test /backend/dist ./dist
 COPY ./public/img/ ./public/img/
 COPY ./public/providers.json ./public/providers.json
 RUN yarn install --production=true --frozen-lockfile --non-interactive --no-cache
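The renamed `ARG WORKDIR` and the `COPY --from=build-and-test` source path have to stay in sync across the two stages, which is why both hunks above change together. A minimal local smoke test, as a hedged sketch (the `backend-smoke` tag is arbitrary, not part of this commit):

```bash
# Build the production stage of the backend image and confirm the compiled
# output ends up under the renamed working directory /backend.
docker build --target production -t backend-smoke ./backend
docker run --rm backend-smoke ls /backend/dist
```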
@@ -14,7 +14,7 @@ import { InMemoryCache } from 'apollo-cache-inmemory'
 import fetch from 'node-fetch'
 import { ApolloClient } from 'apollo-client'
 import trunc from 'trunc-html'
-const debug = require('debug')('ea:nitro-datasource')
+const debug = require('debug')('ea:datasource')

 export default class NitroDataSource {
 constructor(uri) {
@@ -44,7 +44,7 @@ spec:
 terminationMessagePath: /dev/termination-log
 terminationMessagePolicy: File
 volumeMounts:
-- mountPath: /nitro-backend/public/uploads
+- mountPath: /backend/public/uploads
 name: uploads
 dnsPolicy: ClusterFirst
 restartPolicy: Always
@@ -7,13 +7,13 @@ dbInitializion: "yarn prod:migrate init"
 # dbMigrations runs the database migrations in a post-upgrade hook.
 dbMigrations: "yarn prod:migrate up"
 # bakendImage is the docker image for the backend deployment
-backendImage: humanconnection/nitro-backend
+backendImage: schoolsinmotion/backend
 # maintenanceImage is the docker image for the maintenance deployment
-maintenanceImage: humanconnection/maintenance
+maintenanceImage: schoolsinmotion/maintenance
 # neo4jImage is the docker image for the neo4j deployment
-neo4jImage: humanconnection/neo4j
+neo4jImage: schoolsinmotion/neo4j
 # webappImage is the docker image for the webapp deployment
-webappImage: humanconnection/nitro-web
+webappImage: schoolsinmotion/webapp
 # image configures pullPolicy related to the docker images
 image:
 # pullPolicy indicates when, if ever, pods pull a new image from docker hub.
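The "post-upgrade hook" wording suggests this is a Helm-style values file. If so, the renamed images could also be overridden at deploy time instead of editing the file; a hedged sketch in which the release name and chart path are assumptions, not part of this commit:

```bash
# Hypothetical per-deployment override of the renamed image values.
helm upgrade human-connection ./deployment/helm \
  --set backendImage=schoolsinmotion/backend \
  --set webappImage=schoolsinmotion/webapp \
  --set neo4jImage=schoolsinmotion/neo4j \
  --set maintenanceImage=schoolsinmotion/maintenance
```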
@@ -36,7 +36,7 @@ spec:
 name: configmap
 - secretRef:
 name: human-connection
-image: humanconnection/nitro-backend:latest
+image: schoolsinmotion/backend:latest
 imagePullPolicy: Always
 name: backend
 ports:
@@ -46,7 +46,7 @@ spec:
 terminationMessagePath: /dev/termination-log
 terminationMessagePolicy: File
 volumeMounts:
-- mountPath: /nitro-backend/public/uploads
+- mountPath: /backend/public/uploads
 name: uploads
 dnsPolicy: ClusterFirst
 restartPolicy: Always
@@ -31,7 +31,7 @@ spec:
 - envFrom:
 - configMapRef:
 name: configmap
-image: humanconnection/neo4j:latest
+image: schoolsinmotion/neo4j:latest
 imagePullPolicy: Always
 name: neo4j
 ports:
@@ -37,7 +37,7 @@ spec:
 name: configmap
 - secretRef:
 name: human-connection
-image: humanconnection/nitro-web:latest
+image: schoolsinmotion/webapp:latest
 imagePullPolicy: Always
 name: web
 ports:
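Once the manifests reference the renamed images, the running deployments still have to pick them up. A hedged sketch of rolling that out (the manifest directory is an assumption; the deployment names backend, neo4j and webapp are taken from the migration docs further down):

```bash
# Re-apply the updated manifests and wait for the renamed images to roll out.
kubectl --namespace=human-connection apply -R -f deployment/
kubectl --namespace=human-connection rollout status deployment/backend
kubectl --namespace=human-connection rollout status deployment/neo4j
kubectl --namespace=human-connection rollout status deployment/webapp
```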
@@ -31,7 +31,7 @@ E.g. in file `deployment/digital-ocean/https/ingress.yaml` change the following:
 paths:
 - path: /
 backend:
-# serviceName: nitro-web
+# serviceName: webapp
 serviceName: maintenance
 # servicePort: 3000
 servicePort: 80
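The commented-out `serviceName`/`servicePort` pair is the normal mode; swapping the comments sends traffic to the maintenance service instead of the webapp. A sketch of rolling that edit out, using the file path from the hunk header above:

```bash
# Re-apply the edited ingress and check which backend service it now targets.
kubectl --namespace=human-connection apply -f deployment/digital-ocean/https/ingress.yaml
kubectl --namespace=human-connection get ingress
```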
@@ -19,7 +19,7 @@ spec:
 env:
 - name: HOST
 value: 0.0.0.0
-image: humanconnection/maintenance:latest
+image: schoolsinmotion/maintenance:latest
 ports:
 - containerPort: 80
 imagePullPolicy: Always
@@ -4,8 +4,8 @@
 data:
 SMTP_HOST: "mailserver.human-connection"
 SMTP_PORT: "25"
-GRAPHQL_URI: "http://nitro-backend.human-connection:4000"
-NEO4J_URI: "bolt://nitro-neo4j.human-connection:7687"
+GRAPHQL_URI: "http://backend.human-connection:4000"
+NEO4J_URI: "bolt://neo4j.human-connection:7687"
 NEO4J_AUTH: "none"
 CLIENT_URI: "https://nitro-staging.human-connection.org"
 NEO4J_apoc_import_file_enabled: "true"
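Both URIs are in-cluster DNS names of the form `<service>.<namespace>`, so the `backend` and `neo4j` Services must exist under the new names before this configmap change goes live. A quick, hedged check:

```bash
# Verify that Services matching the renamed URIs exist in the namespace;
# if they are still called nitro-backend / nitro-neo4j, the configmap
# above would point at nothing.
kubectl --namespace=human-connection get service backend neo4j
```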
@@ -43,13 +43,13 @@ Then temporarily delete backend and database deployments
 ```bash
 $ kubectl --namespace=human-connection get deployments
 NAME READY UP-TO-DATE AVAILABLE AGE
-nitro-backend 1/1 1 1 3d11h
-nitro-neo4j 1/1 1 1 3d11h
-nitro-web 2/2 2 2 73d
-$ kubectl --namespace=human-connection delete deployment nitro-neo4j
-deployment.extensions "nitro-neo4j" deleted
-$ kubectl --namespace=human-connection delete deployment nitro-backend
-deployment.extensions "nitro-backend" deleted
+backend 1/1 1 1 3d11h
+neo4j 1/1 1 1 3d11h
+webapp 2/2 2 2 73d
+$ kubectl --namespace=human-connection delete deployment neo4j
+deployment.extensions "neo4j" deleted
+$ kubectl --namespace=human-connection delete deployment backend
+deployment.extensions "backend" deleted
 ```

 Deploy one-time maintenance-worker pod:
@@ -57,13 +57,13 @@ Deploy one-time maintenance-worker pod:
 ```bash
 # in deployment/legacy-migration/
 $ kubectl apply -f maintenance-worker.yaml
-pod/nitro-maintenance-worker created
+pod/maintenance-worker created
 ```

 Import legacy database and uploads:

 ```bash
-$ kubectl --namespace=human-connection exec -it nitro-maintenance-worker bash
+$ kubectl --namespace=human-connection exec -it maintenance-worker bash
 $ import_legacy_db
 $ import_legacy_uploads
 $ exit
@@ -72,7 +72,7 @@ $ exit
 Delete the pod when you're done:

 ```bash
-$ kubectl --namespace=human-connection delete pod nitro-maintenance-worker
+$ kubectl --namespace=human-connection delete pod maintenance-worker
 ```

 Oh, and of course you have to get those deleted deployments back. One way of
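The last sentence above is cut off by the hunk boundary; one hedged way to get the deleted deployments back is simply re-applying their manifests. The manifest path below is an assumption, not taken from this commit:

```bash
# Hypothetical re-creation of the deployments deleted during the migration;
# adjust the path to wherever this repository keeps its deployment manifests.
kubectl --namespace=human-connection apply -f deployment/human-connection/
kubectl --namespace=human-connection get deployments
```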
@@ -2,12 +2,12 @@
 kind: Pod
 apiVersion: v1
 metadata:
-name: nitro-maintenance-worker
+name: maintenance-worker
 namespace: human-connection
 spec:
 containers:
-- name: nitro-maintenance-worker
-image: humanconnection/maintenance-worker:latest
+- name: maintenance-worker
+image: schoolsinmotion/maintenance-worker:latest
 imagePullPolicy: Always
 resources:
 requests:
@@ -1,4 +1,4 @@
-FROM humanconnection/neo4j:latest
+FROM schoolsinmotion/neo4j:latest

 ENV NODE_ENV=maintenance
 EXPOSE 7687 7474
@@ -18,8 +18,8 @@ minikube dashboard, expose the services you want on your host system.
 For example:

 ```text
-$ minikube service nitro-web --namespace=human-connection
+$ minikube service webapp --namespace=human-connection
 # optionally
-$ minikube service nitro-backend --namespace=human-connection
+$ minikube service backend --namespace=human-connection
 ```
@@ -3,7 +3,7 @@
 At the moment, the application needs two persistent volumes:

 * The `/data/` folder where `neo4j` stores its database and
-* the folder `/nitro-backend/public/uploads` where the backend stores uploads.
+* the folder `/backend/public/uploads` where the backend stores uploads.

 As a matter of precaution, the persistent volume claims that setup these volumes
 live in a separate folder. You don't want to accidently loose all your data in
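Only the mount path inside the container changes here; the uploads volume claim itself keeps its data. A hedged sanity check after the rollout, reusing the grep/awk pattern from the backup docs below:

```bash
# Confirm the existing uploads volume is mounted at the renamed path
# inside a running backend pod.
POD=$(kubectl --namespace=human-connection get pods | grep backend | awk '{ print $1 }')
kubectl --namespace=human-connection exec -it $POD -- ls /backend/public/uploads
kubectl --namespace=human-connection get pvc
```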
@@ -29,7 +29,7 @@ database connections left and nobody can access the application.
 Run the following:

 ```sh
-kubectl --namespace=human-connection edit deployment nitro-neo4j
+kubectl --namespace=human-connection edit deployment neo4j
 ```

 Add the following to `spec.template.spec.containers`:
@@ -61,7 +61,7 @@ exit
 # Download the file from the pod to your computer.
 kubectl cp human-connection/<POD-ID>:/root/neo4j-backup ./neo4j-backup
 ```
-Revert your changes to deployment `nitro-neo4j` which will restart the database.
+Revert your changes to deployment `neo4j` which will restart the database.

 ## Restore a Backup in Kubernetes
@@ -79,4 +79,4 @@ kubectl --namespace=human-connection exec -it <POD-ID> bash
 neo4j-admin load --from=/root/neo4j-backup --force
 exit
 ```
-Revert your changes to deployment `nitro-neo4j` which will restart the database.
+Revert your changes to deployment `neo4j` which will restart the database.
@@ -7,7 +7,7 @@ database in a kubernetes cluster.

 One of the benefits of doing an online backup is that the Neo4j database does not need to be stopped, so there is no downtime. Read [the docs](https://neo4j.com/docs/operations-manual/current/backup/performing/)

-To use Neo4j Enterprise you must add this line to your configmap, if using, or your deployment `nitro-neo4j` env.
+To use Neo4j Enterprise you must add this line to your configmap, if using, or your deployment `neo4j` env.

 ```
 NEO4J_ACCEPT_LICENSE_AGREEMENT: "yes"
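If you prefer not to edit the deployment YAML by hand, the same variable can be injected directly; a hedged one-liner that assumes the deployment is already named `neo4j` after this commit (editing the configmap referenced above works just as well):

```bash
# Set the Enterprise license acceptance variable on the renamed deployment.
kubectl --namespace=human-connection set env deployment/neo4j NEO4J_ACCEPT_LICENSE_AGREEMENT=yes
```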
@@ -15,18 +15,18 @@ NEO4J_ACCEPT_LICENSE_AGREEMENT: "yes"
 ## Create a Backup in Kubernetes

 ```sh
-# Backup the database with one command, this will get the nitro-neo4j pod, ssh into it, and run the backup command
-kubectl -n=human-connection exec -it $(kubectl -n=human-connection get pods | grep nitro-neo4j | awk '{ print $1 }') -- neo4j-admin backup --backup-dir=/var/lib/neo4j --name=neo4j-backup
+# Backup the database with one command, this will get the neo4j pod, ssh into it, and run the backup command
+kubectl -n=human-connection exec -it $(kubectl -n=human-connection get pods | grep neo4j | awk '{ print $1 }') -- neo4j-admin backup --backup-dir=/var/lib/neo4j --name=neo4j-backup
 # Download the file from the pod to your computer.
-kubectl cp human-connection/$(kubectl -n=human-connection get pods | grep nitro-neo4j | awk '{ print $1 }'):/var/lib/neo4j/neo4j-backup ./neo4j-backup/
+kubectl cp human-connection/$(kubectl -n=human-connection get pods | grep neo4j | awk '{ print $1 }'):/var/lib/neo4j/neo4j-backup ./neo4j-backup/
 ```

-You should now have a backup of the database locally. If you want, you can simulate disaster recovery by sshing into the nitro-neo4j pod, deleting all data and restoring from backup
+You should now have a backup of the database locally. If you want, you can simulate disaster recovery by sshing into the neo4j pod, deleting all data and restoring from backup

 ## Disaster where database data is gone somehow

 ```sh
-kubectl -n=human-connection exec -it $(kubectl -n=human-connection get pods | grep nitro-neo4j |awk '{ print $1 }') bash
+kubectl -n=human-connection exec -it $(kubectl -n=human-connection get pods | grep neo4j |awk '{ print $1 }') bash
 # Enter cypher-shell
 cypher-shell
 # Delete all data
@@ -54,4 +54,4 @@ kubectl --namespace=human-connection exec -it <POD-ID> bash
 neo4j-admin restore --from=/root/neo4j-backup --force
 exit
 ```
-Revert your changes to deployment `nitro-neo4j` which will restart the database.
+Revert your changes to deployment `neo4j` which will restart the database.
@@ -65,8 +65,8 @@ You should see the persistent volumes at the end of the log:

 Restic Backups:
 Completed:
-human-connection/nitro-backend-5b6dd96d6b-q77n6: uploads
-human-connection/nitro-neo4j-686d768598-z2vhh: neo4j-data
+human-connection/backend-5b6dd96d6b-q77n6: uploads
+human-connection/neo4j-686d768598-z2vhh: neo4j-data
 ```

 ## Simulate a Disaster
@@ -4,14 +4,14 @@ services:
 webapp:
 environment:
 - "CI=${CI}"
-image: humanconnection/nitro-web:build-and-test
+image: schoolsinmotion/webapp:build-and-test
 build:
 context: webapp
 target: build-and-test
 backend:
 environment:
 - "CI=${CI}"
-image: humanconnection/nitro-backend:build-and-test
+image: schoolsinmotion/backend:build-and-test
 build:
 context: backend
 target: build-and-test
@@ -2,7 +2,7 @@ version: "3.4"

 services:
 maintenance-worker:
-image: humanconnection/maintenance-worker:latest
+image: schoolsinmotion/maintenance-worker:latest
 build:
 context: deployment/legacy-migration/maintenance-worker
 volumes:
@@ -12,7 +12,7 @@ services:
 context: webapp
 target: build-and-test
 volumes:
-- ./webapp:/nitro-web
+- ./webapp:/webapp
 environment:
 - NUXT_BUILD=/tmp/nuxt # avoid file permission issues when `rm -rf .nuxt/`
 - PUBLIC_REGISTRATION=false
@@ -22,7 +22,7 @@ services:
 context: backend
 target: build-and-test
 volumes:
-- ./backend:/nitro-backend
+- ./backend:/backend
 command: yarn run dev
 environment:
 - SMTP_HOST=mailserver
@@ -31,7 +31,7 @@ services:
 - "DEBUG=${DEBUG}"
 - PUBLIC_REGISTRATION=false
 maintenance:
-image: humanconnection/maintenance:latest
+image: schoolsinmotion/maintenance:latest
 build:
 context: webapp
 dockerfile: Dockerfile.maintenance
@@ -2,7 +2,7 @@ version: "3.4"

 services:
 webapp:
-image: humanconnection/nitro-web:latest
+image: schoolsinmotion/webapp:latest
 build:
 context: webapp
 target: production
@@ -16,13 +16,13 @@ services:
 depends_on:
 - backend
 volumes:
-- webapp_node_modules:/nitro-web/node_modules
+- webapp_node_modules:/webapp/node_modules
 environment:
 - HOST=0.0.0.0
 - GRAPHQL_URI=http://backend:4000
 - MAPBOX_TOKEN="pk.eyJ1IjoiaHVtYW4tY29ubmVjdGlvbiIsImEiOiJjajl0cnBubGoweTVlM3VwZ2lzNTNud3ZtIn0.bZ8KK9l70omjXbEkkbHGsQ"
 backend:
-image: humanconnection/nitro-backend:latest
+image: schoolsinmotion/backend:latest
 build:
 context: backend
 target: production
@@ -35,8 +35,8 @@ services:
 ports:
 - 4000:4000
 volumes:
-- backend_node_modules:/nitro-backend/node_modules
-- uploads:/nitro-backend/public/uploads
+- backend_node_modules:/backend/node_modules
+- uploads:/backend/public/uploads
 environment:
 - NEO4J_URI=bolt://neo4j:7687
 - GRAPHQL_URI=http://backend:4000
@@ -46,7 +46,7 @@ services:
 - PRIVATE_KEY_PASSPHRASE=a7dsf78sadg87ad87sfagsadg78
 - "DEBUG=${DEBUG}"
 neo4j:
-image: humanconnection/neo4j:latest
+image: schoolsinmotion/neo4j:latest
 build:
 context: neo4j
 args:
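For local docker-compose setups the renamed images will not exist in the local cache yet, so the services need a rebuild or pull; a hedged sketch:

```bash
# Rebuild the images under their new schoolsinmotion/* names and recreate the
# containers. The named volumes (uploads, *_node_modules) are kept: only the
# image names and the mount paths inside the containers changed.
docker-compose build --pull
docker-compose up -d
```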
@@ -4,26 +4,26 @@ ROOT_DIR=$(dirname "$0")/..

 VERSION=$(jq -r '.version' $ROOT_DIR/package.json)
 IFS='.' read -r major minor patch <<< $VERSION
-apps=(nitro-web nitro-backend neo4j maintenance)
+apps=(webapp backend neo4j maintenance)
 tags=($major $major.$minor $major.$minor.$patch)

 # These three docker images have already been built by now:
-# docker build --build-arg BUILD_COMMIT=$BUILD_COMMIT --target production -t humanconnection/nitro-backend:latest $ROOT_DIR/backend
-# docker build --build-arg BUILD_COMMIT=$BUILD_COMMIT --target production -t humanconnection/nitro-web:latest $ROOT_DIR/webapp
-# docker build --build-arg BUILD_COMMIT=$BUILD_COMMIT -t humanconnection/neo4j:latest $ROOT_DIR/neo4j
-docker build -t humanconnection/maintenance:latest $ROOT_DIR/webapp/ -f $ROOT_DIR/webapp/Dockerfile.maintenance
+# docker build --build-arg BUILD_COMMIT=$BUILD_COMMIT --target production -t schoolsinmotion/backend:latest $ROOT_DIR/backend
+# docker build --build-arg BUILD_COMMIT=$BUILD_COMMIT --target production -t schoolsinmotion/webapp:latest $ROOT_DIR/webapp
+# docker build --build-arg BUILD_COMMIT=$BUILD_COMMIT -t schoolsinmotion/neo4j:latest $ROOT_DIR/neo4j
+docker build -t schoolsinmotion/maintenance:latest $ROOT_DIR/webapp/ -f $ROOT_DIR/webapp/Dockerfile.maintenance

 echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin

 for app in "${apps[@]}"
 do
-SOURCE="humanconnection/${app}:latest"
+SOURCE="schoolsinmotion/${app}:latest"
 echo "docker push $SOURCE"
 docker push $SOURCE

 for tag in "${tags[@]}"
 do
-TARGET="humanconnection/${app}:${tag}"
+TARGET="schoolsinmotion/${app}:${tag}"
 if DOCKER_CLI_EXPERIMENTAL=enabled docker manifest inspect $TARGET >/dev/null; then
 echo "docker image ${TARGET} already present, skipping ..."
 else
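The `tags` array expands the package.json version into one tag per precision level; a small illustration of what the `IFS` split in the script produces (the 1.2.3 version is just an example value):

```bash
# Example of the tag fan-out performed by the deploy script above.
VERSION=1.2.3
IFS='.' read -r major minor patch <<< "$VERSION"
echo "$major $major.$minor $major.$minor.$patch"
# -> 1 1.2 1.2.3  (each image is pushed as :1, :1.2 and :1.2.3 besides :latest)
```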
@@ -7,7 +7,7 @@ CMD ["yarn", "run", "start"]
 # Expose the app port
 ARG BUILD_COMMIT
 ENV BUILD_COMMIT=$BUILD_COMMIT
-ARG WORKDIR=/nitro-web
+ARG WORKDIR=/webapp
 RUN mkdir -p $WORKDIR
 WORKDIR $WORKDIR
@@ -25,7 +25,7 @@ RUN NODE_ENV=production yarn run build

 FROM base as production
 RUN yarn install --production=true --frozen-lockfile --non-interactive --no-cache
-COPY --from=build-and-test ./nitro-web/.nuxt ./.nuxt
-COPY --from=build-and-test ./nitro-web/static ./static
+COPY --from=build-and-test ./webapp/.nuxt ./.nuxt
+COPY --from=build-and-test ./webapp/static ./static
 COPY nuxt.config.js .
 COPY locales locales
@@ -7,7 +7,7 @@ CMD ["yarn", "run", "start"]
 # Expose the app port
 ARG BUILD_COMMIT
 ENV BUILD_COMMIT=$BUILD_COMMIT
-ARG WORKDIR=/nitro-web
+ARG WORKDIR=/webapp
 RUN mkdir -p $WORKDIR
 WORKDIR $WORKDIR
@@ -35,6 +35,6 @@ RUN yarn run generate


 FROM nginx:alpine
-COPY --from=build ./nitro-web/dist/ /usr/share/nginx/html/
+COPY --from=build ./webapp/dist/ /usr/share/nginx/html/
 RUN rm /etc/nginx/conf.d/default.conf
 COPY maintenance/nginx/custom.conf /etc/nginx/conf.d/
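Since the maintenance page is a static nginx image built from the generated `./webapp/dist/`, a quick local smoke test can confirm the renamed copy path still lands in the nginx docroot. A hedged sketch; the `maintenance-smoke` tag and host port 8080 are just illustrative:

```bash
# Build the maintenance image from the webapp context and serve it locally.
docker build -f webapp/Dockerfile.maintenance -t maintenance-smoke ./webapp
docker run --rm -p 8080:80 maintenance-smoke
# then open http://localhost:8080 to see the maintenance page
```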