diff --git a/.github/workflows/check-documentation.yml b/.github/workflows/check-documentation.yml
index 1a7d278e8..7729bc8c5 100644
--- a/.github/workflows/check-documentation.yml
+++ b/.github/workflows/check-documentation.yml
@@ -30,8 +30,8 @@ jobs:
- name: Checkout code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.1.7
- - name: Remove old documentation files
- run: rm -rf ./deployment/src/old/ ./CHANGELOG.md # workaround until https://github.com/gaurav-nelson/github-action-markdown-link-check/pull/183 has been done
+ - name: Remove uncheckable documentation files
+ run: rm -rf ./CHANGELOG.md # workaround until https://github.com/gaurav-nelson/github-action-markdown-link-check/pull/183 has been merged
- name: Check Markdown Links
uses: gaurav-nelson/github-action-markdown-link-check@1b916f2cf6c36510a6059943104e3c42ce6c16bc # 1.0.15
diff --git a/.github/workflows/docker-push.yml b/.github/workflows/docker-push.yml
new file mode 100644
index 000000000..93dc45bd3
--- /dev/null
+++ b/.github/workflows/docker-push.yml
@@ -0,0 +1,91 @@
+name: docker-push
+
+on: push
+
+jobs:
+ build-and-push-images:
+ strategy:
+ matrix:
+ app:
+ - name: neo4j
+ context: neo4j
+ file: neo4j/Dockerfile
+ target: community
+ - name: backend-base
+ context: backend
+ file: backend/Dockerfile
+ target: base
+ - name: backend-build
+ context: backend
+ file: backend/Dockerfile
+ target: build
+ - name: backend
+ context: backend
+ file: backend/Dockerfile
+ target: production
+ - name: webapp-base
+ context: webapp
+ file: webapp/Dockerfile
+ target: base
+ - name: webapp-build
+ context: webapp
+ file: webapp/Dockerfile
+ target: build
+ - name: webapp
+ context: webapp
+ file: webapp/Dockerfile
+ target: production
+ - name: maintenance-base
+ context: webapp
+ file: webapp/Dockerfile.maintenance
+ target: base
+ - name: maintenance-build
+ context: webapp
+ file: webapp/Dockerfile.maintenance
+ target: build
+ - name: maintenance
+ context: webapp
+ file: webapp/Dockerfile.maintenance
+ target: production
+ runs-on: ubuntu-latest
+ env:
+ REGISTRY: ghcr.io
+ IMAGE_NAME: ${{ github.repository }}/${{ matrix.app.name }}
+ permissions:
+ contents: read
+ packages: write
+ attestations: write
+ id-token: write
+
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.1.7
+ - name: Log in to the Container registry
+ uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567
+ with:
+ registry: ${{ env.REGISTRY }}
+ username: ${{ github.actor }}
+ password: ${{ secrets.GITHUB_TOKEN }}
+ - name: Extract metadata (tags, labels) for Docker
+ id: meta
+ uses: docker/metadata-action@70b2cdc6480c1a8b86edf1777157f8f437de2166
+ with:
+ images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
+ tags: |
+ type=schedule
+ type=semver,pattern={{version}}
+ type=semver,pattern={{major}}.{{minor}}
+ type=semver,pattern={{major}}
+ type=ref,event=branch
+ type=ref,event=pr
+ type=sha
+ - name: Build and push Docker images
+ id: push
+ uses: docker/build-push-action@4f58ea79222b3b9dc2c8bbdd6debcef730109a75
+ with:
+ context: ${{ matrix.app.context }}
+ target: ${{ matrix.app.target }}
+ file: ${{ matrix.app.file }}
+ push: true
+ tags: ${{ steps.meta.outputs.tags }}
+ labels: ${{ steps.meta.outputs.labels }}
diff --git a/.github/workflows/test-backend.yml b/.github/workflows/test-backend.yml
index 49a3aa8a1..ba0bb3c74 100644
--- a/.github/workflows/test-backend.yml
+++ b/.github/workflows/test-backend.yml
@@ -112,7 +112,8 @@ jobs:
cp backend/.env.template backend/.env
- name: backend | docker compose
- run: docker compose -f docker-compose.yml -f docker-compose.test.yml up --detach --no-deps neo4j backend
+ # doesn't work without the --build flag - this means we should either not load the cached images or cache the correct image
+ run: docker compose -f docker-compose.yml -f docker-compose.test.yml up --detach --build --no-deps neo4j backend
- name: backend | Initialize Database
run: docker compose exec -T backend yarn db:migrate init
diff --git a/.github/workflows/test-e2e.yml b/.github/workflows/test-e2e.yml
index 3203411ff..fe8a514dc 100644
--- a/.github/workflows/test-e2e.yml
+++ b/.github/workflows/test-e2e.yml
@@ -77,7 +77,7 @@ jobs:
docker load < /tmp/images/neo4j.tar
docker load < /tmp/images/backend.tar
docker load < /tmp/images/webapp.tar
- docker compose -f docker-compose.yml -f docker-compose.test.yml up --detach --no-deps webapp neo4j backend
+ docker compose -f docker-compose.yml -f docker-compose.test.yml up --detach --build --no-deps webapp neo4j backend
sleep 90s
- name: Full stack tests | run tests
diff --git a/.github/workflows/test-webapp.yml b/.github/workflows/test-webapp.yml
index 2019d309e..21473f3fe 100644
--- a/.github/workflows/test-webapp.yml
+++ b/.github/workflows/test-webapp.yml
@@ -94,7 +94,8 @@ jobs:
cp backend/.env.template backend/.env
- name: backend | docker compose
- run: docker compose -f docker-compose.yml -f docker-compose.test.yml up --detach --no-deps webapp
+ # doesn't work without the --build flag - this means we should either not load the cached images or cache the correct image
+ run: docker compose -f docker-compose.yml -f docker-compose.test.yml up --detach --build --no-deps webapp
- name: webapp | Unit tests incl. coverage check
run: docker compose exec -T webapp yarn test
diff --git a/.tool-versions b/.tool-versions
new file mode 100644
index 000000000..4120b7f87
--- /dev/null
+++ b/.tool-versions
@@ -0,0 +1 @@
+nodejs 20.12.1
diff --git a/README.md b/README.md
index 321e8931d..b1fe0ea14 100644
--- a/README.md
+++ b/README.md
@@ -186,6 +186,9 @@ $ cp .env.template .env
# in folder backend/
$ cp .env.template .env
+
+# in folder webapp/
+$ cp .env.template .env
```
For Development:
diff --git a/backend/Dockerfile b/backend/Dockerfile
index a7931f31a..40b78225a 100644
--- a/backend/Dockerfile
+++ b/backend/Dockerfile
@@ -1,103 +1,42 @@
-##################################################################################
-# BASE (Is pushed to DockerHub for rebranding) ###################################
-##################################################################################
FROM node:20.12.1-alpine3.19 AS base
-
-# ENVs
-## DOCKER_WORKDIR would be a classical ARG, but that is not multi layer persistent - shame
-ENV DOCKER_WORKDIR="/app"
-## We Cannot do `$(date -u +'%Y-%m-%dT%H:%M:%SZ')` here so we use unix timestamp=0
-ARG BBUILD_DATE="1970-01-01T00:00:00.00Z"
-ENV BUILD_DATE=$BBUILD_DATE
-## We cannot do $(yarn run version)-${BUILD_NUMBER} here so we default to 0.0.0-0
-ARG BBUILD_VERSION="0.0.0-0"
-ENV BUILD_VERSION=$BBUILD_VERSION
-## We cannot do `$(git rev-parse --short HEAD)` here so we default to 0000000
-ARG BBUILD_COMMIT="0000000"
-ENV BUILD_COMMIT=$BBUILD_COMMIT
-## SET NODE_ENV
-ENV NODE_ENV="production"
-## App relevant Envs
-ENV PORT="4000"
-
-# Labels
-LABEL org.label-schema.build-date="${BUILD_DATE}"
LABEL org.label-schema.name="ocelot.social:backend"
LABEL org.label-schema.description="Backend of the Social Network Software ocelot.social"
LABEL org.label-schema.usage="https://github.com/Ocelot-Social-Community/Ocelot-Social/blob/master/README.md"
LABEL org.label-schema.url="https://ocelot.social"
LABEL org.label-schema.vcs-url="https://github.com/Ocelot-Social-Community/Ocelot-Social/tree/master/backend"
-LABEL org.label-schema.vcs-ref="${BUILD_COMMIT}"
LABEL org.label-schema.vendor="ocelot.social Community"
-LABEL org.label-schema.version="${BUILD_VERSION}"
LABEL org.label-schema.schema-version="1.0"
LABEL maintainer="devops@ocelot.social"
-
-# Install Additional Software
-## install: git
-RUN apk --no-cache add git python3 make g++
-
-# Settings
-## Expose Container Port
+ENV NODE_ENV="production"
+ENV PORT="4000"
EXPOSE ${PORT}
+RUN apk --no-cache add git python3 make g++ bash
+RUN mkdir -p /app
+WORKDIR /app
+CMD ["/bin/bash", "-c", "yarn run start"]
-## Workdir
-RUN mkdir -p ${DOCKER_WORKDIR}
-WORKDIR ${DOCKER_WORKDIR}
-
-##################################################################################
-# DEVELOPMENT (Connected to the local environment, to reload on demand) ##########
-##################################################################################
FROM base AS development
+CMD ["/bin/sh", "-c", "yarn install && yarn run dev"]
-# We don't need to copy or build anything since we gonna bind to the
-# local filesystem which will need a rebuild anyway
-
-# Run command
-# (for development we need to execute yarn install since the
-# node_modules are on another volume and need updating)
-CMD /bin/sh -c "yarn install && yarn run dev"
-
-##################################################################################
-# CODE (Does contain all code files and is pushed to DockerHub for rebranding) ###
-##################################################################################
-FROM base AS code
-
-# copy everything, but do not build.
+FROM base AS build
COPY . .
+ONBUILD COPY ./branding/constants/ src/config/tmp
+ONBUILD RUN tools/replace-constants.sh
+ONBUILD COPY ./branding/email/ src/middleware/helpers/email/
+ONBUILD RUN yarn install --production=false --frozen-lockfile --non-interactive
+ONBUILD RUN yarn run build
+ONBUILD RUN mkdir /build
+ONBUILD RUN cp -r ./build /build
+ONBUILD RUN cp -r ./public /build/build
+ONBUILD RUN cp -r ./package.json yarn.lock /build
+ONBUILD RUN cd /build && yarn install --production=true --frozen-lockfile --non-interactive
-##################################################################################
-# BUILD (Does contain all files and the compilate and is therefore bloated) ######
-##################################################################################
-FROM code AS build
-
-# yarn install
-RUN yarn install --production=false --frozen-lockfile --non-interactive
-# yarn build
-RUN /bin/sh -c "yarn run build"
-
-##################################################################################
-# TEST ###########################################################################
-##################################################################################
FROM build AS test
+# required for the migrations
+# ONBUILD RUN cp -r ./src /src
+CMD ["/bin/bash", "-c", "yarn run dev"]
-# Run command
-CMD /bin/sh -c "yarn run dev"
+FROM build AS production_build
-##################################################################################
-# PRODUCTION (Does contain only "binary"- and static-files to reduce image size) #
-##################################################################################
FROM base AS production
-
-# Copy "binary"-files from build image
-COPY --from=build ${DOCKER_WORKDIR}/build ./build
-COPY --from=build ${DOCKER_WORKDIR}/node_modules ./node_modules
-# Copy static files
-# TODO - externalize the uploads so we can copy the whole folder
-COPY --from=build ${DOCKER_WORKDIR}/public/img/ ./public/img/
-COPY --from=build ${DOCKER_WORKDIR}/public/providers.json ./public/providers.json
-# Copy package.json for script definitions (lock file should not be needed)
-COPY --from=build ${DOCKER_WORKDIR}/package.json ./package.json
-
-# Run command
-CMD /bin/sh -c "yarn run start"
+COPY --from=production_build /build .
diff --git a/deployment/src/kubernetes/charts/.gitkeep b/backend/branding/constants/.gitkeep
similarity index 100%
rename from deployment/src/kubernetes/charts/.gitkeep
rename to backend/branding/constants/.gitkeep
diff --git a/deployment/src/kubernetes/crds/.gitkeep b/backend/branding/email/.gitkeep
similarity index 100%
rename from deployment/src/kubernetes/crds/.gitkeep
rename to backend/branding/email/.gitkeep
diff --git a/backend/src/schema/resolvers/filter-posts.spec.ts b/backend/src/schema/resolvers/filter-posts.spec.ts
index 41fbd0ea7..95a072d8a 100644
--- a/backend/src/schema/resolvers/filter-posts.spec.ts
+++ b/backend/src/schema/resolvers/filter-posts.spec.ts
@@ -179,7 +179,9 @@ describe('Filter Posts', () => {
})
})
- describe('order events by event start ascending', () => {
+ // Does not work on months end
+ // eslint-disable-next-line jest/no-disabled-tests
+ describe.skip('order events by event start ascending', () => {
it('finds the events ordered accordingly', async () => {
const {
data: { Post: result },
@@ -201,7 +203,9 @@ describe('Filter Posts', () => {
})
})
- describe('filter events by event start date', () => {
+ // Does not work on months end
+ // eslint-disable-next-line jest/no-disabled-tests
+ describe.skip('filter events by event start date', () => {
it('finds only events after given date', async () => {
const {
data: { Post: result },
diff --git a/backend/tools/replace-constants.sh b/backend/tools/replace-constants.sh
new file mode 100755
index 000000000..e7cee6ee3
--- /dev/null
+++ b/backend/tools/replace-constants.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+# TODO: this is a hack, we should find a better way to share files between backend and webapp
+[ -f src/config/tmp/emails.js ] && mv src/config/tmp/emails.js src/config/emails.ts
+[ -f src/config/tmp/logos.js ] && mv src/config/tmp/logos.js src/config/logos.ts
+[ -f src/config/tmp/metadata.js ] && mv src/config/tmp/metadata.js src/config/metadata.ts
+exit 0
diff --git a/deployment/.env.dist b/deployment/.env.dist
deleted file mode 100644
index 14d793e06..000000000
--- a/deployment/.env.dist
+++ /dev/null
@@ -1,7 +0,0 @@
-# branding folder used for "docker compose up" run in deployment folder
-CONFIGURATION=stage.ocelot.social
-
-# used in "scripts/clusters.backup-multiple-servers.sh"
-BACKUP_CONFIGURATIONS="stage.ocelot.social stage.wir.social"
-# if '<= 0' no backups will be deleted
-BACKUP_SAVED_BACKUPS_NUMBER=7
\ No newline at end of file
diff --git a/deployment/DOCKER_MORE_CLOSELY.md b/deployment/DOCKER_MORE_CLOSELY.md
deleted file mode 100644
index 4504bbef7..000000000
--- a/deployment/DOCKER_MORE_CLOSELY.md
+++ /dev/null
@@ -1,27 +0,0 @@
-# Docker
-
-## Apple M1 Platform
-
-***Attention:** For using Docker commands in Apple M1 environments!*
-
-```bash
-# set env variable for your shell
-$ export DOCKER_DEFAULT_PLATFORM=linux/amd64
-```
-
-### Docker Compose Override File For Apple M1 Platform
-
-For Docker compose `up` or `build` commands, you can use our Apple M1 override file that specifies the M1 platform:
-
-```bash
-# in main folder
-
-# for production
-$ docker compose -f docker-compose.yml -f docker-compose.apple-m1.override.yml up
-
-# for production testing Docker images from DockerHub
-$ docker compose -f docker-compose.ocelotsocial-branded.yml -f docker-compose.apple-m1.override.yml up
-
-# only once: init admin user and create indexes and constraints in Neo4j database
-$ docker compose exec backend /bin/sh -c "yarn prod:migrate init"
-```
diff --git a/deployment/Minikube.md b/deployment/Minikube.md
deleted file mode 100644
index a552e670f..000000000
--- a/deployment/Minikube.md
+++ /dev/null
@@ -1,25 +0,0 @@
-# Minikube
-
-There are many Kubernetes providers, but if you're just getting started, Minikube is a tool that you can use to get your feet wet.
-
-After you [installed Minikube](https://kubernetes.io/docs/tasks/tools/install-minikube/)
-open your minikube dashboard:
-
-```text
-$ minikube dashboard
-```
-
-This will give you an overview. Some of the steps below need some timing to make resources available to other dependent deployments. Keeping an eye on the dashboard is a great way to check that.
-
-Follow the installation instruction for [Kubernetes with Helm](./src/kubernetes/README.md).
-
-If all the pods and services have settled and everything looks green in your
-minikube dashboard, expose the services you want on your host system.
-
-For example:
-
-```text
-$ minikube service webapp --namespace=ocelotsocialnetwork
-# optionally
-$ minikube service backend --namespace=ocelotsocialnetwork
-```
diff --git a/deployment/README.md b/deployment/README.md
deleted file mode 100644
index 2732919c9..000000000
--- a/deployment/README.md
+++ /dev/null
@@ -1,138 +0,0 @@
-# Ocelot.Social Deploy And Rebranding
-
-[](https://github.com/Ocelot-Social-Community/Ocelot-Social-Deploy-Rebranding/actions)
-[](https://github.com/Ocelot-Social-Community/Ocelot-Social-Deploy-Rebranding/blob/master/LICENSE.md)
-[](https://discord.gg/AJSX9DCSUA)
-[](https://www.codetriage.com/ocelot-social-community/ocelot-social-deploy-rebranding)
-
-This repository is an in use template to rebrand, configure, and deploy [ocelot.social](https://github.com/Ocelot-Social-Community/Ocelot-Social) networks.
-The forked original repository is [stage.ocelot.social](https://github.com/Ocelot-Social-Community/stage.ocelot.social).
-
-
-
-
-
-
-
-## Live demo
-
-__Try out our deployed [development environment](https://stage.ocelot.social).__
-
-Visit our staging networks:
-
-- central staging network: [stage.ocelot.social](https://stage.ocelot.social)
-
-
-Logins:
-
-| email | password | role |
-| :--- | :--- | :--- |
-| `user@example.org` | 1234 | user |
-| `moderator@example.org` | 1234 | moderator |
-| `admin@example.org` | 1234 | admin |
-
-## Usage
-
-Fork this repository to configure and rebrand it for your own [ocelot.social](https://github.com/Ocelot-Social-Community/Ocelot-Social) network.
-
-### Package.Json And DockerHub Organisation
-
-Write your own data into the main configuration file:
-
-- [package.json](https://github.com/Ocelot-Social-Community/Ocelot-Social/blob/master/package.json)
-
-Since all deployment methods described here depend on [Docker](https://docker.com) and [DockerHub](https://hub.docker.com), you need to create your own organisation on DockerHub and put its name in the [package.json](https://github.com/Ocelot-Social-Community/Ocelot-Social/blob/master/package.json) file as your `dockerOrganisation`.
-
-### Configure And Branding
-
-The next step is:
-
-- [Set Environment Variables and Configurations](./deployment-values.md)
-
-- [Configure And Branding](./configurations/stage.ocelot.social/branding/README.md)
-
-
-### Optional: Locally Testing Configuration And Branding
-
-Just in case you have Docker installed and run the following, you can check your branding locally:
-
-```bash
-# in main folder
-$ docker-compose up
-# fill the database with an initial admin
-$ docker-compose exec backend yarn run prod:migrate init
-```
-
-The database is then initialised with the default administrator:
-
-- E-mail: admin@example.org
-- Password: 1234
-
-For login or registration have a look in your browser at `http://localhost:3000/`.
-For the maintenance page have a look in your browser at `http://localhost:5000/`.
-
-### Push Changes To GitHub
-
-Before merging these changes into the "master" branch on your GitHub fork repository, you need to configure the GitHub repository secrets. This is necessary to [publish](https://github.com/Ocelot-Social-Community/Ocelot-Social/blob/master/.github/workflows/publish.yml) the Docker images by pushing them via GitHub actions to repositories belonging to your DockerHub organisation.
-
-First, go to your DockerHub profile under `Account Settings` and click on the `Security` tab. There you create an access token called `-access-token` and copy the token to a safe place.
-
-Secondly, in your GitHub repository, click on the 'Settings' tab and go to the 'Secrets' tab. There you create two secrets by clicking on `New repository secret`:
-
-1. Named `DOCKERHUB_TOKEN` with the newly created DockerHub token (only the code, not the token name).
-2. Named `DOCKERHUB_USERNAME` with your DockerHub username.
-
-### Optional: Locally Testing Your DockerHub Images
-
-Just in case you like to check your pushed Docker images in your organisation's DockerHub repositories locally:
-
-- rename the file `docker-compose.ocelotsocial-branded.yml` with your network name
-- in the file, rename the ocelot.social DockerHub organisation `ocelotsocialnetwork` to your organisations name
-
-Remove any local Docker images if necessary and do the following:
-
-```bash
-# in main folder
-$ docker-compose -f docker-compose.-branded.yml up
-# fill the database with an initial admin
-$ docker-compose exec backend yarn run prod:migrate init
-```
-
-See the login details and browser addresses above.
-
-### Deployment
-
-Afterwards you can [deploy](./deployment.md) it on your server:
-
-- [Kubernetes with Helm](./src/kubernetes/README.md)
-
-## Developer Chat
-
-Join our friendly open-source community on [Discord](https://discord.gg/AJSX9DCSUA) :heart_eyes_cat:
-Just introduce yourself at `#introduce-yourself` and mention `@@Mentor` to get you onboard :neckbeard:
-Check out the [contribution guideline](https://github.com/Ocelot-Social-Community/Ocelot-Social/blob/master/CONTRIBUTING.md), too!
-
-We give write permissions to every developer who asks for it. Just text us on
-[Discord](https://discord.gg/AJSX9DCSUA).
-
-## Technology Stack
-
-- [Docker](https://www.docker.com)
-- [Kubernetes](https://kubernetes.io)
-- [Helm](https://helm.sh)
-
-
-
-## License
-
-See the [LICENSE](https://github.com/Ocelot-Social-Community/Ocelot-Social/blob/master/LICENSE.md) file for license rights and limitations (MIT).
-
-We need `DOCKER_BUILDKIT=0` for this to work.
diff --git a/deployment/configurations/.gitignore b/deployment/configurations/.gitignore
deleted file mode 100644
index 5a7d01850..000000000
--- a/deployment/configurations/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-/*
-!/example
-!.gitignore
\ No newline at end of file
diff --git a/deployment/configurations/stage.ocelot.social b/deployment/configurations/stage.ocelot.social
deleted file mode 160000
index fdc2e52fa..000000000
--- a/deployment/configurations/stage.ocelot.social
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit fdc2e52fa444b300e1c4736600bc0e9ae3314222
diff --git a/deployment/deployment-values.md b/deployment/deployment-values.md
deleted file mode 100644
index d1eae1822..000000000
--- a/deployment/deployment-values.md
+++ /dev/null
@@ -1,73 +0,0 @@
-# Deployment Values
-
-For each deployment, you need to set the environment variables and configurations.
-Here is some specific information on how to set the values.
-
-## Webapp
-
-We have several configuration possibilities just in the frontend.
-
-### Date Time
-
-In file `branding/constants/dateTime.js`.
-
-- `RELATIVE_DATETIME`
- - `true` (default) or `false`
-- `ABSOLUT_DATETIME_FORMAT`
- - definition see [date-fns, format](https://date-fns.org/v3.3.1/docs/format):
- - `P`: just localized date
- - `Pp`: just localized date and time
-
-## E-Mails
-
-You need to set environment variables to send registration and invitation information or notifications to users, for example.
-
-### SPF and DKIM
-
-More and more e-mail providers require settings for authorization and verification of e-mail senders.
-
-### SPF
-
-Sometimes it is enough to create an SPF record in your DNS.
-
-### DKIM
-
-However, if you need DKIM authorization and verification, you must set the appropriate environment variables in: `.env`, `docker-compose.yml` or Helm script `values.yaml`:
-
-```bash
-SMTP_DKIM_DOMAINNAME=
-SMTP_DKIM_KEYSELECTOR=2017
-SMTP_DKIM_PRIVATKEY="-----BEGIN RSA PRIVATE KEY-----\n\n-----END RSA PRIVATE KEY-----\n"
-```
-
-You can find out how DKIM works here:
-
-
-
-To create the private and public DKIM key, see here:
-
-
-
-Information about the required PEM format can be found here:
-
-
-
-## Neo4j Database
-
-We have several configuration options for our Neo4j database.
-
-### DBMS_DEFAULT_DATABASE – Default Database Name to be Used
-
-If you need to set the default database name in Neo4j to be used for all operations and terminal commands like our backup scripts, you must set the appropriate environment variable in: `.env`, `docker-compose.yml` or Helm script `values.yaml`:
-
-```yaml
-DBMS_DEFAULT_DATABASE: "graph.db"
-```
-
-The default value is `neo4j` if it is not set.
-
-As example see files:
-
-- `neo4j/.env.template`
-- `deployment/docker-compose.yml`
-- `deployment/configurations/stage.ocelot.social/kubernetes/values.yaml.template`
diff --git a/deployment/deployment.md b/deployment/deployment.md
deleted file mode 100644
index 52495ec44..000000000
--- a/deployment/deployment.md
+++ /dev/null
@@ -1,148 +0,0 @@
-# Deployment
-
-Before you start the deployment you have to do preparations.
-
-## Deployment Preparations
-
-Since all deployment methods described here depend on [Docker](https://docker.com) and [DockerHub](https://hub.docker.com), you need to create your own organisation on DockerHub and put its name in the [package.json](https://github.com/Ocelot-Social-Community/Ocelot-Social/blob/master/package.json) file as your `dockerOrganisation`.
-Read more details in the [main README](https://github.com/Ocelot-Social-Community/Ocelot-Social/blob/master/README.md) under [Usage](https://github.com/Ocelot-Social-Community/Ocelot-Social/blob/master/README.md#usage).
-
-## Deployment Methods
-
-You have the following options for a deployment:
-
-- [Kubernetes with Helm](./src/kubernetes/README.md)
-
-## After Deployment
-
-After the first deployment of the new network on your server, the database is initialized with the default administrator:
-
-- E-mail: `admin@example.org`
-- Password: `1234`
-
-***ATTENTION:*** When you are logged in for the first time, please change your (the admin's) e-mail to an existing one and change your password to a secure one !!!
-
-## Using the Scripts
-
-To use most of the scripts you have to set the variable `CONFIGURATION` in your terminal by entering:
-
-```bash
-# in deployment folder
-
-# set configuration name to folder name in 'configurations' folder (network name)
-$ export CONFIGURATION=
-# to check this
-$ echo $CONFIGURATION
-```
-
-### Secrets Encrypt/Decrypt
-
-To encrypt and decrypt the secrets of your network in your terminal set a correct password in a (new) file `configurations//SECRET`.
-If done please enter:
-
-```bash
-# in deployment folder
-
-# encrypt secrets
-$ scripts/secrets.encrypt.sh
-
-# decrypt secrets
-$ scripts/secrets.decrypt.sh
-```
-
-### Maintenance Mode On/Off
-
-Activate or deactivate maintenance mode in your terminal:
-
-```bash
-# in deployment folder
-
-# activate maintenance mode
-$ scripts/cluster.maintenance.sh on
-
-# deactivate maintenance mode
-$ scripts/cluster.maintenance.sh off
-```
-
-### Backup Scripts
-
-Save backups.
-
-#### Single Backup
-
-To save a local backup of the database and uploaded images:
-
-```bash
-# in deployment folder
-
-# save backup
-$ scripts/cluster.backup.sh
-```
-
-The backup will be saved into your network folders `backup` folder in a new folder with the date and time.
-
-##### Default Database Name
-
-To execute this script, it may be necessary to set the default database name in Neo4j.
-
-In our deployments there are cases where the database is called `neo4j` (used by default) and in other cases `graph.db` (accidentally happened when we loaded the database into a new cluster).
-
-In the new deployment with Helm, we set the default database name by the environment variable `NEO4J_dbms_default__database` in the Helm `values.yaml`.
-See [Docker-specific configuration settings](https://neo4j.com/docs/operations-manual/4.4/docker/ref-settings/)
-
-For more information see [Database Management Commands](../neo4j/README.md#database-management-commands).
-
-#### Multiple Networks Backup
-
-In order to save several network backups locally, you must define the configuration names of all networks in `.env`. The template for this is `deployment/.env.dist`:
-
-```bash
-# in the deployment folders '.env' set as example
-BACKUP_CONFIGURATIONS="stage.ocelot.social stage.wir.social"
-BACKUP_SAVED_BACKUPS_NUMBER=7
-```
-
-If `BACKUP_SAVED_BACKUPS_NUMBER <= 0` then no backups will be deleted.
-
-To actually save all the backups run:
-
-```bash
-# in deployment folder
-
-# save all backups listed in 'BACKUP_CONFIGURATIONS'
-# delete all backups older then the 'BACKUP_SAVED_BACKUPS_NUMBER' newest ones
-$ scripts/clusters.backup-multiple-servers.sh
-```
-
-The backups will be saved into your networks folders `backup` folder in a new folder with the date and time.
-
-#### Automated Backups
-
-⚠️ *Attention: Please check carefully whether really the oldest backups have been deleted. As shells on different systems behave differently with regard to the commands used in this script.*
-
-Install automated backups by a [cron job](https://en.wikipedia.org/wiki/Cron).
-Be aware of having the bash shell installed to run the script.
-The environment variables for the automated backups are described above.
-
-Installing a cron job by editing the cron table file:
-
-```bash
-# edit cron job table
-$ crontab -e
-```
-
-In the editor add the line:
-
-```bash
-# in cron job table file
-
-# set a cron job every night at 04am server time
-# min hour day month weekday command
-00 04 * * * /root/Ocelot-Social/deployment/scripts/clusters.backup-multiple-servers.sh >> /root/Ocelot-Social/deployment/backup-cron-job.log
-```
-
-This way the terminal output is written into a log file named `backup-cron-job.log` located in the deployment folder.
-
-Be aware that the server datetime can differ from your local time.
-Especially by the change between summer and winter time, because servers usually have UTC.
-Find out the actual difference by running the command `date` on your server.
diff --git a/deployment/docker-compose.ocelotsocial-branded.yml b/deployment/docker-compose.ocelotsocial-branded.yml
deleted file mode 100644
index 2899168d3..000000000
--- a/deployment/docker-compose.ocelotsocial-branded.yml
+++ /dev/null
@@ -1,100 +0,0 @@
-services:
-
- ########################################################
- # WEBAPP ###############################################
- ########################################################
- webapp:
- # name the image to match our image to be tested from our DockerHub repository so that it can be pulled from there, otherwise it will be created locally from the 'dockerfile'
- image: ocelotsocialnetwork/webapp-branded:latest
- ports:
- - 3000:3000
- networks:
- - test-network
- depends_on:
- - backend
- environment:
- - HOST=0.0.0.0
- - GRAPHQL_URI=http://backend:4000
- - MAPBOX_TOKEN="pk.eyJ1IjoiYnVzZmFrdG9yIiwiYSI6ImNraDNiM3JxcDBhaWQydG1uczhpZWtpOW4ifQ.7TNRTO-o9aK1Y6MyW_Nd4g"
- # - WEBSOCKETS_URI=ws://backend:4000/graphql # is not working and not given in Docker YAML in main repo
- - PUBLIC_REGISTRATION=true
- - INVITE_REGISTRATION=true
- - CATEGORIES_ACTIVE=true
-
- ########################################################
- # BACKEND ##############################################
- ########################################################
- backend:
- # name the image to match our image to be tested from our DockerHub repository so that it can be pulled from there, otherwise it will be created locally from the 'dockerfile'
- image: ocelotsocialnetwork/backend-branded:latest
- networks:
- - test-network
- depends_on:
- - neo4j
- ports:
- - 4000:4000
- volumes:
- - backend_uploads:/app/public/uploads
- environment:
- - NEO4J_URI=bolt://neo4j:7687
- - GRAPHQL_URI=http://backend:4000
- - CLIENT_URI=http://localhost:3000
- - JWT_SECRET=b/&&7b78BF&fv/Vd
- - MAPBOX_TOKEN=pk.eyJ1IjoiYnVzZmFrdG9yIiwiYSI6ImNraDNiM3JxcDBhaWQydG1uczhpZWtpOW4ifQ.7TNRTO-o9aK1Y6MyW_Nd4g
- - PRIVATE_KEY_PASSPHRASE=a7dsf78sadg87ad87sfagsadg78
- - EMAIL_SUPPORT=support@wir.social
- - EMAIL_DEFAULT_SENDER=info@wir.social
- # - PRODUCTION_DB_CLEAN_ALLOW=false # only true for production environments on staging servers
- - PUBLIC_REGISTRATION=true
- - INVITE_REGISTRATION=true
- - CATEGORIES_ACTIVE=true
- - SMTP_USERNAME=${SMTP_USERNAME}
- - SMTP_PASSWORD=${SMTP_PASSWORD}
- - SMTP_HOST=mailserver
- - SMTP_PORT=25
- - SMTP_IGNORE_TLS=true
-
- ########################################################
- # MAINTENANCE ##########################################
- ########################################################
- maintenance:
- # name the image to match our image to be tested from our DockerHub repository so that it can be pulled from there, otherwise it will be created locally from the 'dockerfile'
- image: ocelotsocialnetwork/maintenance-branded:latest
- networks:
- - test-network
- ports:
- - 3001:80
-
- ########################################################
- # NEO4J ################################################
- ########################################################
- neo4j:
- # name the image to match our image to be tested from our DockerHub repository so that it can be pulled from there, otherwise it will be created locally from the 'dockerfile'
- image: ocelotsocialnetwork/neo4j-community-branded:latest
- networks:
- - test-network
- environment:
- - NEO4J_AUTH=none
- - NEO4J_dbms_security_procedures_unrestricted=algo.*,apoc.*
- - NEO4J_ACCEPT_LICENSE_AGREEMENT=yes
- ports:
- - 7687:7687
- volumes:
- - neo4j_data:/data
-
- ########################################################
- # MAILSERVER TO FAKE SMTP ##############################
- ########################################################
- mailserver:
- image: djfarrelly/maildev
- ports:
- - 1080:80
- networks:
- - test-network
-
-networks:
- test-network:
-
-volumes:
- backend_uploads:
- neo4j_data:
diff --git a/deployment/docker-compose.yml b/deployment/docker-compose.yml
deleted file mode 100644
index 42919177a..000000000
--- a/deployment/docker-compose.yml
+++ /dev/null
@@ -1,190 +0,0 @@
-services:
-
- webapp-base:
- image: ocelotsocialnetwork/webapp:local-base
- build:
- dockerfile: ../webapp/Dockerfile
- context: ../webapp
- target: base
- command: sleep 0
-
- webapp-code:
- image: ocelotsocialnetwork/webapp:local-code
- build:
- dockerfile: ../webapp/Dockerfile
- context: ../webapp
- target: code
- command: sleep 0
-
- webapp:
- image: ocelotsocialnetwork/webapp-branded:local-${CONFIGURATION}
- container_name: webapp-branded
- build:
- dockerfile: src/docker/webapp.Dockerfile
- target: branded
- context: .
- args:
- - CONFIGURATION=$CONFIGURATION
- - APP_IMAGE_TAG_BASE=local-base
- - APP_IMAGE_TAG_CODE=local-code
- ports:
- - 3000:3000
- networks:
- - test-network
- depends_on:
- - backend
- - webapp-base
- - webapp-code
- env_file:
- - .env
- environment:
- - HOST=0.0.0.0
- - GRAPHQL_URI=http://backend:4000
- - MAPBOX_TOKEN="pk.eyJ1IjoiYnVzZmFrdG9yIiwiYSI6ImNraDNiM3JxcDBhaWQydG1uczhpZWtpOW4ifQ.7TNRTO-o9aK1Y6MyW_Nd4g"
- # - WEBSOCKETS_URI=ws://backend:4000/graphql # is not working and not given in Docker YAML in main repo
- - PUBLIC_REGISTRATION=true
- - INVITE_REGISTRATION=true
- - CATEGORIES_ACTIVE=true
-
- backend-base:
- image: ocelotsocialnetwork/backend:local-base
- build:
- dockerfile: ../backend/Dockerfile
- context: ../backend
- target: base
- command: sleep 0
-
- backend-code:
- image: ocelotsocialnetwork/backend:local-code
- build:
- dockerfile: ../backend/Dockerfile
- context: ../backend
- target: code
- command: sleep 0
-
- backend:
- image: ocelotsocialnetwork/backend-branded:local-${CONFIGURATION}
- container_name: backend-branded
- build:
- dockerfile: src/docker/backend.Dockerfile
- target: branded
- context: .
- args:
- - CONFIGURATION=$CONFIGURATION
- - APP_IMAGE_TAG_BASE=local-base
- - APP_IMAGE_TAG_CODE=local-code
- networks:
- - test-network
- depends_on:
- - neo4j
- - backend-base
- - backend-code
- ports:
- - 4000:4000
- volumes:
- - backend_uploads:/app/public/uploads
- environment:
- - NEO4J_URI=bolt://neo4j:7687
- - GRAPHQL_URI=http://backend:4000
- - CLIENT_URI=http://localhost:3000
- - JWT_SECRET=b/&&7b78BF&fv/Vd
- - MAPBOX_TOKEN=pk.eyJ1IjoiYnVzZmFrdG9yIiwiYSI6ImNraDNiM3JxcDBhaWQydG1uczhpZWtpOW4ifQ.7TNRTO-o9aK1Y6MyW_Nd4g
- - PRIVATE_KEY_PASSPHRASE=a7dsf78sadg87ad87sfagsadg78
- - EMAIL_SUPPORT=support@wir.social
- - EMAIL_DEFAULT_SENDER=info@wir.social
- - PUBLIC_REGISTRATION=true
- - INVITE_REGISTRATION=true
- - CATEGORIES_ACTIVE=true
- - SMTP_USERNAME=${SMTP_USERNAME}
- - SMTP_PASSWORD=${SMTP_PASSWORD}
- - SMTP_HOST=mailserver
- - SMTP_PORT=25
- - SMTP_IGNORE_TLS=true
- #- PRODUCTION_DB_CLEAN_ALLOW=true
- - NODE_ENV=development
-
- maintenance-base:
- image: ocelotsocialnetwork/maintenance:local-base
- build:
- dockerfile: ../webapp/Dockerfile.maintenance
- context: ../webapp
- target: base
- command: sleep 0
-
- maintenance-code:
- image: ocelotsocialnetwork/maintenance:local-code
- build:
- dockerfile: ../webapp/Dockerfile.maintenance
- context: ../webapp
- target: code
- command: sleep 0
-
- maintenance:
- # name the image so that it cannot be found in a DockerHub repository, otherwise it will not be built locally from the 'dockerfile' but pulled from there
- image: ocelotsocialnetwork/maintenance-branded:local-${CONFIGURATION}
- container_name: maintenance-branded
- build:
- # TODO: Separate from webapp, this must be independent
- dockerfile: src/docker/maintenance.Dockerfile
- target: branded
- context: .
- args:
- - CONFIGURATION=$CONFIGURATION
- - APP_IMAGE_TAG_BASE=local-base
- - APP_IMAGE_TAG_CODE=local-code
- networks:
- - test-network
- depends_on:
- - maintenance-base
- - maintenance-code
- ports:
- - 3001:80
-
- neo4j:
- # Neo4j v3.5.14-community
- # image: wollehuss/neo4j-community-branded:latest
- # Neo4j 4.4-community
- image: ocelotsocialnetwork/neo4j-community:latest
- container_name: neo4j-branded
- networks:
- - test-network
- ports:
- - 7687:7687
- # only for development
- # - 7474:7474
- - 7474:7474
- volumes:
- - neo4j_data:/data
- environment:
- # settings reference: https://neo4j.com/docs/operations-manual/4.4/docker/ref-settings/
- # TODO: This sounds scary for a production environment
- - NEO4J_AUTH=none
- - NEO4J_dbms_security_procedures_unrestricted=algo.*,apoc.*
- - NEO4J_dbms_allow__format__migration=true
- - NEO4J_dbms_allow__upgrade=true
- # TODO: clarify if that is the only thing needed to unlock the Enterprise version
- # - NEO4J_ACCEPT_LICENSE_AGREEMENT=yes
- # Uncomment following line for Neo4j Enterprise version instead of Community version
- # TODO: clarify if that is the only thing needed to unlock the Enterprise version
- # - NEO4J_ACCEPT_LICENSE_AGREEMENT=yes
- # set the name of the database to be used
- # - NEO4J_dbms_default__database=graph.db
- # - NEO4J_dbms_default__database=neo4j
- # TODO: Remove the playground from production
- # bring the database in offline mode to export or load dumps
- # command: ["tail", "-f", "/dev/null"]
-
- mailserver:
- image: djfarrelly/maildev
- container_name: mailserver-branded
- ports:
- - 1080:80
- networks:
- - test-network
-
-networks:
- test-network:
-
-volumes:
- backend_uploads:
- neo4j_data:
diff --git a/deployment/helm/charts/.helmignore b/deployment/helm/charts/.helmignore
new file mode 100644
index 000000000..0e8a0eb36
--- /dev/null
+++ b/deployment/helm/charts/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/deployment/helm/charts/ocelot-neo4j/Chart.yaml b/deployment/helm/charts/ocelot-neo4j/Chart.yaml
new file mode 100644
index 000000000..f8a6c88f1
--- /dev/null
+++ b/deployment/helm/charts/ocelot-neo4j/Chart.yaml
@@ -0,0 +1,24 @@
+apiVersion: v2
+name: ocelot-neo4j
+description: A Helm chart for the neo4j database of ocelot-social
+
+# A chart can be either an 'application' or a 'library' chart.
+#
+# Application charts are a collection of templates that can be packaged into versioned archives
+# to be deployed.
+#
+# Library charts provide useful utilities or functions for the chart developer. They're included as
+# a dependency of application charts to inject those utilities and functions into the rendering
+# pipeline. Library charts do not define any templates and therefore cannot be deployed.
+type: application
+
+# This is the chart version. This version number should be incremented each time you make changes
+# to the chart and its templates, including the app version.
+# Versions are expected to follow Semantic Versioning (https://semver.org/)
+version: 0.1.0
+
+# This is the version number of the application being deployed. This version number should be
+# incremented each time you make changes to the application. Versions are not expected to
+# follow Semantic Versioning. They should reflect the version the application is using.
+# It is recommended to use it with quotes.
+appVersion: "3.2.0"
diff --git a/deployment/helm/charts/ocelot-neo4j/old/job.yaml b/deployment/helm/charts/ocelot-neo4j/old/job.yaml
new file mode 100644
index 000000000..b9051206c
--- /dev/null
+++ b/deployment/helm/charts/ocelot-neo4j/old/job.yaml
@@ -0,0 +1,34 @@
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: {{ .Release.Name }}-neo4j-backup
+spec:
+ template:
+ spec:
+ restartPolicy: OnFailure
+ containers:
+ - name: container-{{ .Release.Name }}-neo4j-backup
+ image: "{{ .Values.neo4j.image.repository }}:{{ .Values.neo4j.image.tag | default .Values.global.image.tag | default .Chart.AppVersion }}"
+ imagePullPolicy: {{ quote .Values.global.image.pullPolicy }}
+ command:
+ - neo4j-admin
+ - dump
+ - --to
+ - "/backups/neo4j-dump-{{ now | date "20060102150405" }}"
+ envFrom:
+ - configMapRef:
+ name: {{ .Release.Name }}-neo4j-env
+ - secretRef:
+ name: {{ .Release.Name }}-neo4j-secret-env
+ volumeMounts:
+ - mountPath: /data/
+ name: neo4j-data
+ - mountPath: /backups/
+ name: neo4j-backups
+ volumes:
+ - name: neo4j-data
+ persistentVolumeClaim:
+ claimName: {{ .Release.Name }}-neo4j-data
+ - name: neo4j-backups
+ persistentVolumeClaim:
+ claimName: {{ .Release.Name }}-neo4j-backups
diff --git a/deployment/helm/charts/ocelot-neo4j/templates/_helpers.tpl b/deployment/helm/charts/ocelot-neo4j/templates/_helpers.tpl
new file mode 100644
index 000000000..d4d9b841f
--- /dev/null
+++ b/deployment/helm/charts/ocelot-neo4j/templates/_helpers.tpl
@@ -0,0 +1,10 @@
+{{- define "defaultTag" -}}
+{{- .Values.global.image.tag | default .Chart.AppVersion }}
+{{- end -}}
+
+{{- define "resources" }}
+{{- if . }}
+resources:
+{{ . | toYaml | indent 2 }}
+{{- end }}
+{{- end }}
diff --git a/deployment/helm/charts/ocelot-neo4j/templates/neo4j/configmap.yml b/deployment/helm/charts/ocelot-neo4j/templates/neo4j/configmap.yml
new file mode 100644
index 000000000..e4aa51875
--- /dev/null
+++ b/deployment/helm/charts/ocelot-neo4j/templates/neo4j/configmap.yml
@@ -0,0 +1,6 @@
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: {{ .Release.Name }}-neo4j-env
+data:
+{{ .Values.neo4j.env | toYaml | indent 2 }}
diff --git a/deployment/helm/charts/ocelot-neo4j/templates/neo4j/persistent-volume-claim.yaml b/deployment/helm/charts/ocelot-neo4j/templates/neo4j/persistent-volume-claim.yaml
new file mode 100644
index 000000000..96843e557
--- /dev/null
+++ b/deployment/helm/charts/ocelot-neo4j/templates/neo4j/persistent-volume-claim.yaml
@@ -0,0 +1,22 @@
+---
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+ name: {{ .Release.Name }}-neo4j-data
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: {{ .Values.neo4j.storage }}
+---
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+ name: {{ .Release.Name }}-neo4j-backups
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: {{ .Values.neo4j.storageBackups }}
diff --git a/deployment/helm/charts/ocelot-neo4j/templates/neo4j/secret.yaml b/deployment/helm/charts/ocelot-neo4j/templates/neo4j/secret.yaml
new file mode 100644
index 000000000..de67571c0
--- /dev/null
+++ b/deployment/helm/charts/ocelot-neo4j/templates/neo4j/secret.yaml
@@ -0,0 +1,6 @@
+kind: Secret
+apiVersion: v1
+metadata:
+ name: {{ .Release.Name }}-neo4j-secret-env
+stringData:
+{{ .Values.secrets.neo4j.env | toYaml | indent 2 }}
diff --git a/deployment/helm/charts/ocelot-neo4j/templates/neo4j/service.yaml b/deployment/helm/charts/ocelot-neo4j/templates/neo4j/service.yaml
new file mode 100644
index 000000000..02dd927db
--- /dev/null
+++ b/deployment/helm/charts/ocelot-neo4j/templates/neo4j/service.yaml
@@ -0,0 +1,14 @@
+kind: Service
+apiVersion: v1
+metadata:
+ name: {{ .Release.Name }}-neo4j
+spec:
+ ports:
+ - name: {{ .Release.Name }}-bolt
+ port: 7687
+ targetPort: 7687
+ - name: {{ .Release.Name }}-http # for debugging only
+ port: 7474
+ targetPort: 7474
+ selector:
+ app: {{ .Release.Name }}-neo4j
diff --git a/deployment/helm/charts/ocelot-neo4j/templates/neo4j/stateful-set.yaml b/deployment/helm/charts/ocelot-neo4j/templates/neo4j/stateful-set.yaml
new file mode 100644
index 000000000..65341d120
--- /dev/null
+++ b/deployment/helm/charts/ocelot-neo4j/templates/neo4j/stateful-set.yaml
@@ -0,0 +1,38 @@
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: {{ .Release.Name }}-neo4j
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: {{ .Release.Name }}-neo4j
+ template:
+ metadata:
+ name: neo4j
+ annotations:
+ backup.velero.io/backup-volumes: neo4j-data
+ labels:
+ app: {{ .Release.Name }}-neo4j
+ spec:
+ restartPolicy: Always
+ containers:
+ - name: container-{{ .Release.Name }}-neo4j
+ image: "{{ .Values.neo4j.image.repository }}:{{ .Values.neo4j.image.tag | default (include "defaultTag" .) }}"
+ imagePullPolicy: {{ quote .Values.global.image.pullPolicy }}
+ {{- include "resources" .Values.neo4j.resources | indent 8 }}
+ ports:
+ - containerPort: 7687
+ - containerPort: 7474
+ envFrom:
+ - configMapRef:
+ name: {{ .Release.Name }}-neo4j-env
+ - secretRef:
+ name: {{ .Release.Name }}-neo4j-secret-env
+ volumeMounts:
+ - mountPath: /data/
+ name: neo4j-data
+ volumes:
+ - name: neo4j-data
+ persistentVolumeClaim:
+ claimName: {{ .Release.Name }}-neo4j-data
diff --git a/deployment/helm/charts/ocelot-neo4j/values.yaml b/deployment/helm/charts/ocelot-neo4j/values.yaml
new file mode 100644
index 000000000..0bc9874df
--- /dev/null
+++ b/deployment/helm/charts/ocelot-neo4j/values.yaml
@@ -0,0 +1,25 @@
+underMaintenance: false
+
+global:
+ image:
+ tag:
+
+neo4j:
+ image:
+ repository: ghcr.io/ocelot-social-community/ocelot-social/neo4j
+ tag:
+
+ storage: "5Gi"
+ storageBackups: "10Gi"
+ env:
+ NEO4J_ACCEPT_LICENSE_AGREEMENT: "no"
+ NEO4J_AUTH: "none"
+ NEO4J_dbms_connector_bolt_thread__pool__max__size: "400"
+ NEO4J_dbms_memory_heap_initial__size: ""
+ NEO4J_dbms_memory_heap_max__size: ""
+ NEO4J_dbms_memory_pagecache_size: ""
+ NEO4J_dbms_security_procedures_unrestricted: "algo.*,apoc.*"
+ NEO4J_dbms_default__database: neo4j
+ NEO4J_apoc_import_file_enabled: "false"
+ NEO4J_dbms_allow__format__migration: "true"
+ NEO4J_dbms_allow__upgrade: "true"
diff --git a/deployment/helm/charts/ocelot-social/Chart.yaml b/deployment/helm/charts/ocelot-social/Chart.yaml
new file mode 100644
index 000000000..c363a90cb
--- /dev/null
+++ b/deployment/helm/charts/ocelot-social/Chart.yaml
@@ -0,0 +1,24 @@
+apiVersion: v2
+name: ocelot-social
+description: A Helm chart for ocelot-social
+
+# A chart can be either an 'application' or a 'library' chart.
+#
+# Application charts are a collection of templates that can be packaged into versioned archives
+# to be deployed.
+#
+# Library charts provide useful utilities or functions for the chart developer. They're included as
+# a dependency of application charts to inject those utilities and functions into the rendering
+# pipeline. Library charts do not define any templates and therefore cannot be deployed.
+type: application
+
+# This is the chart version. This version number should be incremented each time you make changes
+# to the chart and its templates, including the app version.
+# Versions are expected to follow Semantic Versioning (https://semver.org/)
+version: 0.1.0
+
+# This is the version number of the application being deployed. This version number should be
+# incremented each time you make changes to the application. Versions are not expected to
+# follow Semantic Versioning. They should reflect the version the application is using.
+# It is recommended to use it with quotes.
+appVersion: "3.2.0"
diff --git a/deployment/helm/charts/ocelot-social/templates/_helpers.tpl b/deployment/helm/charts/ocelot-social/templates/_helpers.tpl
new file mode 100644
index 000000000..d4d9b841f
--- /dev/null
+++ b/deployment/helm/charts/ocelot-social/templates/_helpers.tpl
@@ -0,0 +1,10 @@
+{{- define "defaultTag" -}}
+{{- .Values.global.image.tag | default .Chart.AppVersion }}
+{{- end -}}
+
+{{- define "resources" }}
+{{- if . }}
+resources:
+{{ . | toYaml | indent 2 }}
+{{- end }}
+{{- end }}
diff --git a/deployment/helm/charts/ocelot-social/templates/acme-issuer.yaml b/deployment/helm/charts/ocelot-social/templates/acme-issuer.yaml
new file mode 100644
index 000000000..6654aab77
--- /dev/null
+++ b/deployment/helm/charts/ocelot-social/templates/acme-issuer.yaml
@@ -0,0 +1,39 @@
+---
+apiVersion: cert-manager.io/v1
+kind: Issuer
+metadata:
+ name: {{ .Release.Name }}-letsencrypt-staging
+spec:
+ acme:
+ # The ACME server URL
+ server: https://acme-staging-v02.api.letsencrypt.org/directory
+ # Email address used for ACME registration
+ email: {{ quote .Values.secrets.acme_email }}
+ # Name of a secret used to store the ACME account private key
+ privateKeySecretRef:
+ name: {{ .Release.Name }}-letsencrypt-staging
+ # Enable the HTTP-01 challenge provider
+ solvers:
+ - http01:
+ ingress:
+ class: traefik
+
+---
+apiVersion: cert-manager.io/v1
+kind: Issuer
+metadata:
+ name: {{ .Release.Name }}-letsencrypt-prod
+spec:
+ acme:
+ # The ACME server URL
+ server: https://acme-v02.api.letsencrypt.org/directory
+ # Email address used for ACME registration
+ email: {{ quote .Values.secrets.acme_email }}
+ # Name of a secret used to store the ACME account private key
+ privateKeySecretRef:
+ name: {{ .Release.Name }}-letsencrypt-prod
+ # Enable the HTTP-01 challenge provider
+ solvers:
+ - http01:
+ ingress:
+ class: traefik
diff --git a/deployment/helm/charts/ocelot-social/templates/backend/configmap.yml b/deployment/helm/charts/ocelot-social/templates/backend/configmap.yml
new file mode 100644
index 000000000..06afc5962
--- /dev/null
+++ b/deployment/helm/charts/ocelot-social/templates/backend/configmap.yml
@@ -0,0 +1,6 @@
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: {{ .Release.Name }}-backend-env
+data:
+{{ .Values.backend.env | toYaml | indent 2 }}
diff --git a/deployment/helm/charts/ocelot-social/templates/backend/persistent-volume-claim.yaml b/deployment/helm/charts/ocelot-social/templates/backend/persistent-volume-claim.yaml
new file mode 100644
index 000000000..831b95347
--- /dev/null
+++ b/deployment/helm/charts/ocelot-social/templates/backend/persistent-volume-claim.yaml
@@ -0,0 +1,10 @@
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+ name: {{ .Release.Name }}-uploads
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: {{ .Values.backend.storage }}
diff --git a/deployment/helm/charts/ocelot-social/templates/backend/secret.yaml b/deployment/helm/charts/ocelot-social/templates/backend/secret.yaml
new file mode 100644
index 000000000..a26eec86c
--- /dev/null
+++ b/deployment/helm/charts/ocelot-social/templates/backend/secret.yaml
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ .Release.Name }}-backend-secret-env
+type: Opaque
+stringData:
+{{ .Values.secrets.backend.env | toYaml | indent 2 }}
diff --git a/deployment/helm/charts/ocelot-social/templates/backend/service.yaml b/deployment/helm/charts/ocelot-social/templates/backend/service.yaml
new file mode 100644
index 000000000..e484fa7a9
--- /dev/null
+++ b/deployment/helm/charts/ocelot-social/templates/backend/service.yaml
@@ -0,0 +1,11 @@
+kind: Service
+apiVersion: v1
+metadata:
+ name: {{ .Release.Name }}-backend
+spec:
+ ports:
+ - name: {{ .Release.Name }}-graphql
+ port: 4000
+ targetPort: 4000
+ selector:
+ app: {{ .Release.Name }}-backend
diff --git a/deployment/helm/charts/ocelot-social/templates/backend/stateful-set.yaml b/deployment/helm/charts/ocelot-social/templates/backend/stateful-set.yaml
new file mode 100644
index 000000000..98eb3fcad
--- /dev/null
+++ b/deployment/helm/charts/ocelot-social/templates/backend/stateful-set.yaml
@@ -0,0 +1,52 @@
+kind: StatefulSet
+apiVersion: apps/v1
+metadata:
+ name: {{ .Release.Name }}-backend
+spec:
+ selector:
+ matchLabels:
+ app: {{ .Release.Name }}-backend
+ template:
+ metadata:
+ annotations:
+ backup.velero.io/backup-volumes: uploads
+ labels:
+ app: {{ .Release.Name }}-backend
+ spec:
+ restartPolicy: Always
+ initContainers:
+ - name: {{ .Release.Name }}-backend-migrations
+ image: "{{ .Values.backend.image.repository }}:{{ .Values.backend.image.tag | default (include "defaultTag" .) }}"
+ imagePullPolicy: {{ quote .Values.global.image.pullPolicy }}
+ command: ["/bin/sh", "-c", "yarn prod:migrate up"]
+ {{- include "resources" .Values.backend.resources | indent 10 }}
+ envFrom:
+ - configMapRef:
+ name: {{ .Release.Name }}-backend-env
+ - secretRef:
+ name: {{ .Release.Name }}-backend-secret-env
+ containers:
+ - name: {{ .Release.Name }}-backend
+ image: "{{ .Values.backend.image.repository }}:{{ .Values.backend.image.tag | default (include "defaultTag" .) }}"
+ imagePullPolicy: {{ quote .Values.global.image.pullPolicy }}
+ {{- include "resources" .Values.backend.resources | indent 10 }}
+ env:
+ - name: GRAPHQL_URI
+ value: "http://{{ .Release.Name }}-backend:4000"
+ - name: CLIENT_URI
+ value: "https://{{ .Values.domain }}"
+ envFrom:
+ - configMapRef:
+ name: {{ .Release.Name }}-backend-env
+ - secretRef:
+ name: {{ .Release.Name }}-backend-secret-env
+ ports:
+ - containerPort: 4000
+ protocol: TCP
+ volumeMounts:
+ - mountPath: /app/public/uploads
+ name: uploads
+ volumes:
+ - name: uploads
+ persistentVolumeClaim:
+ claimName: {{ .Release.Name }}-uploads
diff --git a/deployment/helm/charts/ocelot-social/templates/configmap.yaml b/deployment/helm/charts/ocelot-social/templates/configmap.yaml
new file mode 100644
index 000000000..e01d5bf1b
--- /dev/null
+++ b/deployment/helm/charts/ocelot-social/templates/configmap.yaml
@@ -0,0 +1,6 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ .Release.Name }}
+data:
+{{ .Values.configmap | toYaml | indent 2 }}
diff --git a/deployment/helm/charts/ocelot-social/templates/ingress.yaml b/deployment/helm/charts/ocelot-social/templates/ingress.yaml
new file mode 100644
index 000000000..56142f650
--- /dev/null
+++ b/deployment/helm/charts/ocelot-social/templates/ingress.yaml
@@ -0,0 +1,65 @@
+---
+{{- define "joinRedirectMiddlewares" -}}
+{{- $local := dict "first" true -}}
+{{- range $k, $v := .Values.redirect_domains -}}{{- if not $local.first -}},{{- end -}}{{$.Release.Namespace}}-redirect-{{- $v | replace "." "-" -}}@kubernetescrd{{- $_ := set $local "first" false -}}{{- end -}}
+{{- end -}}
+---
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: {{ .Release.Name }}-ocelot
+ annotations:
+ cert-manager.io/issuer: {{ .Values.cert_manager.issuer | default (printf "%s-letsencrypt-staging" .Release.Name) }}
+ traefik.ingress.kubernetes.io/router.middlewares: {{ quote (include "joinRedirectMiddlewares" $) }}
+spec:
+ tls:
+ - hosts:
+ - {{ quote .Values.domain }}
+ {{- range .Values.redirect_domains }}
+ - {{ quote . }}
+ {{- end }}
+ secretName: {{ .Release.Name }}-letsencrypt-tls
+
+ rules:
+ - host: {{ quote .Values.domain }}
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ {{- if .Values.underMaintenance }}
+ name: {{ .Release.Name }}-maintenance
+ port:
+ number: 80
+ {{- else }}
+ name: {{ .Release.Name }}-webapp
+ port:
+ number: 3000
+ {{- end }}
+ {{- range .Values.redirect_domains }}
+ - host: {{ quote . }} # the service must be defined, else the redirect is not working
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: {{ $.Release.Name }}-maintenance
+ port:
+ number: 80
+ {{- end }}
+
+{{- range .Values.redirect_domains }}
+---
+# Redirect with domain replacement
+apiVersion: traefik.io/v1alpha1
+kind: Middleware
+metadata:
+ name: redirect-{{ . | replace "." "-" }}
+spec:
+ redirectRegex:
+ regex: ^https://{{ . }}(.*)
+ replacement: https://{{ $.Values.domain }}${1}
+ permanent: true
+{{- end }}
\ No newline at end of file
diff --git a/deployment/helm/charts/ocelot-social/templates/maintenance/deployment.yaml b/deployment/helm/charts/ocelot-social/templates/maintenance/deployment.yaml
new file mode 100644
index 000000000..a06e66541
--- /dev/null
+++ b/deployment/helm/charts/ocelot-social/templates/maintenance/deployment.yaml
@@ -0,0 +1,24 @@
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+ name: {{ .Release.Name }}-maintenance
+spec:
+ selector:
+ matchLabels:
+ app: {{ .Release.Name }}-maintenance
+ template:
+ metadata:
+ labels:
+ app: {{ .Release.Name }}-maintenance
+ spec:
+ restartPolicy: Always
+ containers:
+ - name: {{ .Release.Name }}-maintenance
+ image: "{{ .Values.maintenance.image.repository }}:{{ .Values.maintenance.image.tag | default (include "defaultTag" .) }}"
+ imagePullPolicy: {{ quote .Values.global.image.pullPolicy }}
+ {{- include "resources" .Values.maintenance.resources | indent 8 }}
+ env:
+ - name: HOST
+ value: "0.0.0.0"
+ ports:
+ - containerPort: 80
diff --git a/deployment/helm/charts/ocelot-social/templates/maintenance/service.yaml b/deployment/helm/charts/ocelot-social/templates/maintenance/service.yaml
new file mode 100644
index 000000000..fc60e687f
--- /dev/null
+++ b/deployment/helm/charts/ocelot-social/templates/maintenance/service.yaml
@@ -0,0 +1,11 @@
+kind: Service
+apiVersion: v1
+metadata:
+ name: {{ .Release.Name }}-maintenance
+spec:
+ ports:
+ - name: {{ .Release.Name }}-http
+ port: 80
+ targetPort: 80
+ selector:
+ app: {{ .Release.Name }}-maintenance
diff --git a/deployment/helm/charts/ocelot-social/templates/webapp/configmap.yml b/deployment/helm/charts/ocelot-social/templates/webapp/configmap.yml
new file mode 100644
index 000000000..f5ad51a21
--- /dev/null
+++ b/deployment/helm/charts/ocelot-social/templates/webapp/configmap.yml
@@ -0,0 +1,6 @@
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: {{ .Release.Name }}-webapp-env
+data:
+{{ .Values.webapp.env | toYaml | indent 2 }}
diff --git a/deployment/helm/charts/ocelot-social/templates/webapp/deployment.yaml b/deployment/helm/charts/ocelot-social/templates/webapp/deployment.yaml
new file mode 100644
index 000000000..f23705db1
--- /dev/null
+++ b/deployment/helm/charts/ocelot-social/templates/webapp/deployment.yaml
@@ -0,0 +1,34 @@
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+ name: {{ .Release.Name }}-webapp
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: {{ .Release.Name }}-webapp
+ template:
+ metadata:
+ labels:
+ app: {{ .Release.Name }}-webapp
+ spec:
+ restartPolicy: Always
+ containers:
+ - name: {{ .Release.Name }}-webapp
+ image: "{{ .Values.webapp.image.repository }}:{{ .Values.webapp.image.tag | default (include "defaultTag" .) }}"
+ imagePullPolicy: {{ quote .Values.global.image.pullPolicy }}
+ {{- include "resources" .Values.webapp.resources | indent 8 }}
+ ports:
+ - containerPort: 3000
+ env:
+ - name: WEBSOCKETS_URI
+ value: "wss://{{ .Values.domain }}/api/graphql"
+ - name: HOST
+ value: "0.0.0.0"
+ - name: GRAPHQL_URI
+ value: "http://{{ .Release.Name }}-backend:4000"
+ envFrom:
+ - configMapRef:
+ name: {{ .Release.Name }}-webapp-env
+ - secretRef:
+ name: {{ .Release.Name }}-webapp-secret-env
diff --git a/deployment/helm/charts/ocelot-social/templates/webapp/secret.yaml b/deployment/helm/charts/ocelot-social/templates/webapp/secret.yaml
new file mode 100644
index 000000000..32179e90e
--- /dev/null
+++ b/deployment/helm/charts/ocelot-social/templates/webapp/secret.yaml
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ .Release.Name }}-webapp-secret-env
+type: Opaque
+stringData:
+{{ .Values.secrets.webapp.env | toYaml | indent 2 }}
diff --git a/deployment/helm/charts/ocelot-social/templates/webapp/service.yaml b/deployment/helm/charts/ocelot-social/templates/webapp/service.yaml
new file mode 100644
index 000000000..1e1c4c712
--- /dev/null
+++ b/deployment/helm/charts/ocelot-social/templates/webapp/service.yaml
@@ -0,0 +1,11 @@
+kind: Service
+apiVersion: v1
+metadata:
+ name: {{ .Release.Name }}-webapp
+spec:
+ ports:
+ - name: {{ .Release.Name }}-http
+ port: 3000
+ targetPort: 3000
+ selector:
+ app: {{ .Release.Name }}-webapp
diff --git a/deployment/helm/charts/ocelot-social/values.yaml b/deployment/helm/charts/ocelot-social/values.yaml
new file mode 100644
index 000000000..2213c5007
--- /dev/null
+++ b/deployment/helm/charts/ocelot-social/values.yaml
@@ -0,0 +1,27 @@
+domain: stage.ocelot.social
+redirect_domains: []
+
+cert_manager:
+ issuer:
+
+underMaintenance: false
+
+global:
+ image:
+ pullPolicy: IfNotPresent
+ tag:
+
+backend:
+ image:
+ repository: ghcr.io/ocelot-social-community/ocelot-social/backend
+ storage: "10Gi"
+ env:
+ NEO4J_URI: "bolt://ocelot-social-neo4j:7687"
+
+webapp:
+ image:
+ repository: ghcr.io/ocelot-social-community/ocelot-social/webapp
+
+maintenance:
+ image:
+ repository: ghcr.io/ocelot-social-community/ocelot-social/maintenance
diff --git a/deployment/helm/helmfile/helmfile.yaml.gotmpl b/deployment/helm/helmfile/helmfile.yaml.gotmpl
new file mode 100644
index 000000000..6ea0b29d0
--- /dev/null
+++ b/deployment/helm/helmfile/helmfile.yaml.gotmpl
@@ -0,0 +1,16 @@
+releases:
+ - name: ocelot-social
+ namespace: ocelot-social
+ chart: ../charts/ocelot-social
+ values:
+ - ./values/ocelot.yaml
+ secrets:
+ - ./secrets/ocelot.yaml
+
+ - name: ocelot-neo4j
+ namespace: ocelot-social
+ chart: ../charts/ocelot-neo4j
+ values:
+ - ./values/ocelot.yaml
+ secrets:
+ - ./secrets/ocelot.yaml
diff --git a/deployment/helm/helmfile/secrets/ocelot.yaml b/deployment/helm/helmfile/secrets/ocelot.yaml
new file mode 100644
index 000000000..3965bc09e
--- /dev/null
+++ b/deployment/helm/helmfile/secrets/ocelot.yaml
@@ -0,0 +1,76 @@
+secrets:
+ acme_email: ENC[AES256_GCM,data:o+2HnrEqa/uXJwqUwdYU14FiZYPfLcKqkQ==,iv:1ouUU4ewzRL4ZDnwJm6BTVg3a64iC5+I2v+AWIF8W2Q=,tag:7ytv959cVmgSmXMC7A8zxA==,type:str]
+ webapp:
+ env:
+ MAPBOX_TOKEN: ENC[AES256_GCM,data:7Ka4BvQh6NDw9NKUcgGjLwxNHOqhVrZEj/DcGnyv1nXQIG/2WWGGHazAFWUCFpCUmCSaTPSkyLHPFyGQtQ7VAON3AG3tHtv5JvcBb4KDYrjAIzxhAAiHMYFtVJs=,iv:X0YL2dW42TUidJdBlRKb4Vq86X1OzHqipNHTBxmE7ds=,tag:KDH9NwDy6ghqdkXeZxuHgg==,type:str]
+ backend:
+ env:
+ JWT_SECRET: ENC[AES256_GCM,data:8qGviTFMOv9QyoNVwnlFNZ2PmvedbKJM,iv:rmZgs8h2QVsokzMzdGdEcInBLv8AX3xFUjkGhTf3sF0=,tag:SUJpMaIGAb14yg8RxCVUtA==,type:str]
+ MAPBOX_TOKEN: ENC[AES256_GCM,data:qK6iTYKiWfkvXBodm8zVmfr5ACTTz1+7Pt7Q/hwgv3SYERyo5NyqfsvbVKuDAD90kTCNODpSwUApJE6do/Umedg4s8mrnHXCckIDbX5BztoeHJBehsUC54ELcrQ=,iv:b65yqfdoOX366UXt7HS6nhL8hlZn4l5hQfrhI6NXc+I=,tag:vF48V+TRS5g9ezXhzAJnPw==,type:str]
+ PRIVATE_KEY_PASSPHRASE: ENC[AES256_GCM,data:05WXBFKIk0BtfUYmkWSwAP+/Y7v18LUow4X/,iv:y7VyymcoRLr2CK96BiErXvKP2Gn/QhECBZyeP+wo8LA=,tag:Hg/fIGyIDMY8P3mWfVupCw==,type:str]
+ #ENC[AES256_GCM,data:llx+JN8fRqwrLd2ahkmPrhPwcGIkn695l3Ox8VEs9YAR+1wpz3yujA==,iv:4Ctez8zMeqo3cpCCUVy6ZP4T1Z/myPw/FTq+++YAYbc=,tag:al/J8DLqNz6CoLl+TgUdOw==,type:comment]
+ EMAIL_DEFAULT_SENDER: ENC[AES256_GCM,data:z1EyEokf/TNkFLhRzsCbHew/6T8=,iv:Satr1c8aZQE73ZolC6n+PO74r+Gj3un5Mj0DIYb3n14=,tag:iK6l0GXuhLauBtFXTmLyKQ==,type:str]
+ SMTP_HOST: ENC[AES256_GCM,data:r0qbaUBB3CSUHR76,iv:TJIx71HW1aBB0sCEd1TB/tTgPBxLR1sdGAEf0t7Qilg=,tag:arXYtwVbIXVaUJpyommokQ==,type:str]
+ SMTP_USERNAME: ENC[AES256_GCM,data:lZ05DvSu,iv:Tyu7poao1shqKGd/sjTCgGNHU1xgRpjwjMRd+ArGf6o=,tag:dKms4G683JvFzja7YOwYKg==,type:str]
+ SMTP_PASSWORD: ENC[AES256_GCM,data:c9rnPIaKHIh2LNIJON3ib1IsA09OWGchDxRPRpvrtJw=,iv:08Acxl74lJbYtEEU6crVIYRXwkER8t1XPrhBA2PwEio=,tag:F0xrrt2PkBUMEyp7a81ssw==,type:str]
+ SMTP_PORT: ENC[AES256_GCM,data:MGmv,iv:IFg6oEncN0ICEmw96XL4EuPKqEZ6KLwU5FJYkveMSpY=,tag:kIVXlt0o5TfhOtRVqU/c4w==,type:str]
+ SMTP_IGNORE_TLS: ENC[AES256_GCM,data:ORAIWtg=,iv:6X4V3RDeYHrFdBTjsb3Ji0KWsZ2meL8ilqHNGQbcV/M=,tag:R87FgoQwqpes+0ejcOlrPg==,type:str]
+ #ENC[AES256_GCM,data:wEE3/SPsZqy9LATseOZG7LsCbjG5gY4VUT/TzxhHLJqcYP5I,iv:gcOA0XiUGWq15G4zTRPZ0qZ/XYMTjr+9krbOx0dwpeY=,tag:jd8LTiVT7UQShqMR9zZUZA==,type:comment]
+ SMTP_SECURE: ENC[AES256_GCM,data:PowbGhU=,iv:a1dK5AVySu749vPQvX9OLfMuD+tZkLNtXTMr17+4KuA=,tag:fuJQ7c4RBl25If01MSAmug==,type:str]
+ SMTP_DKIM_PRIVATKEY: null
+ SMTP_DKIM_DOMAINNAME: null
+ SMTP_DKIM_KEYSELECTOR: null
+ NEO4J_USERNAME: null
+ NEO4J_PASSWORD: null
+ REDIS_PASSWORD: null
+ neo4j:
+ env:
+ NEO4J_USERNAME: ""
+ NEO4J_PASSWORD: ""
+sops:
+ kms: []
+ gcp_kms: []
+ azure_kv: []
+ hc_vault: []
+ age:
+ - recipient: age1llp6k66265q3rzqemxpnq0x3562u20989vcjf65fl9s3hjhgcscq6mhnjw
+ enc: |
+ -----BEGIN AGE ENCRYPTED FILE-----
+ YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBRbjk3QXdyZU5yZnE0dElE
+ SW91VGIvSnovRmc4MCtiNDhET3RHQTFoakd3ClB4RlZUZXRwSTgvUTR3Q1AwUGJo
+ NEpySWVEOFE4ZmIzek03NzczeVhyY0EKLS0tIG9SZ2ZwQXdFSUVTbWxCQXpUeWd2
+ VDlsRlY2Z1RjWFZjcU9UeUpJZHJuSmMKTuy/s49nIwfRQyDyCGBWZPvyR9oNEXxV
+ 6C0oVQXVTifkMvDet3dZWnOy6TeMkZBLD4BZHXSI+l6DkNdmIiwIpw==
+ -----END AGE ENCRYPTED FILE-----
+ - recipient: age1zycwtk6dkxj6vuqhj9jw7932ythky9p3att6df4z9qasyw8v5dxquejcmp
+ enc: |
+ -----BEGIN AGE ENCRYPTED FILE-----
+ YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBQaHd0YW83bS9NZ1RBSWl6
+ cU0vMStYT3QxOFhOYmdNMUpNaHBLOVJGUFJVCnRjbWswbDhzOStFZTdXSVhTemJx
+ TVo1YnpxMDZxd1NWMVpNYXlYbzZtaVkKLS0tIGhmaHZzc2hnYi9WSStpc2lkbkRP
+ MElZK25Nc0lZTXBtc1BOQUpCandFKzAKnareBqzmHiSY551Iw8zPNg6aJN2QM0iN
+ f05TgS58OSEzXL60/9wBEN+E4Y1VErwOYP9CH8MdiAv1iRwLYgSJ/Q==
+ -----END AGE ENCRYPTED FILE-----
+ - recipient: age15arcg8x6ltnsacwalvny0h2d4d4wkdmax328mw3v5vda9zm97uqshtavmr
+ enc: |
+ -----BEGIN AGE ENCRYPTED FILE-----
+ YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSAyUWtnd1JObWNZZzZtWndv
+ dVhLWlRSNDNacHdSMXJ1ejV2RC80elA2TG1rCmc1MTFSMlpYM3hsSDNwWUJ0R3NC
+ Y2RrT2pZQllyTkdpcEs2akF0cENpc0EKLS0tIDFxV1B6bzZZVFVlSk5qZWxDbEd4
+ MkpsL3phc0M0VXBuUGQ2dFZOZHlKS1EKEmCasI2+d4FBgiI4Ter8Gxbl87yrfBq+
+ xze5n0df0GKK6JsML/0m2Z7HoqtCAEsjEfm45GdfAaiqPVh7gJG8TQ==
+ -----END AGE ENCRYPTED FILE-----
+ - recipient: age1khw2eps099audp3uu5s9rk07qznllh5c8a43gv5dtpnq2a7lue6qrehn5s
+ enc: |
+ -----BEGIN AGE ENCRYPTED FILE-----
+ YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBRcDlPb1BlVUIwSEUvTjBx
+ KytIS0xQWjlzeEJPSDI5SEg5RmpXWFhKZVRvCm1XLzlMUmo1U1BZL2ZFS25GSkhY
+ V0tESW1hYTU0V01UQzEvNjZjMDk2WDAKLS0tIEl5TG84VE1UN0V3bk13cFU3bTUr
+ aGNFeXZZRmlJM041OHdTM0pmM3BBdGMKGvFgYY1jhKwciAOZKyw0hlFVNbOk7CM7
+ 041g17JXNV1Wk6WgMZ4w8p54RKQVaWCT4wxChy6wNNdQ3IeKgqEU2w==
+ -----END AGE ENCRYPTED FILE-----
+ lastmodified: "2024-10-29T14:26:49Z"
+ mac: ENC[AES256_GCM,data:YXX7MEAK0wmuxLTmdr7q5uVd6DG6FhGUeE+EzbhWe/OovH6n+CjKZGklnEX+5ztDO0IgZh/T9Hx1CgFYuVbcOkvDoFBDwNpRA/QOQrM0p/+tRlMNCypC/Wh2xL0DhA4A/Qum2oyE/BDkt1Yy8N5wZDZn575+ZAjXEgAzlhpT5qk=,iv:ire3gkHTY6+0lgbV1Es6Lf8bcKTg4WKnq46M+b/VRcU=,tag:MkZULKcwROvIw/C0YtcUbA==,type:str]
+ pgp: []
+ unencrypted_suffix: _unencrypted
+ version: 3.9.0
diff --git a/deployment/helm/helmfile/values/ocelot.yaml b/deployment/helm/helmfile/values/ocelot.yaml
new file mode 100644
index 000000000..89c65f3be
--- /dev/null
+++ b/deployment/helm/helmfile/values/ocelot.yaml
@@ -0,0 +1,2 @@
+cert_manager:
+ issuer: ocelot-social-letsencrypt-prod
diff --git a/deployment/scripts/branded-images.build.sh b/deployment/scripts/branded-images.build.sh
deleted file mode 100755
index fa9da67d1..000000000
--- a/deployment/scripts/branded-images.build.sh
+++ /dev/null
@@ -1,74 +0,0 @@
-#!/bin/bash
-
-# for a branded version you should pass the following env variables:
-# CONFIGURATION - your configuration folder name
-# DOCKERHUB_ORGANISATION - your dockerhub organisation
-# OCELOT_VERSION - specify the specific tag to build upon e.g. 2.4.0-300
-
-# base setup
-SCRIPT_PATH=$(realpath $0)
-SCRIPT_DIR=$(dirname $SCRIPT_PATH)
-
-# check CONFIGURATION
-if [ -z ${CONFIGURATION} ]; then
- echo "You must provide a `CONFIGURATION` via environment variable"
- exit 1
-fi
-echo "Using CONFIGURATION=${CONFIGURATION}"
-
-# check DOCKERHUB_BRAND_VARRIANT
-if [ -z ${DOCKERHUB_BRAND_VARRIANT} ]; then
- echo "You must provide a `DOCKERHUB_BRAND_VARRIANT` via environment variable"
- exit 1
-fi
-echo "Using DOCKERHUB_BRAND_VARRIANT=${DOCKERHUB_BRAND_VARRIANT}"
-
-# configuration
-DOCKERHUB_ORGANISATION=${DOCKERHUB_ORGANISATION:-"ocelotsocialnetwork"}
-OCELOT_VERSION=${OCELOT_VERSION:-$(node -p -e "require('${SCRIPT_DIR}/../../package.json').version")}
-OCELOT_GITHUB_RUN_NUMBER=${OCELOT_GITHUB_RUN_NUMBER:-master}
-OCELOT_VERSION_BUILD=${OCELOT_VERSION_BUILD:-${OCELOT_VERSION}-${OCELOT_GITHUB_RUN_NUMBER}}
-BRANDED_VERSION=${BRANDED_VERSION:-${GITHUB_RUN_NUMBER:-"local"}}
-BUILD_DATE=${BUILD_DATE:-$(date -u +'%Y-%m-%dT%H:%M:%SZ')}
-BUILD_VERSION_BASE=${BRANDED_VERSION}-ocelot.social${OCELOT_VERSION}
-BUILD_VERSION=${BRANDED_VERSION}-ocelot.social${OCELOT_VERSION_BUILD}
-BUILD_COMMIT=${GITHUB_SHA:-"0000000"}
-
-# backend
-docker build --target branded \
- -t "${DOCKERHUB_ORGANISATION}/backend-${DOCKERHUB_BRAND_VARRIANT}:latest" \
- -t "${DOCKERHUB_ORGANISATION}/backend-${DOCKERHUB_BRAND_VARRIANT}:${OCELOT_VERSION}" \
- -t "${DOCKERHUB_ORGANISATION}/backend-${DOCKERHUB_BRAND_VARRIANT}:${OCELOT_VERSION_BUILD}" \
- -t "${DOCKERHUB_ORGANISATION}/backend-${DOCKERHUB_BRAND_VARRIANT}:${BUILD_VERSION_BASE}" \
- -t "${DOCKERHUB_ORGANISATION}/backend-${DOCKERHUB_BRAND_VARRIANT}:${BUILD_VERSION}" \
- -f "${SCRIPT_DIR}/../src/docker/backend.Dockerfile" \
- --build-arg "CONFIGURATION=${CONFIGURATION}" \
- --build-arg "APP_IMAGE_TAG_CODE=${OCELOT_VERSION}-code" \
- --build-arg "APP_IMAGE_TAG_BASE=${OCELOT_VERSION}-base" \
- "${SCRIPT_DIR}/../."
-
-# webapp
-docker build --target branded \
- -t "${DOCKERHUB_ORGANISATION}/webapp-${DOCKERHUB_BRAND_VARRIANT}:latest" \
- -t "${DOCKERHUB_ORGANISATION}/webapp-${DOCKERHUB_BRAND_VARRIANT}:${OCELOT_VERSION}" \
- -t "${DOCKERHUB_ORGANISATION}/webapp-${DOCKERHUB_BRAND_VARRIANT}:${OCELOT_VERSION_BUILD}" \
- -t "${DOCKERHUB_ORGANISATION}/webapp-${DOCKERHUB_BRAND_VARRIANT}:${BUILD_VERSION_BASE}" \
- -t "${DOCKERHUB_ORGANISATION}/webapp-${DOCKERHUB_BRAND_VARRIANT}:${BUILD_VERSION}" \
- -f "${SCRIPT_DIR}/../src/docker/webapp.Dockerfile" \
- --build-arg "CONFIGURATION=${CONFIGURATION}" \
- --build-arg "APP_IMAGE_TAG_CODE=${OCELOT_VERSION}-code" \
- --build-arg "APP_IMAGE_TAG_BASE=${OCELOT_VERSION}-base" \
- "${SCRIPT_DIR}/../."
-
-# mainteance
-docker build --target branded \
- -t "${DOCKERHUB_ORGANISATION}/maintenance-${DOCKERHUB_BRAND_VARRIANT}:latest" \
- -t "${DOCKERHUB_ORGANISATION}/maintenance-${DOCKERHUB_BRAND_VARRIANT}:${OCELOT_VERSION}" \
- -t "${DOCKERHUB_ORGANISATION}/maintenance-${DOCKERHUB_BRAND_VARRIANT}:${OCELOT_VERSION_BUILD}" \
- -t "${DOCKERHUB_ORGANISATION}/maintenance-${DOCKERHUB_BRAND_VARRIANT}:${BUILD_VERSION_BASE}" \
- -t "${DOCKERHUB_ORGANISATION}/maintenance-${DOCKERHUB_BRAND_VARRIANT}:${BUILD_VERSION}" \
- -f "${SCRIPT_DIR}/../src/docker/maintenance.Dockerfile" \
- --build-arg "CONFIGURATION=${CONFIGURATION}" \
- --build-arg "APP_IMAGE_TAG_CODE=${OCELOT_VERSION}-code" \
- --build-arg "APP_IMAGE_TAG_BASE=${OCELOT_VERSION}-base" \
- "${SCRIPT_DIR}/../."
diff --git a/deployment/scripts/branded-images.upload.sh b/deployment/scripts/branded-images.upload.sh
deleted file mode 100755
index 83e27496b..000000000
--- a/deployment/scripts/branded-images.upload.sh
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/bin/bash
-
-# for a branded version you should pass the following env variables:
-# DOCKERHUB_ORGANISATION - your dockerhub organisation
-# OCELOT_VERSION - specify the specific tag to build upon e.g. 2.4.0-300
-# DOCKERHUB_USERNAME - your dockerhub username
-# DOCKERHUB_TOKEN - your dockerhub access token
-
-# base setup
-SCRIPT_PATH=$(realpath $0)
-SCRIPT_DIR=$(dirname $SCRIPT_PATH)
-
-# check DOCKERHUB_BRAND_VARRIANT
-if [ -z ${DOCKERHUB_BRAND_VARRIANT} ]; then
- echo "You must provide a `DOCKERHUB_BRAND_VARRIANT` via environment variable"
- exit 1
-fi
-echo "Using DOCKERHUB_BRAND_VARRIANT=${DOCKERHUB_BRAND_VARRIANT}"
-
-# configuration
-DOCKERHUB_ORGANISATION=${DOCKERHUB_ORGANISATION:-"ocelotsocialnetwork"}
-OCELOT_VERSION=${OCELOT_VERSION:-$(node -p -e "require('${SCRIPT_DIR}/../../package.json').version")}
-OCELOT_GITHUB_RUN_NUMBER=${OCELOT_GITHUB_RUN_NUMBER:-master}
-OCELOT_VERSION_BUILD=${OCELOT_VERSION_BUILD:-${OCELOT_VERSION}-${OCELOT_GITHUB_RUN_NUMBER}}
-BRANDED_VERSION=${BRANDED_VERSION:-${GITHUB_RUN_NUMBER:-"local"}}
-BUILD_VERSION_BASE=${BRANDED_VERSION}-ocelot.social${OCELOT_VERSION}
-BUILD_VERSION=${BRANDED_VERSION}-ocelot.social${OCELOT_VERSION_BUILD}
-
-# login to dockerhub
-echo "${DOCKERHUB_TOKEN}" | docker login -u "${DOCKERHUB_USERNAME}" --password-stdin
-
-# push backend images
-docker push ${DOCKERHUB_ORGANISATION}/backend-${DOCKERHUB_BRAND_VARRIANT}:latest
-docker push ${DOCKERHUB_ORGANISATION}/backend-${DOCKERHUB_BRAND_VARRIANT}:${OCELOT_VERSION}
-docker push ${DOCKERHUB_ORGANISATION}/backend-${DOCKERHUB_BRAND_VARRIANT}:${OCELOT_VERSION_BUILD}
-docker push ${DOCKERHUB_ORGANISATION}/backend-${DOCKERHUB_BRAND_VARRIANT}:${BUILD_VERSION_BASE}
-docker push ${DOCKERHUB_ORGANISATION}/backend-${DOCKERHUB_BRAND_VARRIANT}:${BUILD_VERSION}
-
-# push webapp images
-docker push ${DOCKERHUB_ORGANISATION}/webapp-${DOCKERHUB_BRAND_VARRIANT}:latest
-docker push ${DOCKERHUB_ORGANISATION}/webapp-${DOCKERHUB_BRAND_VARRIANT}:${OCELOT_VERSION}
-docker push ${DOCKERHUB_ORGANISATION}/webapp-${DOCKERHUB_BRAND_VARRIANT}:${OCELOT_VERSION_BUILD}
-docker push ${DOCKERHUB_ORGANISATION}/webapp-${DOCKERHUB_BRAND_VARRIANT}:${BUILD_VERSION_BASE}
-docker push ${DOCKERHUB_ORGANISATION}/webapp-${DOCKERHUB_BRAND_VARRIANT}:${BUILD_VERSION}
-
-# push maintenance images
-docker push ${DOCKERHUB_ORGANISATION}/maintenance-${DOCKERHUB_BRAND_VARRIANT}:latest
-docker push ${DOCKERHUB_ORGANISATION}/maintenance-${DOCKERHUB_BRAND_VARRIANT}:${OCELOT_VERSION}
-docker push ${DOCKERHUB_ORGANISATION}/maintenance-${DOCKERHUB_BRAND_VARRIANT}:${OCELOT_VERSION_BUILD}
-docker push ${DOCKERHUB_ORGANISATION}/maintenance-${DOCKERHUB_BRAND_VARRIANT}:${BUILD_VERSION_BASE}
-docker push ${DOCKERHUB_ORGANISATION}/maintenance-${DOCKERHUB_BRAND_VARRIANT}:${BUILD_VERSION}
\ No newline at end of file
diff --git a/deployment/scripts/cluster.backend-bash.sh b/deployment/scripts/cluster.backend-bash.sh
deleted file mode 100755
index 251978f7d..000000000
--- a/deployment/scripts/cluster.backend-bash.sh
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/bin/bash
-
-# time stamp
-printf "Neo4J bash :\n "
-date
-
-# base setup
-SCRIPT_PATH=$(realpath $0)
-SCRIPT_DIR=$(dirname $SCRIPT_PATH)
-
-# check CONFIGURATION
-if [[ -z "$CONFIGURATION" ]]; then
- echo "!!! You must provide a CONFIGURATION via environment variable !!!"
- exit 1
-fi
-
-printf " Cluster: %s\n" $CONFIGURATION
-
-# configuration
-KUBECONFIG=${KUBECONFIG:-${SCRIPT_DIR}/../configurations/${CONFIGURATION}/kubeconfig.yaml}
-
-kubectl --kubeconfig=${KUBECONFIG} -n default exec -it $(kubectl --kubeconfig=${KUBECONFIG} -n default get pods | grep ocelot-backend | awk '{ print $1 }') -- /bin/sh
\ No newline at end of file
diff --git a/deployment/scripts/cluster.backup.sh b/deployment/scripts/cluster.backup.sh
deleted file mode 100755
index 82cd85498..000000000
--- a/deployment/scripts/cluster.backup.sh
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/bin/bash
-
-# time stamp
-printf "Backup started at:\n "
-date
-
-# base setup
-SCRIPT_PATH=$(realpath $0)
-SCRIPT_DIR=$(dirname $SCRIPT_PATH)
-
-# check CONFIGURATION
-if [[ -z "$CONFIGURATION" ]]; then
- echo "!!! You must provide a CONFIGURATION via environment variable !!!"
- exit 1
-fi
-
-printf " Cluster: %s\n" $CONFIGURATION
-
-# configuration
-KUBECONFIG=${KUBECONFIG:-${SCRIPT_DIR}/../configurations/${CONFIGURATION}/kubeconfig.yaml}
-BACKUP_DATE=$(date "+%F_%H-%M-%S")
-BACKUP_FOLDER=${BACKUP_FOLDER:-${SCRIPT_DIR}/../configurations/${CONFIGURATION}/backup/${BACKUP_DATE}}
-
-printf "Backup folder name: %s\n" $BACKUP_DATE
-# create backup folder
-mkdir -p ${BACKUP_FOLDER}
-
-# cluster maintenance mode on && Neo4j maintenance mode on
-${SCRIPT_DIR}/cluster.neo4j.sh maintenance on
-
-# database backup
-echo "Dumping database ..."
-kubectl --kubeconfig=${KUBECONFIG} -n default exec -it \
- $(kubectl --kubeconfig=${KUBECONFIG} -n default get pods | grep ocelot-neo4j | awk '{ print $1 }') \
- -- neo4j-admin dump --to=/var/lib/neo4j/$BACKUP_DATE-neo4j-dump
-# copy neo4j backup to local drive
-echo "Copying database to local file system ..."
-kubectl --kubeconfig=${KUBECONFIG} cp \
- default/$(kubectl --kubeconfig=${KUBECONFIG} -n default get pods | grep ocelot-neo4j |awk '{ print $1 }'):/var/lib/neo4j/$BACKUP_DATE-neo4j-dump $BACKUP_FOLDER/neo4j-dump
-# copy image data
-echo "Copying public uploads to local file system ..."
-kubectl --kubeconfig=${KUBECONFIG} cp \
- default/$(kubectl --kubeconfig=${KUBECONFIG} -n default get pods | grep ocelot-backend |awk '{ print $1 }'):/app/public/uploads $BACKUP_FOLDER/public-uploads
-
-# Neo4j maintenance mode off && cluster maintenance mode off
-${SCRIPT_DIR}/cluster.neo4j.sh maintenance off
\ No newline at end of file
diff --git a/deployment/scripts/cluster.dashboard-token.sh b/deployment/scripts/cluster.dashboard-token.sh
deleted file mode 100755
index 4a3a55328..000000000
--- a/deployment/scripts/cluster.dashboard-token.sh
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/bin/bash
-
-# time stamp
-printf "Token :\n "
-date
-
-# base setup
-SCRIPT_PATH=$(realpath $0)
-SCRIPT_DIR=$(dirname $SCRIPT_PATH)
-
-# check CONFIGURATION
-if [[ -z "$CONFIGURATION" ]]; then
- echo "!!! You must provide a CONFIGURATION via environment variable !!!"
- exit 1
-fi
-
-printf " Cluster: %s\n" $CONFIGURATION
-
-# configuration
-KUBECONFIG=${KUBECONFIG:-${SCRIPT_DIR}/../configurations/${CONFIGURATION}/kubeconfig.yaml}
-
-kubectl --kubeconfig=${KUBECONFIG} create token admin-user -n kubernetes-dashboard
diff --git a/deployment/scripts/cluster.dashboard-tunnel.sh b/deployment/scripts/cluster.dashboard-tunnel.sh
deleted file mode 100755
index 126a0efe8..000000000
--- a/deployment/scripts/cluster.dashboard-tunnel.sh
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/bin/bash
-
-# time stamp
-printf "Tunnel started at:\n "
-date
-
-# base setup
-SCRIPT_PATH=$(realpath $0)
-SCRIPT_DIR=$(dirname $SCRIPT_PATH)
-
-# check CONFIGURATION
-if [[ -z "$CONFIGURATION" ]]; then
- echo "!!! You must provide a CONFIGURATION via environment variable !!!"
- exit 1
-fi
-
-printf " Cluster: %s\n" $CONFIGURATION
-
-# configuration
-KUBECONFIG=${KUBECONFIG:-${SCRIPT_DIR}/../configurations/${CONFIGURATION}/kubeconfig.yaml}
-
-kubectl --kubeconfig=${KUBECONFIG} get pods -n kubernetes-dashboard
-#kubectl --kubeconfig=${KUBECONFIG} get -o json -n kubernetes-dashboard pod kubernetes-dashboard-kong-5ccb57895b-vxxmf
-
-# export POD_NAME=$(kubectl --kubeconfig=${KUBECONFIG} get pods -n kubernetes-dashboard -l "app.kubernetes.io/name=kubernetes-dashboard-kong,app.kubernetes.io/instance=kubernetes-dashboard" -o jsonpath="{.items[0].metadata.name}")
-export POD_NAME=kubernetes-dashboard-kong-5ccb57895b-fzqk6
-# export POD_NAME=$(kubectl --kubeconfig=${KUBECONFIG} get pods -n kubernetes-dashboard -l "app.kubernetes.io/name=kubernetes-dashboard,app.kubernetes.io/instance=kubernetes-dashboard" -o jsonpath="{.items[0].metadata.name}")
-
-echo $POD_NAME
-kubectl --kubeconfig=${KUBECONFIG} -n kubernetes-dashboard port-forward $POD_NAME 8443:8443
-
-# kubectl --kubeconfig=${KUBECONFIG} -n kubernetes-dashboard create token admin-user
-
-# kubectl --kubeconfig=${KUBECONFIG} apply -f ${SCRIPT_DIR}/../scripts/admin-user.yml
-
-
-
diff --git a/deployment/scripts/cluster.install.sh b/deployment/scripts/cluster.install.sh
deleted file mode 100755
index c0861a16b..000000000
--- a/deployment/scripts/cluster.install.sh
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/bin/bash
-
-# base setup
-SCRIPT_PATH=$(realpath $0)
-SCRIPT_DIR=$(dirname $SCRIPT_PATH)
-
-# check CONFIGURATION
-if [ -z ${CONFIGURATION} ]; then
- echo "You must provide a `CONFIGURATION` via environment variable"
- exit 1
-fi
-echo "Using CONFIGURATION=${CONFIGURATION}"
-
-# configuration
-KUBECONFIG=${KUBECONFIG:-${SCRIPT_DIR}/../configurations/${CONFIGURATION}/kubeconfig.yaml}
-VALUES=${SCRIPT_DIR}/../configurations/${CONFIGURATION}/kubernetes/values.yaml
-DOCKERHUB_OCELOT_TAG=${DOCKERHUB_OCELOT_TAG:-"latest"}
-
-
-## install Ingress-Nginx
-helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
-helm repo update
-helm install \
- ingress-nginx ingress-nginx/ingress-nginx \
- --kubeconfig=${KUBECONFIG} \
- -f ${SCRIPT_DIR}/../src/kubernetes/nginx.values.yaml
-
-## install Cert-Manager
-helm repo add jetstack https://charts.jetstack.io
-helm repo update
-helm install \
- cert-manager jetstack/cert-manager \
- --kubeconfig=${KUBECONFIG} \
- --namespace cert-manager \
- --create-namespace \
- --version v1.13.2 \
- --set installCRDs=true
-
-## install Ocelot with helm
-helm install \
- ocelot \
- --kubeconfig=${KUBECONFIG} \
- --values ${VALUES} \
- --set appVersion="${DOCKERHUB_OCELOT_TAG}" \
- ${SCRIPT_DIR}/../src/kubernetes/ \
- --timeout 10m
-
-## set Neo4j database indexes, constrains, and initial admin account plus run migrate up
-kubectl --kubeconfig=${KUBECONFIG} \
- -n default \
- exec -it \
- $(kubectl --kubeconfig=${KUBECONFIG} -n default get pods | grep ocelot-backend | awk '{ print $1 }') -- \
- /bin/sh -c "yarn prod:migrate init && yarn prod:migrate up"
- # /bin/sh -c "node --experimental-repl-await build/src/db/clean.js && node --experimental-repl-await build/src/db/seed.js"
-
-echo "!!! You must install a firewall or similar !!! (for DigitalOcean see: deployment/src/kubernetes/README.md)"
diff --git a/deployment/scripts/cluster.maintenance.sh b/deployment/scripts/cluster.maintenance.sh
deleted file mode 100755
index 7454c5a3c..000000000
--- a/deployment/scripts/cluster.maintenance.sh
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/bin/bash
-
-# base setup
-SCRIPT_PATH=$(realpath $0)
-SCRIPT_DIR=$(dirname $SCRIPT_PATH)
-
-# check CONFIGURATION
-if [[ -z "$CONFIGURATION" ]]; then
- echo "You must provide a `CONFIGURATION` via environment variable"
- exit 1
-fi
-echo "Using CONFIGURATION=${CONFIGURATION}"
-
-# configuration
-KUBECONFIG=${KUBECONFIG:-${SCRIPT_DIR}/../configurations/${CONFIGURATION}/kubeconfig.yaml}
-
-case $1 in
- on)
- echo "Network maintenance: on"
- kubectl --kubeconfig=${KUBECONFIG} patch ingress ingress-ocelot-webapp --type merge --patch-file ${SCRIPT_DIR}/../src/kubernetes/patches/patch.ingress.maintenance.on.yaml
- ;;
- off)
- echo "Network maintenance: off"
- kubectl --kubeconfig=${KUBECONFIG} patch ingress ingress-ocelot-webapp --type merge --patch-file ${SCRIPT_DIR}/../src/kubernetes/patches/patch.ingress.maintenance.off.yaml
- ;;
- *)
- echo -e "Run this script with first argument either 'on' or 'off'"
- exit
- ;;
-esac
diff --git a/deployment/scripts/cluster.neo4j-bash.sh b/deployment/scripts/cluster.neo4j-bash.sh
deleted file mode 100755
index 8ac7ed6bc..000000000
--- a/deployment/scripts/cluster.neo4j-bash.sh
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/bin/bash
-
-# time stamp
-printf "Neo4J bash :\n "
-date
-
-# base setup
-SCRIPT_PATH=$(realpath $0)
-SCRIPT_DIR=$(dirname $SCRIPT_PATH)
-
-# check CONFIGURATION
-if [[ -z "$CONFIGURATION" ]]; then
- echo "!!! You must provide a CONFIGURATION via environment variable !!!"
- exit 1
-fi
-
-printf " Cluster: %s\n" $CONFIGURATION
-
-# configuration
-KUBECONFIG=${KUBECONFIG:-${SCRIPT_DIR}/../configurations/${CONFIGURATION}/kubeconfig.yaml}
-
-kubectl --kubeconfig=${KUBECONFIG} -n default exec -it $(kubectl --kubeconfig=${KUBECONFIG} -n default get pods | grep ocelot-neo4j | awk '{ print $1 }') -- bash
\ No newline at end of file
diff --git a/deployment/scripts/cluster.neo4j.sh b/deployment/scripts/cluster.neo4j.sh
deleted file mode 100755
index b16dd78ac..000000000
--- a/deployment/scripts/cluster.neo4j.sh
+++ /dev/null
@@ -1,57 +0,0 @@
-#!/bin/bash
-
-# base setup
-SCRIPT_PATH=$(realpath $0)
-SCRIPT_DIR=$(dirname $SCRIPT_PATH)
-
-# check CONFIGURATION
-if [[ -z "$CONFIGURATION" ]]; then
- echo "You must provide a `CONFIGURATION` via environment variable"
- exit 1
-fi
-
-# configuration
-KUBECONFIG=${KUBECONFIG:-${SCRIPT_DIR}/../configurations/${CONFIGURATION}/kubeconfig.yaml}
-
-case $1 in
- maintenance)
- case $2 in
- on)
- # maintenance mode on
- ${SCRIPT_DIR}/cluster.maintenance.sh on
-
- # set Neo4j in offline mode (maintenance)
- echo "Neo4j maintenance: on"
- kubectl --kubeconfig=${KUBECONFIG} get deployment ocelot-neo4j -o json \
- | jq '.spec.template.spec.containers[] += {"command": ["tail", "-f", "/dev/null"]}' \
- | kubectl --kubeconfig=${KUBECONFIG} apply -f -
-
- # wait for the container to restart
- echo "Wait 60s ..."
- sleep 60
- ;;
- off)
- # set Neo4j in online mode
- echo "Neo4j maintenance: off"
- kubectl --kubeconfig=${KUBECONFIG} get deployment ocelot-neo4j -o json \
- | jq 'del(.spec.template.spec.containers[].command)' \
- | kubectl --kubeconfig=${KUBECONFIG} apply -f -
-
- # wait for the container to restart
- echo "Wait 60s ..."
- sleep 60
-
- # maintenance mode off
- ${SCRIPT_DIR}/cluster.maintenance.sh off
- ;;
- *)
- echo -e "Run this script with first argument either 'off' or 'on'"
- exit
- ;;
- esac
- ;;
- *)
- echo -e "Run this script with first argument 'maintenance'"
- exit
- ;;
-esac
diff --git a/deployment/scripts/cluster.reseed.sh b/deployment/scripts/cluster.reseed.sh
deleted file mode 100755
index 7bd44153b..000000000
--- a/deployment/scripts/cluster.reseed.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/bin/bash
-
-# base setup
-SCRIPT_PATH=$(realpath $0)
-SCRIPT_DIR=$(dirname $SCRIPT_PATH)
-
-# check CONFIGURATION
-if [ -z ${CONFIGURATION} ]; then
- echo "You must provide a `CONFIGURATION` via environment variable"
- exit 1
-fi
-echo "Using CONFIGURATION=${CONFIGURATION}"
-
-# configuration
-KUBECONFIG=${KUBECONFIG:-${SCRIPT_DIR}/../configurations/${CONFIGURATION}/kubeconfig.yaml}
-
-# clean & seed
-kubectl --kubeconfig=${KUBECONFIG} -n default exec -it $(kubectl --kubeconfig=${KUBECONFIG} -n default get pods | grep ocelot-backend | awk '{ print $1 }') -- /bin/sh -c "node --experimental-repl-await build/src/db/clean.js && node --experimental-repl-await build/src/db/seed.js"
\ No newline at end of file
diff --git a/deployment/scripts/cluster.upgrade.sh b/deployment/scripts/cluster.upgrade.sh
deleted file mode 100755
index de0444065..000000000
--- a/deployment/scripts/cluster.upgrade.sh
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/bin/bash
-
-# base setup
-SCRIPT_PATH=$(realpath $0)
-SCRIPT_DIR=$(dirname $SCRIPT_PATH)
-
-# check CONFIGURATION
-if [ -z ${CONFIGURATION} ]; then
- echo "You must provide a `CONFIGURATION` via environment variable"
- exit 1
-fi
-echo "Using CONFIGURATION=${CONFIGURATION}"
-
-# configuration
-KUBECONFIG=${KUBECONFIG:-${SCRIPT_DIR}/../configurations/${CONFIGURATION}/kubeconfig.yaml}
-VALUES=${SCRIPT_DIR}/../configurations/${CONFIGURATION}/kubernetes/values.yaml
-DOCKERHUB_OCELOT_TAG=${DOCKERHUB_OCELOT_TAG:-"latest"}
-
-# upgrade with helm
-helm --kubeconfig=${KUBECONFIG} upgrade ocelot \
- --values ${VALUES} \
- --set appVersion="${DOCKERHUB_OCELOT_TAG}" \
- ${SCRIPT_DIR}/../src/kubernetes/ \
- --timeout 10m
\ No newline at end of file
diff --git a/deployment/scripts/clusters.backup-multiple-servers.sh b/deployment/scripts/clusters.backup-multiple-servers.sh
deleted file mode 100755
index dfab6b139..000000000
--- a/deployment/scripts/clusters.backup-multiple-servers.sh
+++ /dev/null
@@ -1,91 +0,0 @@
-#!/bin/bash
-
-# time stamp
-printf "\n\nMultiple backups started at:\n "
-date
-
-# base setup
-SCRIPT_PATH=$(realpath $0)
-SCRIPT_DIR=$(dirname $SCRIPT_PATH)
-
-# save old CONFIGURATION for later reset
-export SAVE_CONFIGURATION=$CONFIGURATION
-
-# export all variables in "../.env"
-set -a
-source ${SCRIPT_DIR}/../.env
-set +a
-
-# check BACKUP_CONFIGURATIONS
-if [[ -z "$BACKUP_CONFIGURATIONS" ]]; then
- #%! echo "You must provide a BACKUP_CONFIGURATIONS via environment variable"
- printf "!!! You must provide a BACKUP_CONFIGURATIONS via environment variable !!!\n"
- exit 1
-fi
-
-# check BACKUP_SAVED_BACKUPS_NUMBER
-if [[ -z ${BACKUP_SAVED_BACKUPS_NUMBER} ]]; then
- #%! echo "You must provide a BACKUP_SAVED_BACKUPS_NUMBER via environment variable"
- printf "!!! You must provide a BACKUP_SAVED_BACKUPS_NUMBER via environment variable !!!\n"
- exit 1
-fi
-
-# convert configurations to array
-IFS=' ' read -a CONFIGURATIONS_ARRAY <<< "$BACKUP_CONFIGURATIONS"
-
-# display the clusters
-printf "Backup the clusters:\n"
-for i in "${CONFIGURATIONS_ARRAY[@]}"
-do
- echo " $i"
-done
-
-# deleting backups?
-if (( BACKUP_SAVED_BACKUPS_NUMBER >= 1 )); then
- printf "Keep the last %d backups for all networks.\n" $BACKUP_SAVED_BACKUPS_NUMBER
-else
- echo "!!! ATTENTION: No backups are deleted !!!"
-fi
-
-echo "Cancel by ^C. You have 15 seconds"
-# wait for the admin to react
-sleep 15
-
-printf "\n"
-
-for i in "${CONFIGURATIONS_ARRAY[@]}"
-do
- export CONFIGURATION=$i
- # individual cluster backup
- ${SCRIPT_DIR}/cluster.backup.sh
-
- # deleting backups?
- if (( BACKUP_SAVED_BACKUPS_NUMBER >= 1 )); then
- # delete all oldest backups, but leave the last BACKUP_SAVED_BACKUPS_NUMBER
-
- keep=$BACKUP_SAVED_BACKUPS_NUMBER
- path="$SCRIPT_DIR/../configurations/$CONFIGURATION/backup/"
-
- cd $path
-
- printf "In\n '$path'\n remove:\n"
- while [ `ls -1 | wc -l` -gt $keep ]; do
- oldest=`ls -c1 | sort -n | head -1`
- printf " %s\n" $oldest
- rm -rf $oldest
- done
-
- printf "Keep the last %d backups:\n" $BACKUP_SAVED_BACKUPS_NUMBER
- ls -c1 | sort -n | awk '{print " " $0}'
-
- cd $SCRIPT_DIR
- else
- echo "!!! ATTENTION: No backups are deleted !!!"
- fi
-
- printf "\n"
-done
-
-# reset CONFIGURATION to old
-export CONFIGURATION=$SAVE_CONFIGURATION
-echo "Reset to CONFIGURATION=$CONFIGURATION"
diff --git a/deployment/scripts/secret.generate.sh b/deployment/scripts/secret.generate.sh
deleted file mode 100755
index cb788a7bd..000000000
--- a/deployment/scripts/secret.generate.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-
-# generate a secret and store it in the SECRET file.
-# Note that this overwrites the existing file
-
-# base setup
-SCRIPT_PATH=$(realpath $0)
-SCRIPT_DIR=$(dirname $SCRIPT_PATH)
-
-# check CONFIGURATION
-if [ -z ${CONFIGURATION} ]; then
- echo "You must provide a `CONFIGURATION` via environment variable"
- exit 1
-fi
-echo "Using CONFIGURATION=${CONFIGURATION}"
-
-# configuration
-SECRET_FILE=${SCRIPT_DIR}/../configurations/${CONFIGURATION}/SECRET
-
-openssl rand -base64 32 > ${SECRET_FILE}
\ No newline at end of file
diff --git a/deployment/scripts/secrets.decrypt.sh b/deployment/scripts/secrets.decrypt.sh
deleted file mode 100755
index a7a1328b1..000000000
--- a/deployment/scripts/secrets.decrypt.sh
+++ /dev/null
@@ -1,50 +0,0 @@
-#!/bin/bash
-
-# decrypt secrets in the selected configuration
-# Note that existing decrypted files will be overwritten
-
-# base setup
-SCRIPT_PATH=$(realpath $0)
-SCRIPT_DIR=$(dirname $SCRIPT_PATH)
-
-# check CONFIGURATION
-if [ -z ${CONFIGURATION} ]; then
- echo "You must provide a `CONFIGURATION` via environment variable"
- exit 1
-fi
-echo "Using CONFIGURATION=${CONFIGURATION}"
-
-# configuration
-SECRET=${SECRET}
-SECRET_FILE=${SCRIPT_DIR}/../configurations/${CONFIGURATION}/SECRET
-FILES=(\
- "${SCRIPT_DIR}/../configurations/${CONFIGURATION}/.env" \
- "${SCRIPT_DIR}/../configurations/${CONFIGURATION}/kubeconfig.yaml" \
- "${SCRIPT_DIR}/../configurations/${CONFIGURATION}/kubernetes/values.yaml" \
- "${SCRIPT_DIR}/../configurations/${CONFIGURATION}/kubernetes/dns.values.yaml" \
- )
-
-# Load SECRET from file if it is not set explicitly
-if [ -z ${SECRET} ] && [ -f "${SECRET_FILE}" ]; then
- SECRET=$(<${SECRET_FILE})
-fi
-
-# exit when there is no SECRET set
-if [ -z ${SECRET} ]; then
- echo "No SECRET provided and no SECRET-File found."
- exit 1
-fi
-
-# decrypt
-for file in "${FILES[@]}"
-do
- if [ -f "${file}.enc" ]; then
- #gpg --symmetric --batch --passphrase="${SECRET}" --cipher-algo AES256 --output ${file}.enc ${file}
- gpg --quiet --batch --yes --decrypt --passphrase="${SECRET}" --output ${file} ${file}.enc
- echo "Decrypted ${file}"
- fi
-done
-
-echo "DONE"
-# gpg --quiet --batch --yes --decrypt --passphrase="${SECRET}" \
-# --output $HOME/secrets/my_secret.json my_secret.json.gpg
diff --git a/deployment/scripts/secrets.encrypt.sh b/deployment/scripts/secrets.encrypt.sh
deleted file mode 100755
index 57be1c16e..000000000
--- a/deployment/scripts/secrets.encrypt.sh
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/bin/bash
-
-# encrypt secrets in the selected configuration
-# Note that existing encrypted files will be overwritten
-
-# base setup
-SCRIPT_PATH=$(realpath $0)
-SCRIPT_DIR=$(dirname $SCRIPT_PATH)
-
-# check CONFIGURATION
-if [ -z ${CONFIGURATION} ]; then
- echo "You must provide a `CONFIGURATION` via environment variable"
- exit 1
-fi
-echo "Using CONFIGURATION=${CONFIGURATION}"
-
-# configuration
-SECRET=${SECRET}
-SECRET_FILE=${SCRIPT_DIR}/../configurations/${CONFIGURATION}/SECRET
-FILES=(\
- "${SCRIPT_DIR}/../configurations/${CONFIGURATION}/.env" \
- "${SCRIPT_DIR}/../configurations/${CONFIGURATION}/kubeconfig.yaml" \
- "${SCRIPT_DIR}/../configurations/${CONFIGURATION}/kubernetes/values.yaml" \
- "${SCRIPT_DIR}/../configurations/${CONFIGURATION}/kubernetes/dns.values.yaml" \
- )
-
-# Load SECRET from file if it is not set explicitly
-if [ -z ${SECRET} ] && [ -f "${SECRET_FILE}" ]; then
- SECRET=$(<${SECRET_FILE})
-fi
-
-# exit when there is no SECRET set
-if [ -z ${SECRET} ]; then
- echo "No SECRET provided and no SECRET-File found."
- exit 1
-fi
-
-# encrypt
-for file in "${FILES[@]}"
-do
- if [ -f "${file}" ]; then
- gpg --symmetric --batch --yes --passphrase="${SECRET}" --cipher-algo AES256 --output ${file}.enc ${file}
- echo "Encrypted ${file}"
- fi
-done
-
-echo "DONE"
diff --git a/deployment/src/docker/backend.Dockerfile b/deployment/src/docker/backend.Dockerfile
deleted file mode 100644
index 76a091da2..000000000
--- a/deployment/src/docker/backend.Dockerfile
+++ /dev/null
@@ -1,46 +0,0 @@
-ARG APP_IMAGE=ocelotsocialnetwork/backend
-ARG APP_IMAGE_TAG_BASE=latest-base
-ARG APP_IMAGE_TAG_CODE=latest-code
-ARG APP_IMAGE_BASE=${APP_IMAGE}:${APP_IMAGE_TAG_BASE}
-ARG APP_IMAGE_CODE=${APP_IMAGE}:${APP_IMAGE_TAG_CODE}
-
-##################################################################################
-# CODE (branded) #################################################################
-##################################################################################
-FROM $APP_IMAGE_CODE AS code
-
-ARG CONFIGURATION=example
-
-# copy public constants and email templates into the Docker image to brand it
-COPY configurations/${CONFIGURATION}/branding/constants/emails.ts src/config/
-COPY configurations/${CONFIGURATION}/branding/constants/logos.ts src/config/
-COPY configurations/${CONFIGURATION}/branding/constants/metadata.ts src/config/
-COPY configurations/${CONFIGURATION}/branding/email/ src/middleware/helpers/email/
-
-##################################################################################
-# BUILD ##########################################################################
-##################################################################################
-FROM code AS build
-
-# yarn install
-RUN yarn install --production=false --frozen-lockfile --non-interactive
-# yarn build
-RUN yarn run build
-
-##################################################################################
-# BRANDED (Does contain only "binary"- and static-files to reduce image size) ####
-##################################################################################
-FROM $APP_IMAGE_BASE AS branded
-
-# TODO - do all copying with one COPY command to have one layer
-# Copy "binary"-files from build image
-COPY --from=build ${DOCKER_WORKDIR}/build ./build
-COPY --from=build ${DOCKER_WORKDIR}/node_modules ./node_modules
-# TODO - externalize the uploads so we can copy the whole folder
-COPY --from=build ${DOCKER_WORKDIR}/public/img/ ./public/img/
-COPY --from=build ${DOCKER_WORKDIR}/public/providers.json ./build/public/providers.json
-# Copy package.json for script definitions (lock file should not be needed)
-COPY --from=build ${DOCKER_WORKDIR}/package.json ./package.json
-
-# Run command
-CMD /bin/sh -c "yarn run start"
diff --git a/deployment/src/docker/maintenance.Dockerfile b/deployment/src/docker/maintenance.Dockerfile
deleted file mode 100644
index 8eefcc067..000000000
--- a/deployment/src/docker/maintenance.Dockerfile
+++ /dev/null
@@ -1,44 +0,0 @@
-ARG APP_IMAGE=ocelotsocialnetwork/maintenance
-ARG APP_IMAGE_TAG_BASE=latest-base
-ARG APP_IMAGE_TAG_CODE=latest-code
-ARG APP_IMAGE_BASE=${APP_IMAGE}:${APP_IMAGE_TAG_BASE}
-ARG APP_IMAGE_CODE=${APP_IMAGE}:${APP_IMAGE_TAG_CODE}
-
-##################################################################################
-# CODE (branded) #################################################################
-##################################################################################
-FROM $APP_IMAGE_CODE AS code
-
-ARG CONFIGURATION=example
-
-# copy public constants into the Docker image to brand it
-COPY configurations/${CONFIGURATION}/branding/static/ static/
-COPY configurations/${CONFIGURATION}/branding/constants/ constants/
-RUN /bin/sh -c 'cd constants && for f in *.ts; do mv -- "$f" "${f%.ts}.js"; done'
-
-# locales
-COPY configurations/${CONFIGURATION}/branding/locales/*.json locales/tmp/
-COPY src/tools/ tools/
-RUN apk add --no-cache bash jq
-RUN tools/merge-locales.sh
-
-##################################################################################
-# BUILD ##########################################################################
-##################################################################################
-FROM code AS build
-
-# yarn install
-## unnicely done in $APP_IMAGE_CODE at the moment, see main repo
-# RUN yarn install --production=false --frozen-lockfile --non-interactive
-# yarn generate
-RUN yarn run generate
-
-##################################################################################
-# BRANDED ### TODO # TODO # TODO # TODO # TODO # TODO # TODO # TODO # TODO ####
-##################################################################################
-# FROM $APP_IMAGE_BASE AS branded
-FROM nginx:alpine AS branded
-
-COPY --from=build ./app/dist/ /usr/share/nginx/html/
-RUN rm /etc/nginx/conf.d/default.conf
-COPY --from=code ./app/maintenance/nginx/custom.conf /etc/nginx/conf.d/
diff --git a/deployment/src/docker/webapp.Dockerfile b/deployment/src/docker/webapp.Dockerfile
deleted file mode 100644
index 54f79a56f..000000000
--- a/deployment/src/docker/webapp.Dockerfile
+++ /dev/null
@@ -1,61 +0,0 @@
-ARG APP_IMAGE=ocelotsocialnetwork/webapp
-ARG APP_IMAGE_TAG_BASE=latest-base
-ARG APP_IMAGE_TAG_CODE=latest-code
-ARG APP_IMAGE_BASE=${APP_IMAGE}:${APP_IMAGE_TAG_BASE}
-ARG APP_IMAGE_CODE=${APP_IMAGE}:${APP_IMAGE_TAG_CODE}
-
-##################################################################################
-# CODE (branded) #################################################################
-##################################################################################
-FROM $APP_IMAGE_CODE AS code
-
-ARG CONFIGURATION=example
-
-# copy public constants into the Docker image to brand it
-COPY configurations/${CONFIGURATION}/branding/static/ static/
-COPY configurations/${CONFIGURATION}/branding/constants/ constants/
-RUN /bin/sh -c 'cd constants && for f in *.ts; do mv -- "$f" "${f%.ts}.js"; done'
-COPY configurations/${CONFIGURATION}/branding/locales/html/ locales/html/
-COPY configurations/${CONFIGURATION}/branding/assets/styles/imports/ assets/styles/imports/
-COPY configurations/${CONFIGURATION}/branding/assets/fonts/ assets/fonts/
-
-# locales
-COPY configurations/${CONFIGURATION}/branding/locales/*.json locales/tmp/
-COPY src/tools/ tools/
-RUN apk add --no-cache bash jq
-RUN tools/merge-locales.sh
-
-##################################################################################
-# BUILD ##########################################################################
-##################################################################################
-FROM code AS build
-
-# yarn install
-RUN yarn install --production=false --frozen-lockfile --non-interactive
-# yarn build
-RUN yarn run build
-
-##################################################################################
-# BRANDED (Does contain only "binary"- and static-files to reduce image size) ####
-##################################################################################
-FROM $APP_IMAGE_BASE AS branded
-
-# TODO - do all copying with one COPY command to have one layer
-# Copy "binary"-files from build image
-COPY --from=build ${DOCKER_WORKDIR}/.nuxt ./.nuxt
-COPY --from=build ${DOCKER_WORKDIR}/node_modules ./node_modules
-COPY --from=build ${DOCKER_WORKDIR}/nuxt.config.js ./nuxt.config.js
-# Copy static files
-# TODO - this seems not be needed anymore for the new rebranding
-# TODO - this should be one Folder containign all stuff needed to be copied
-COPY --from=build ${DOCKER_WORKDIR}/config/ ./config/
-COPY --from=build ${DOCKER_WORKDIR}/constants ./constants
-COPY --from=build ${DOCKER_WORKDIR}/static ./static
-COPY --from=build ${DOCKER_WORKDIR}/locales ./locales
-COPY --from=build ${DOCKER_WORKDIR}/assets/styles/imports ./assets/styles/imports
-COPY --from=build ${DOCKER_WORKDIR}/assets/fonts ./assets/fonts
-# Copy package.json for script definitions (lock file should not be needed)
-COPY --from=build ${DOCKER_WORKDIR}/package.json ./package.json
-
-# Run command
-CMD /bin/sh -c "yarn run start"
diff --git a/deployment/src/kubernetes/Backup.md b/deployment/src/kubernetes/Backup.md
deleted file mode 100644
index 5e4c55ddb..000000000
--- a/deployment/src/kubernetes/Backup.md
+++ /dev/null
@@ -1,308 +0,0 @@
-# Kubernetes Backup Of Ocelot.Social
-
-One of the most important tasks in managing a running [ocelot.social](https://github.com/Ocelot-Social-Community/Ocelot-Social) network is backing up the data, e.g. the Neo4j database and the stored image files.
-
-## Manual Offline Backup
-
-To prepare, [kubectl](https://kubernetes.io/docs/tasks/tools/) must be installed and ready to use so that you have access to Kubernetes on your server.
-
-Check if the correct context is used by running the following commands:
-
-```bash
-# check context and set the correct one
-$ kubectl config get-contexts
-# if the wrong context is chosen use it
-$ kubectl config use-context
-# if you like check additionally if all pods are running well
-$ kubectl -n default get pods -o wide
-```
-
-The very first step is to put the website into **maintenance mode**.
-
-### Set Maintenance Mode
-
-There are two ways to put the network into maintenance mode:
-
-- via Kubernetes Dashboard
-- via `kubectl`
-
-#### Maintenance Mode Via Kubernetes Dashboard
-
-In the Kubernetes Dashboard, you can select `Ingresses` from the left side menu under `Service`.
-
-After that, in the list that appears, you will find the entry `ingress-ocelot-webapp`, which has three dots on the right, where you can click to edit the entry.
-
-You can scroll to the end of the YAML file, where you will find one or more `host` entries under `rules`, one for each domain of the network.
-
-In all entries, change the value of the `serviceName` entry from ***ocelot-webapp*** to `ocelot-maintenance` and the value of the `servicePort` entry from ***3000*** to `80`.
-
-First, check if your website is still online.
-After you click `Update`, the new settings will be applied and you will find your website in maintenance mode.
-
-#### Maintenance Mode Via `kubectl`
-
-To put the network into maintenance mode, run the following commands in the terminal:
-
-```bash
-# list ingresses
-$ kubectl get ingress -n default
-# edit ingress
-$ kubectl -n default edit ingress ingress-ocelot-webapp
-```
-
-Change the content of the YAML file for all domains to:
-
-```yaml
- spec:
- rules:
- - host: network-domain.social
- http:
- paths:
- - backend:
- # serviceName: ocelot-webapp
- # servicePort: 3000
- serviceName: ocelot-maintenance
- servicePort: 80
-```
-
-First, check if your website is still online.
-After you save the file, the new settings will be applied and you will find your website in maintenance mode.
-
-### Neo4j Database Offline Backup
-
-Before we can back up the database, we need to put it into **sleep mode**.
-
-#### Set Neo4j To Sleep Mode
-
-Again there are two ways to put the network into sleep mode:
-
-- via Kubernetes Dashboard
-- via `kubectl`
-
-##### Sleep Mode Via Kubernetes Dashboard
-
-In the Kubernetes Dashboard, you can select `Deployments` from the left side menu under `Workloads`.
-
-After that, in the list that appears, you will find the entry `ocelot-neo4j`, which has three dots on the right, where you can click to edit the entry.
-
-Scroll to the end of the YAML file where you will find the `spec.template.spec.containers` entry. Here you can insert the `command` entry directly after `imagePullPolicy` in a new line.
-
-```yaml
- terminationMessagePath: /dev/termination-log
- terminationMessagePolicy: File
- imagePullPolicy: Always
- command: ["tail", "-f", "/dev/null"]
-```
-
-After clicking `Update`, the new settings will be applied and you should check in the `Pods` menu item on the left side if the `ocelot-neo4j-` pod restarts.
-
-##### Sleep Mode Via `kubectl`
-
-To put Neo4j into sleep mode, run the following commands in the terminal:
-
-```bash
-# list deployments
-$ kubectl get deployments -n default
-# edit deployment
-$ kubectl -n default edit deployment ocelot-neo4j
-```
-
-Scroll to the `spec.template.spec.containers` entry. Here you can insert the `command` entry directly after `imagePullPolicy` in a new line.
-
-```yaml
- image: /neo4j-community-branded:latest
- imagePullPolicy: Always
- command: ["tail", "-f", "/dev/null"]
-```
-
-After pressing enter, the new settings will be applied and you should check if the `ocelot-neo4j-` pod restarts.
-Use command:
-
-```bash
-# check if the old pod restarts
-$ kubectl -n default get pods -o wide
-```
-
-#### Generate Offline Backup
-
-The offline backup is generated via `kubectl`:
-
-```bash
-# check for the Neo4j pod
-$ kubectl -n default get pods -o wide
-
-# ls: see wish backup dumps are already there
-$ kubectl -n default exec -it $(kubectl -n default get pods | grep ocelot-neo4j | awk '{ print $1 }') -- ls
-
-# bash: enter bash of Neo4j
-$ kubectl -n default exec -it $(kubectl -n default get pods | grep ocelot-neo4j | awk '{ print $1 }') -- bash
-# generate Dump
-neo4j% neo4j-admin dump --to=/var/lib/neo4j/$(date +%F)-neo4j-dump
-# exit bash
-neo4j% exit
-
-# ls: see if the new backup dump is there
-$ kubectl -n default exec -it $(kubectl -n default get pods | grep ocelot-neo4j | awk '{ print $1 }') -- ls
-```
-
-If you need a specific database name, add the option `--database=` to the command `neo4j-admin dump`.
-To find out the default database name, see the [Neo4j readme](https://github.com/Ocelot-Social-Community/Ocelot-Social/blob/master/neo4j/README.md).
-
-Lets copy the dump backup
-
-```bash
-# copy dump onto backup volume direct
-$ kubectl cp default/$(kubectl -n default get pods | grep ocelot-neo4j |awk '{ print $1 }'):/var/lib/neo4j/$(date +%F)-neo4j-dump /Volumes//$(date +%F)-neo4j-dump
-
-```
-
-#### Remove Sleep Mode From Neo4j
-
-Again there are two ways to put the network into working mode:
-
-- via Kubernetes Dashboard
-- via `kubectl`
-
-##### Remove Sleep Mode Via Kubernetes Dashboard
-
-In the Kubernetes Dashboard, you can select `Deployments` from the left side menu under `Workloads`.
-
-After that, in the list that appears, you will find the entry `ocelot-neo4j`, which has three dots on the right, where you can click to edit the entry.
-
-Scroll to the `spec.template.spec.containers.command` entry and remove the whole `command` entry like:
-
-```yaml
- containers:
- - name: container-ocelot-neo4j
- image: 'senderfm/neo4j-community-branded:latest'
- command:
- - tail
- - '-f'
- - /dev/null
- ports:
- - containerPort: 7687
- protocol: TCP
-```
-
-And get:
-
-```yaml
- containers:
- - name: container-ocelot-neo4j
- image: 'senderfm/neo4j-community-branded:latest'
- ports:
- - containerPort: 7687
- protocol: TCP
-```
-
-After clicking `Update`, the new settings will be applied and you should check in the `Pods` menu item on the left side if the `ocelot-neo4j-` pod restarts.
-
-##### Remove Sleep Mode Via `kubectl`
-
-To put Neo4j into working mode, run the following commands in the terminal:
-
-```bash
-# list deployments
-$ kubectl get deployments -n default
-# edit deployment
-$ kubectl -n default edit deployment ocelot-neo4j
-```
-
-Scroll to the `spec.template.spec.containers.command` entry and remove the whole `command` entry like:
-
-```yaml
- spec:
- containers:
- - command:
- - tail
- - -f
- - /dev/null
- envFrom:
- - configMapRef:
- name: configmap-ocelot-neo4j
-```
-
-And get:
-
-```yaml
- spec:
- containers:
- - envFrom:
- - configMapRef:
- name: configmap-ocelot-neo4j
-```
-
-After pressing enter, the new settings will be applied and you should check if the `ocelot-neo4j-` pod restarts.
-Use command:
-
-```bash
-# check if the old pod restarts
-$ kubectl -n default get pods -o wide
-```
-
-### Backend Backup
-
-To back up the images from the backend volume, run commands:
-
-```bash
-# ls: backend/public/uploads
-$ kubectl -n default exec -it $(kubectl -n default get pods | grep ocelot-backend | awk '{ print $1 }') -- ls public/uploads
-
-# copy all images from upload to backup volume direct
-$ kubectl cp default/$(kubectl -n default get pods | grep ocelot-backend |awk '{ print $1 }'):/app/public/uploads /Volumes//$(date +%F)-public-uploads
-```
-
-### Remove Maintenance Mode
-
-There are two ways to put the network into working mode:
-
-- via Kubernetes Dashboard
-- via `kubectl`
-
-#### Remove Maintenance Mode Via Kubernetes Dashboard
-
-In the Kubernetes Dashboard, you can select `Ingresses` from the left side menu under `Service`.
-
-After that, in the list that appears, you will find the entry `ingress-ocelot-webapp`, which has three dots on the right, where you can click to edit the entry.
-
-You can scroll to the end of the YAML file, where you will find one or more `host` entries under `rules`, one for each domain of the network.
-
-In all entries, change the value of the `serviceName` entry from ***ocelot-maintenance*** to `ocelot-webapp` and the value of the `servicePort` entry from ***80*** to `3000`.
-
-First, check if your website is still in maintenance mode.
-After you click `Update`, the new settings will be applied and you will find your website online again.
-
-#### Remove Maintenance Mode Via `kubectl`
-
-To put the network into working mode, run the following commands in the terminal:
-
-```bash
-# list ingresses
-$ kubectl get ingress -n default
-# edit ingress
-$ kubectl -n default edit ingress ingress-ocelot-webapp
-```
-
-Change the content of the YAML file for all domains to:
-
-```yaml
- spec:
- rules:
- - host: network-domain.social
- http:
- paths:
- - backend:
- serviceName: ocelot-webapp
- servicePort: 3000
- # serviceName: ocelot-maintenance
- # servicePort: 80
-```
-
-First, check if your website is still in maintenance mode.
-After you save the file, the new settings will be applied and you will find your website online again.
-
-XXX
-
-```bash
-# Dump: Create a Backup in Kubernetes: https://docs.human-connection.org/human-connection/deployment/volumes/neo4j-offline-backup#create-a-backup-in-kubernetes
-```
diff --git a/deployment/src/kubernetes/Chart.yaml b/deployment/src/kubernetes/Chart.yaml
deleted file mode 100644
index 5b953e3e7..000000000
--- a/deployment/src/kubernetes/Chart.yaml
+++ /dev/null
@@ -1,39 +0,0 @@
-type: application
-apiVersion: v2
-name: ocelot-social
-version: "1.0.0"
-# The appVersion defines which docker image is pulled.
-# Having it set to latest will pull the latest build on dockerhub.
-# You are free to define a specific version here tho.
-# e.g. appVersion: "latest" or "1.0.2-3-ocelot.social1.0.2-79"
-# Be aware that this requires all your apps to have the same docker image version available.
-appVersion: "latest"
-description: The Helm chart for ocelot.social
-home: https://ocelot.social
-sources:
- - https://github.com/Ocelot-Social-Community/
- - https://github.com/Ocelot-Social-Community/Ocelot-Social
- - https://github.com/Ocelot-Social-Community/Ocelot-Social-Deploy-Rebranding
-maintainers:
- - name: Ulf Gebhardt
- email: ulf.gebhardt@webcraft-media.de
- url: https://www.webcraft-media.de/#!ulf_gebhardt
-icon: https://github.com/Ocelot-Social-Community/Ocelot-Social/raw/master/webapp/static/img/custom/welcome.svg
-deprecated: false
-
-# Unused Fields
-#dependencies: # A list of the chart requirements (optional)
-# - name: ingress-nginx
-# version: v1.10.0
-# repository: https://kubernetes.github.io/ingress-nginx
-# condition: (optional) A yaml path that resolves to a boolean, used for enabling/disabling charts (e.g. subchart1.enabled )
-# tags: # (optional)
-# - Tags can be used to group charts for enabling/disabling together
-# import-values: # (optional)
-# - ImportValues holds the mapping of source values to parent key to be imported. Each item can be a string or pair of child/parent sublist items.
-# alias: (optional) Alias to be used for the chart. Useful when you have to add the same chart multiple times
-#kubeVersion: A SemVer range of compatible Kubernetes versions (optional)
-#keywords:
-# - A list of keywords about this project (optional)
-#annotations:
-# example: A list of annotations keyed by name (optional).
\ No newline at end of file
diff --git a/deployment/src/kubernetes/DigitalOcean.md b/deployment/src/kubernetes/DigitalOcean.md
deleted file mode 100644
index 251a6d341..000000000
--- a/deployment/src/kubernetes/DigitalOcean.md
+++ /dev/null
@@ -1,145 +0,0 @@
-# DigitalOcean
-
-If you want to set up a [Kubernetes](https://kubernetes.io) cluster on [DigitalOcean](https://www.digitalocean.com), follow this guide.
-
-## Create Account
-
-Create an account with DigitalOcean.
-
-## Add Project
-
-On the left side you will see a menu. Click on `New Project`. Enter a name and click `Create Project`.
-Skip moving resources, probably.
-
-## Create Kubernetes Cluster
-
-On the right top you find the button `Create`. Click on it and choose `Kubernetes - Create Kubernetes Cluster`.
-
-- use the latest Kubernetes version
-- choose your datacenter region
-- name your node pool: e.g. `pool-`
-- `2 Basic nodes` with `2.5 GB RAM (total of 4 GB)`, `2 shared CPUs`, and `80 GB Disk` each is optimal for the beginning
-- set your cluster name: e.g. `cluster-`
-- select your project
-- no tags necessary
-
-## Getting Started
-
-After your cluster is set up – see progress bar above – click on `Getting started`. Please install the following management tools:
-
-- [kubectl v1.24.1](https://kubernetes.io/docs/tasks/tools/)
-- [doctl v1.78.0](https://github.com/digitalocean/doctl)
-
-Install the tools as described on the tab or see the links here.
-
-After the installation, click on `Continue`.
-
-### Download Configuration File
-
-Follow the steps to download the configuration file.
-
-You can skip this step if necessary, as you can download the file later. You can then do this by clicking on `Kubernetes` in the left menu. In the menu to the right of the cluster name in the cluster list, click on `More` and select `Download Config`.
-
-### Patch & Minor Version Upgrades
-
-Skip `Patch & Minor Version Upgrades` for now.
-
-### Install 1-Click Apps
-
-You don't need a 1-click app. Our helmet script will install the required NGINXs.
-Therefore, skip this step as well.
-
-For a 1-click Kubernetes Dashboard or alternatives, follow the next steps.
-
-## Install Kubernetes Dashboard
-
-We recommend installing a Kubernetes Dashboard, as DigitalOcean no longer offers a pre-installed dashboard.
-
-- 1-click-deployment of [Kubernetes Dashboard on DigitalOcean marketplace](https://marketplace.digitalocean.com/apps/kubernetes-dashboard)
-
-There you will also find a section entitled `Getting Started`, which describes how you can log in from your local computer.
-
-Very short description:
-
-### In your DigitalOcean Account
-
-For authentication, download the current cluster configuration file from DigitalOcean.
-
-### In Terminal
-
-Set the context of the cluster by command:
-
-```bash
-kubectl config use-context
-```
-
-We seem to have two instances in our DigitalOcean cluster how we need to log into the Kubernetes Dashboard.
-It looks like it depends on the Kubernetes Dashboard version, but we are not absolutely sure.
-
-#### Login with `kubeconfig` File
-
-Port-forward the Kubernetes Dashboard to your local machine:
-
-```bash
-# save pod name
-$ export POD_NAME=$(kubectl get pods -n kubernetes-dashboard -l "app.kubernetes.io/name=kubernetes-dashboard,app.kubernetes.io/instance=kubernetes-dashboard" -o jsonpath="{.items[0].metadata.name}")
-# forward port
-$ kubectl -n kubernetes-dashboard port-forward $POD_NAME 8443:8443
-```
-
-Access the URL in your local web browser at `https://127.0.0.1:8443/`, and log in using your Kubernetes cluster credentials – downloaded config file.
-You may encounter a certificate warning, so make sure to override it.
-
-#### Login with Admin Token
-
-Port-forward the Kubernetes Dashboard to your local machine:
-
-```bash
-# create your access token
-kubectl -n kubernetes-dashboard create token admin-user
-# forward port
-kubectl -n kubernetes-dashboard port-forward svc/kubernetes-dashboard-kong-proxy 8443:443
-```
-
-Access the URL in your local web browser at `https://127.0.0.1:8443/`, and log in using your access token.
-You may encounter a certificate warning, so make sure to override it.
-
-## Alternatives to Kubernetes Dashboard
-
-DigitalOcean has a website about Kubernetes Dashboard and alternatives:
-
--
-
-## DNS Configuration
-
-There are the following two ways to set up the DNS.
-
-### Manage DNS With A Different Domain Provider
-
-If you have registered your domain or subdomain with another domain provider, add an `A` record there with one of the IP addresses from one of the cluster droplets in the DNS.
-
-To find the correct IP address to set in the DNS `A` record, click `Droplets` in the left main menu.
-A list of all your droplets will be displayed.
-Take one of the IPs of perhaps two or more droplets in your cluster from the list and enter it into the `A` record.
-
-### Manage DNS With DigitalOcean
-
-***TODO:** How to configure the DigitalOcean DNS management service …*
-
-To understand what makes sense to do when managing your DNS with DigitalOcean, you need to know how DNS works:
-
-DNS means `Domain Name System`. It resolves domains like `example.com` into an IP like `123.123.123.123`.
-DigitalOcean is not a domain registrar, but provides a DNS management service. If you use DigitalOcean's DNS management service, you can configure [your cluster](./README.md#dns) to always resolve the domain to the correct IP and automatically update it for that.
-The IPs of the DigitalOcean machines are not necessarily stable, so the cluster's DNS service will update the DNS records managed by DigitalOcean to the new IP as needed.
-
-***CAUTION:** If you are using an external DNS, you currently have to do this manually, which can cause downtime.*
-
-## Deploy
-
-Yeah, you're done here. Back to [Deployment with Helm for Kubernetes](./README.md).
-
-## Backups On DigitalOcean
-
-You can and should do [backups](./Backup.md) with Kubernetes for sure.
-
-Additional to backup and copying the Neo4j database dump and the backend images you can do a volume snapshot on DigitalOcean at the moment you have the database in sleep mode.
diff --git a/deployment/src/kubernetes/README.md b/deployment/src/kubernetes/README.md
deleted file mode 100644
index 9b1c6fbe2..000000000
--- a/deployment/src/kubernetes/README.md
+++ /dev/null
@@ -1,350 +0,0 @@
-# Kubernetes Helm Installation Of Ocelot.Social
-
-Deploying [ocelot.social](https://github.com/Ocelot-Social-Community/Ocelot-Social) with [Helm](https://helm.sh) for [Kubernetes](https://kubernetes.io) is very straight forward. All you have to do is to change certain parameters, like domain names and API keys, then you just install our provided Helm chart to your cluster.
-
-## Kubernetes Cloud Hosting
-
-There are various ways to set up your own or a managed Kubernetes cluster. We will extend the following lists over time.
-Please contact us if you are interested in options not listed below.
-
-Managed Kubernetes:
-
-- [DigitalOcean](./DigitalOcean.md)
-
-## Configuration
-
-You can customize the network server with your configuration by duplicate the `values.template.yaml` to a new `values.yaml` file and change it to your need. All included variables will be available as environment variables in your deployed kubernetes pods.
-
-Besides the `values.template.yaml` file we provide a `nginx.values.template.yaml` and `dns.values.template.yaml` for a similar procedure. The new `nginx.values.yaml` is the configuration for the ingress-nginx Helm chart, while the `dns.values.yaml` file is for automatically updating the dns values on DigitalOcean and therefore optional.
-
-## Installation
-
-Due to the many limitations of Helm you still have to do several manual steps.
-Those occur before you run the actual *ocelot.social* Helm chart.
-Obviously it is expected of you to have `helm` and `kubectl` installed.
-For the cert-manager you may need `cmctl`, see below.
-For DigitalOcean you may also need `doctl`.
-
-Install:
-
-- [kubectl v1.24.1](https://kubernetes.io/docs/tasks/tools/)
-- [doctl v1.78.0](https://docs.digitalocean.com/reference/doctl/how-to/install/)
-- [cmctl v1.8.2](https://cert-manager.io/docs/usage/cmctl/#installation)
-- [helm v3.9.0](https://helm.sh/docs/intro/install/)
-
-
-### Cert Manager (https)
-
-Please refer to [cert-manager.io docs](https://cert-manager.io/docs/installation/) for more details.
-
-***ATTENTION:*** *Be with the Terminal in your repository in the folder of this README.*
-
-We have three ways to install the cert-manager, purely via `kubectl`, via `cmctl`, or with `helm`.
-
-We recommend using `helm` because then we do not mix the installation methods.
-Please have a look here:
-
-- [Installing with Helm](https://cert-manager.io/docs/installation/helm/#installing-with-helm)
-
-Our Helm installation is optimized for cert-manager version `v1.13.1` and `kubectl` version `"v1.28.2`.
-
-Please search here for cert-manager versions that are compatible with your `kubectl` version on the cluster and on the client: [cert-manager Supported Releases](https://cert-manager.io/docs/installation/supported-releases/#supported-releases).
-
-***ATTENTION:*** *When uninstalling cert-manager, be sure to use the same method as for installation! Otherwise, we could end up in a broken state, see [Uninstall](https://cert-manager.io/docs/installation/kubectl/#uninstalling).*
-
-
-
-### Ingress-Nginx
-
-#### 1. Add Helm repository for `ingress-nginx` and update
-
-```bash
-$ helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
-$ helm repo update
-```
-
-#### 2. Install ingress-nginx
-
-```bash
-# in configuration/
-
-# kubeconfig.yaml set globaly
-helm install ingress-nginx ingress-nginx/ingress-nginx -f ../../src/kubernetes/nginx.values.yaml
-
-# or kubeconfig.yaml in your repo, then adjust
-helm install \
- ingress-nginx ingress-nginx/ingress-nginx -f ../../src/kubernetes/nginx.values.yaml \
- --kubeconfig ./kubeconfig.yaml
-```
-
-### DigitalOcean Firewall
-
-This is only necessary if you run DigitalOcean without load balancer ([see here for more info](https://stackoverflow.com/questions/54119399/expose-port-80-on-digital-oceans-managed-kubernetes-without-a-load-balancer/55968709)) .
-
-#### 1. Authenticate towards DO with your local `doctl`
-
-You will need a DO token for that.
-
-```bash
-# without doctl context
-$ doctl auth init
-# with doctl new context to be filled in
-$ doctl auth init --context
-```
-
-You will need an API token, which you can generate in the control panel at .
-
-#### 2. Generate DO firewall
-
- Get the `CLUSTER_UUID` value from the dashboard or from the ID column via `doctl kubernetes cluster list`:
-
-```bash
-# need to apply access token by `doctl auth init` before
-$ doctl kubernetes cluster list
-```
-
-Fill in the `CLUSTER_UUID` and `your-domain`. The latter with hyphens `-` instead of dots `.`:
-
-```bash
-# without doctl context
-$ doctl compute firewall create \
---inbound-rules="protocol:tcp,ports:80,address:0.0.0.0/0,address:::/0 protocol:tcp,ports:443,address:0.0.0.0/0,address:::/0" \
---tag-names=k8s: \
---name=-http-https
-# with doctl context to be filled in
-$ doctl compute firewall create \
---inbound-rules="protocol:tcp,ports:80,address:0.0.0.0/0,address:::/0 protocol:tcp,ports:443,address:0.0.0.0/0,address:::/0" \
---tag-names=k8s: \
---name=-http-https --context
-```
-
-To get informations about your success use this command. (Fill in the `ID` you got at creation.):
-
-```bash
-# without doctl context
-$ doctl compute firewall get
-# with doctl context to be filled in
-$ doctl compute firewall get --context
-```
-
-### DNS
-
-***ATTENTION:** This seems not to work at all so we leave it away at the moment*
-
-***TODO:** I thought this is necessary if we use the DigitalOcean DNS management service? See [Manage DNS With DigitalOcean](./DigitalOcean.md#manage-dns-with-digitalocean)*
-
-This chart is only necessary (recommended is more precise) if you run DigitalOcean without load balancer.
-You need to generate an access token with read + write for the `dns.values.yaml` at and fill it in.
-
-#### 1. Add Helm repository for `binami` and update
-
-```bash
-$ helm repo add bitnami https://charts.bitnami.com/bitnami
-$ helm repo update
-```
-
-#### 2. Install DNS
-
-```bash
-# !!! untested for now for new deployment structure !!!
-
-# kubeconfig.yaml set globaly
-$ helm install dns bitnami/external-dns -f dns.values.yaml
-# or kubeconfig.yaml in your repo, then adjust
-$ helm --kubeconfig=/../kubeconfig.yaml install dns bitnami/external-dns -f dns.values.yaml
-```
-
-### Ocelot.Social
-
-***Attention:** Before installing your own ocelot.social network, you need to create a DockerHub (account and) organization, put its name in the `package.json` file, and push your deployment and rebranding code to GitHub so that GitHub Actions can push your Docker images to DockerHub. This is because Kubernetes will pull these images to create PODs from them.*
-
-All commands for ocelot need to be executed in the kubernetes folder. Therefore `cd deployment/kubernetes/` is expected to be run before every command. Furthermore the given commands will install ocelot into the default namespace. This can be modified to by attaching `--namespace not.default`.
-
-#### Install
-
-Only run once for the first time of installation:
-
-```bash
-# in configuration/
-
-# kubeconfig.yaml set globaly
-helm install ocelot \
- --values ./kubernetes/values.yaml \
- --set appVersion="latest" \
- ../../src/kubernetes/ \
- --timeout 10m
-
-# or kubeconfig.yaml in your repo, then adjust
-helm install ocelot \
- --kubeconfig ./kubeconfig.yaml \
- --values ./kubernetes/values.yaml \
- --set appVersion="latest" \
- ../../src/kubernetes/ \
- --timeout 10m
-```
-
-#### Upgrade & Update
-
-Run for all upgrades and updates:
-
-```bash
-# !!! untested for now for new deployment structure !!!
-
-# in configuration/
-
-# kubeconfig.yaml set globaly
-helm upgrade ocelot \
- --values ./kubernetes/values.yaml \
- --set appVersion="latest" \
- ../../src/kubernetes/ \
- --timeout 10m
-
-# or kubeconfig.yaml in your repo, then adjust
-helm upgrade ocelot \
- --kubeconfig ./kubeconfig.yaml \
- --values ./kubernetes/values.yaml \
- --set appVersion="latest" \
- ../../src/kubernetes/ \
- --timeout 10m
-```
-
-#### Rollback
-
-Run for a rollback, in case something went wrong:
-
-```bash
-# !!! untested for now for new deployment structure !!!
-
-# in configuration/
-
-# kubeconfig.yaml set globaly
-helm rollback ocelot --timeout 10m
-
-# or kubeconfig.yaml in your repo, then adjust
-helm rollback ocelot \
- --kubeconfig ./kubeconfig.yaml \
- --timeout 10m
-```
-
-#### Uninstall
-
-Be aware that if you uninstall ocelot the formerly bound volumes become unbound. Those volumes contain all data from uploads and database. You have to manually free their reference in order to bind them again when reinstalling. Once unbound from their former container references they should automatically be rebound (considering the sizes did not change)
-
-```bash
-# !!! untested for now for new deployment structure !!!
-
-# in configuration/
-
-# kubeconfig.yaml set globaly
-helm uninstall ocelot --timeout 10m
-
-# or kubeconfig.yaml in your repo, then adjust
-helm uninstall ocelot \
- --kubeconfig ./kubeconfig.yaml \
- --timeout 10m
-```
-
-## Backups
-
-You can and should do [backups](./Backup.md) with Kubernetes for sure.
-
-
-
-## Kubernetes Commands (Without Helm) To Deploy New Docker Images To A Kubernetes Cluster
-
-### Deploy A Version
-
-```bash
-# !!! be aware of the correct kube context !!!
-$ kubectl config get-contexts
-
-# deploy version '$BUILD_VERSION'
-# !!! 'latest' is not recommended on production !!!
-
-# for easyness set env
-$ export BUILD_VERSION=1.0.8-48-ocelot.social1.0.8-184 # example
-# check this with
-$ echo $BUILD_VERSION
-1.0.8-48-ocelot.social1.0.8-184
-
-# deploy actual version '$BUILD_VERSION' to Kubernetes cluster
-$ kubectl -n default set image deployment/ocelot-webapp container-ocelot-webapp=ocelotsocialnetwork/webapp:$BUILD_VERSION
-$ kubectl -n default rollout restart deployment/ocelot-webapp
-$ kubectl -n default set image deployment/ocelot-backend container-ocelot-backend=ocelotsocialnetwork/backend:$BUILD_VERSION
-$ kubectl -n default rollout restart deployment/ocelot-backend
-$ kubectl -n default set image deployment/ocelot-maintenance container-ocelot-maintenance=ocelotsocialnetwork/maintenance:$BUILD_VERSION
-$ kubectl -n default rollout restart deployment/ocelot-maintenance
-$ kubectl -n default set image deployment/ocelot-neo4j container-ocelot-neo4j=ocelotsocialnetwork/neo4j-community:$BUILD_VERSION
-$ kubectl -n default rollout restart deployment/ocelot-neo4j
-# verify deployment and wait for the pods of each deployment to get ready for cleaning and seeding of the database
-$ kubectl -n default rollout status deployment/ocelot-webapp --timeout=240s
-$ kubectl -n default rollout status deployment/ocelot-maintenance --timeout=240s
-$ kubectl -n default rollout status deployment/ocelot-backend --timeout=240s
-$ kubectl -n default rollout status deployment/ocelot-neo4j --timeout=240s
-```
-
-### Staging – Clean And Seed Neo4j Database
-
-***ATTENTION:*** Cleaning and seeding of our Neo4j database is only possible in production if env `PRODUCTION_DB_CLEAN_ALLOW=true` is set in our deployment.
-
-```bash
-# !!! be aware of the correct kube context !!!
-$ kubectl config get-contexts
-
-# for staging: reset and seed Neo4j database via backend
-$ kubectl -n default exec -it $(kubectl -n default get pods | grep ocelot-backend | awk '{ print $1 }') -- /bin/sh -c "node --experimental-repl-await build/src/db/clean.js && node --experimental-repl-await build/src/db/seed.js"
-
-# or alternatively
-
-# for production: set Neo4j database indexes, constrains, and initial admin account plus run migrate up via backend
-$ kubectl -n default exec -it $(kubectl -n default get pods | grep ocelot-backend | awk '{ print $1 }') -- /bin/sh -c "yarn prod:migrate init && yarn prod:migrate up"
-```
diff --git a/deployment/src/kubernetes/nginx.values.yaml b/deployment/src/kubernetes/nginx.values.yaml
deleted file mode 100644
index 8035104d2..000000000
--- a/deployment/src/kubernetes/nginx.values.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-# please duplicate template file and rename to "nginx.values.yaml" and fill in your value
-
-controller:
- kind: DaemonSet
- hostNetwork: true
- dnsPolicy: ClusterFirstWithHostNet
- ingressClass: nginx
- daemonset:
- useHostPort: true
- service:
- type: ClusterIP
-rbac:
- create: true
\ No newline at end of file
diff --git a/deployment/src/kubernetes/patches/patch.ingress.maintenance.off.yaml b/deployment/src/kubernetes/patches/patch.ingress.maintenance.off.yaml
deleted file mode 100644
index c01745d0a..000000000
--- a/deployment/src/kubernetes/patches/patch.ingress.maintenance.off.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-spec:
- rules:
- - host:
- http:
- paths:
- - path: /
- pathType: ImplementationSpecific
- backend:
- service:
- name: ocelot-webapp
- port:
- number: 3000
\ No newline at end of file
diff --git a/deployment/src/kubernetes/patches/patch.ingress.maintenance.on.yaml b/deployment/src/kubernetes/patches/patch.ingress.maintenance.on.yaml
deleted file mode 100644
index 8a2c5d45f..000000000
--- a/deployment/src/kubernetes/patches/patch.ingress.maintenance.on.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-spec:
- rules:
- - host:
- http:
- paths:
- - path: /
- pathType: ImplementationSpecific
- backend:
- service:
- name: ocelot-maintenance
- port:
- number: 80
\ No newline at end of file
diff --git a/deployment/src/kubernetes/templates/NOTES.txt b/deployment/src/kubernetes/templates/NOTES.txt
deleted file mode 100644
index 3db4648ca..000000000
--- a/deployment/src/kubernetes/templates/NOTES.txt
+++ /dev/null
@@ -1 +0,0 @@
-You installed ocelot-social! Congrats <3
\ No newline at end of file
diff --git a/deployment/src/kubernetes/templates/backend/ConfigMap.yml b/deployment/src/kubernetes/templates/backend/ConfigMap.yml
deleted file mode 100644
index a421fab41..000000000
--- a/deployment/src/kubernetes/templates/backend/ConfigMap.yml
+++ /dev/null
@@ -1,31 +0,0 @@
-kind: ConfigMap
-apiVersion: v1
-metadata:
- name: configmap-{{ .Release.Name }}-backend
- labels:
- app.kubernetes.io/name: "{{ .Chart.Name }}"
- app.kubernetes.io/instance: "{{ .Release.Name }}"
- app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
- app.kubernetes.io/component: "configmap-backend"
- app.kubernetes.io/part-of: "{{ .Chart.Name }}"
- app.kubernetes.io/managed-by: "{{ .Release.Service }}"
- helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
-data:
- PRODUCTION_DB_CLEAN_ALLOW: "{{ .Values.PRODUCTION_DB_CLEAN_ALLOW }}"
- PUBLIC_REGISTRATION: "{{ .Values.PUBLIC_REGISTRATION }}"
- INVITE_REGISTRATION: "{{ .Values.INVITE_REGISTRATION }}"
- CATEGORIES_ACTIVE: "{{ .Values.CATEGORIES_ACTIVE }}"
- CLIENT_URI: "{{ .Values.BACKEND.CLIENT_URI }}"
- EMAIL_DEFAULT_SENDER: "{{ .Values.BACKEND.EMAIL_DEFAULT_SENDER }}"
- SMTP_HOST: "{{ .Values.BACKEND.SMTP_HOST }}"
- SMTP_PORT: "{{ .Values.BACKEND.SMTP_PORT }}"
- SMTP_IGNORE_TLS: "{{ .Values.BACKEND.SMTP_IGNORE_TLS }}"
- SMTP_SECURE: "{{ .Values.BACKEND.SMTP_SECURE }}"
- SMTP_DKIM_DOMAINNAME: "{{ .Values.BACKEND.SMTP_DKIM_DOMAINNAME }}"
- SMTP_DKIM_KEYSELECTOR: "{{ .Values.BACKEND.SMTP_DKIM_KEYSELECTOR }}"
- GRAPHQL_URI: "http://{{ .Release.Name }}-backend:4000"
- NEO4J_URI: "bolt://{{ .Release.Name }}-neo4j:7687"
- #REDIS_DOMAIN: ---toBeSet(IP)---
- #REDIS_PORT: "6379"
- #SENTRY_DSN_WEBAPP: "---toBeSet---"
- #SENTRY_DSN_BACKEND: "---toBeSet---"
\ No newline at end of file
diff --git a/deployment/src/kubernetes/templates/backend/Deployment.yaml b/deployment/src/kubernetes/templates/backend/Deployment.yaml
deleted file mode 100644
index 4d0f66dbc..000000000
--- a/deployment/src/kubernetes/templates/backend/Deployment.yaml
+++ /dev/null
@@ -1,62 +0,0 @@
-kind: Deployment
-apiVersion: apps/v1
-metadata:
- name: {{ .Release.Name }}-backend
- labels:
- app.kubernetes.io/name: "{{ .Chart.Name }}"
- app.kubernetes.io/instance: "{{ .Release.Name }}"
- app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
- app.kubernetes.io/component: "deployment-backend"
- app.kubernetes.io/part-of: "{{ .Chart.Name }}"
- app.kubernetes.io/managed-by: "{{ .Release.Service }}"
- helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
-spec:
- replicas: 1
- minReadySeconds: {{ .Values.BACKEND.MIN_READY_SECONDS }}
- progressDeadlineSeconds: {{ .Values.BACKEND.PROGRESS_DEADLINE_SECONDS }}
- revisionHistoryLimit: {{ .Values.BACKEND.REVISIONS_HISTORY_LIMIT }}
- strategy:
- rollingUpdate:
- maxUnavailable: 1
- selector:
- matchLabels:
- app: {{ .Release.Name }}-backend
- template:
- metadata:
- annotations:
- backup.velero.io/backup-volumes: uploads
- # make sure the pod is redeployed
- rollme: {{ randAlphaNum 5 | quote }}
- labels:
- app: {{ .Release.Name }}-backend
- spec:
- containers:
- - name: container-{{ .Release.Name }}-backend
- image: "{{ .Values.BACKEND.DOCKER_IMAGE_REPO }}:{{ .Chart.AppVersion }}"
- imagePullPolicy: {{ .Values.BACKEND.DOCKER_IMAGE_PULL_POLICY }}
- envFrom:
- - configMapRef:
- name: configmap-{{ .Release.Name }}-backend
- - secretRef:
- name: secret-{{ .Release.Name }}-backend
- resources:
- requests:
- memory: {{ .Values.BACKEND.RESOURCE_REQUESTS_MEMORY | default "500M" | quote }}
- limits:
- memory: {{ .Values.BACKEND.RESOURCE_LIMITS_MEMORY | default "1G" | quote }}
- ports:
- - containerPort: 4000
- protocol: TCP
- terminationMessagePath: /dev/termination-log
- terminationMessagePolicy: File
- volumeMounts:
- - mountPath: /app/public/uploads
- name: uploads
- dnsPolicy: ClusterFirst
- schedulerName: default-scheduler
- restartPolicy: {{ .Values.BACKEND.CONTAINER_RESTART_POLICY }}
- terminationGracePeriodSeconds: {{ .Values.BACKEND.CONTAINER_TERMINATION_GRACE_PERIOD_SECONDS }}
- volumes:
- - name: uploads
- persistentVolumeClaim:
- claimName: volume-claim-{{ .Release.Name }}-uploads
\ No newline at end of file
diff --git a/deployment/src/kubernetes/templates/backend/PersistentVolumeClaim.yaml b/deployment/src/kubernetes/templates/backend/PersistentVolumeClaim.yaml
deleted file mode 100644
index 758e9e18c..000000000
--- a/deployment/src/kubernetes/templates/backend/PersistentVolumeClaim.yaml
+++ /dev/null
@@ -1,24 +0,0 @@
-kind: PersistentVolumeClaim
-apiVersion: v1
-metadata:
- name: volume-claim-{{ .Release.Name }}-uploads
- labels:
- app.kubernetes.io/name: "{{ .Chart.Name }}"
- app.kubernetes.io/instance: "{{ .Release.Name }}"
- app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
- app.kubernetes.io/component: "volume-claim-backend"
- app.kubernetes.io/part-of: "{{ .Chart.Name }}"
- app.kubernetes.io/managed-by: "{{ .Release.Service }}"
- helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
-spec:
- #dataSource:
- # name: uploads-snapshot
- # kind: VolumeSnapshot
- # apiGroup: snapshot.storage.k8s.io
- storageClassName: storage-{{ .Release.Name }}-persistent
- accessModes:
- - ReadWriteOnce
- resources:
- requests:
- storage: {{ .Values.BACKEND.STORAGE_UPLOADS }}
-
diff --git a/deployment/src/kubernetes/templates/backend/Secret.yaml b/deployment/src/kubernetes/templates/backend/Secret.yaml
deleted file mode 100644
index 605a92234..000000000
--- a/deployment/src/kubernetes/templates/backend/Secret.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
-kind: Secret
-apiVersion: v1
-metadata:
- name: secret-{{ .Release.Name }}-backend
- labels:
- app.kubernetes.io/name: "{{ .Chart.Name }}"
- app.kubernetes.io/instance: "{{ .Release.Name }}"
- app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
- app.kubernetes.io/component: "secret-backend"
- app.kubernetes.io/part-of: "{{ .Chart.Name }}"
- app.kubernetes.io/managed-by: "{{ .Release.Service }}"
- helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
-stringData:
- JWT_SECRET: "{{ .Values.BACKEND.JWT_SECRET }}"
- MAPBOX_TOKEN: "{{ .Values.MAPBOX_TOKEN }}"
- PRIVATE_KEY_PASSPHRASE: "{{ .Values.BACKEND.PRIVATE_KEY_PASSPHRASE }}"
- SMTP_USERNAME: "{{ .Values.BACKEND.SMTP_USERNAME }}"
- SMTP_PASSWORD: "{{ .Values.BACKEND.SMTP_PASSWORD }}"
- SMTP_DKIM_PRIVATKEY: "{{ .Values.BACKEND.SMTP_DKIM_PRIVATKEY }}"
- #NEO4J_USERNAME: ""
- #NEO4J_PASSWORD: ""
- #REDIS_PASSWORD: ---toBeSet---
diff --git a/deployment/src/kubernetes/templates/backend/Service.yaml b/deployment/src/kubernetes/templates/backend/Service.yaml
deleted file mode 100644
index 9029be586..000000000
--- a/deployment/src/kubernetes/templates/backend/Service.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-kind: Service
-apiVersion: v1
-metadata:
- name: {{ .Release.Name }}-backend
- labels:
- app.kubernetes.io/name: "{{ .Chart.Name }}"
- app.kubernetes.io/instance: "{{ .Release.Name }}"
- app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
- app.kubernetes.io/component: "service-backend"
- app.kubernetes.io/part-of: "{{ .Chart.Name }}"
- app.kubernetes.io/managed-by: "{{ .Release.Service }}"
- helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
-spec:
- ports:
- - name: {{ .Release.Name }}-graphql
- port: 4000
- targetPort: 4000
- protocol: TCP
- selector:
- app: {{ .Release.Name }}-backend
diff --git a/deployment/src/kubernetes/templates/issuer/letsencrypt-production.yaml b/deployment/src/kubernetes/templates/issuer/letsencrypt-production.yaml
deleted file mode 100644
index 6f82f3686..000000000
--- a/deployment/src/kubernetes/templates/issuer/letsencrypt-production.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
-apiVersion: cert-manager.io/v1
-kind: ClusterIssuer
-metadata:
- name: letsencrypt-production
- labels:
- app.kubernetes.io/name: "{{ .Chart.Name }}"
- app.kubernetes.io/instance: "{{ .Release.Name }}"
- app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
- app.kubernetes.io/component: "letsencrypt-production"
- app.kubernetes.io/part-of: "{{ .Chart.Name }}"
- app.kubernetes.io/managed-by: "{{ .Release.Service }}"
- helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
-spec:
- acme:
- server: https://acme-v02.api.letsencrypt.org/directory
- email: {{ .Values.LETSENCRYPT.EMAIL }}
- privateKeySecretRef:
- name: letsencrypt-production
- solvers:
- - http01:
- ingress:
- class: nginx
diff --git a/deployment/src/kubernetes/templates/issuer/letsencrypt-staging.yaml b/deployment/src/kubernetes/templates/issuer/letsencrypt-staging.yaml
deleted file mode 100644
index e488d9335..000000000
--- a/deployment/src/kubernetes/templates/issuer/letsencrypt-staging.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
-apiVersion: cert-manager.io/v1
-kind: ClusterIssuer
-metadata:
- name: letsencrypt-staging
- labels:
- app.kubernetes.io/name: "{{ .Chart.Name }}"
- app.kubernetes.io/instance: "{{ .Release.Name }}"
- app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
- app.kubernetes.io/component: "letsencrypt-staging"
- app.kubernetes.io/part-of: "{{ .Chart.Name }}"
- app.kubernetes.io/managed-by: "{{ .Release.Service }}"
- helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
-spec:
- acme:
- server: https://acme-staging-v02.api.letsencrypt.org/directory
- email: {{ .Values.LETSENCRYPT.EMAIL }}
- privateKeySecretRef:
- name: letsencrypt-staging
- solvers:
- - http01:
- ingress:
- class: nginx
diff --git a/deployment/src/kubernetes/templates/jobs/job-db-init.yaml b/deployment/src/kubernetes/templates/jobs/job-db-init.yaml
deleted file mode 100644
index f207bd8c1..000000000
--- a/deployment/src/kubernetes/templates/jobs/job-db-init.yaml
+++ /dev/null
@@ -1,29 +0,0 @@
-kind: Job
-apiVersion: batch/v1
-metadata:
- name: job-{{ .Release.Name }}-db-init
- labels:
- app.kubernetes.io/name: "{{ .Chart.Name }}"
- app.kubernetes.io/instance: "{{ .Release.Name }}"
- app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
- app.kubernetes.io/component: "job-db-init"
- app.kubernetes.io/part-of: "{{ .Chart.Name }}"
- app.kubernetes.io/managed-by: "{{ .Release.Service }}"
- helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
- annotations:
- "helm.sh/hook": post-install
- "helm.sh/hook-delete-policy": hook-succeeded, hook-failed
- "helm.sh/hook-weight": "0"
-spec:
- template:
- spec:
- restartPolicy: Never
- containers:
- - name: job-{{ .Release.Name }}-db-init
- image: "{{ .Values.BACKEND.DOCKER_IMAGE_REPO }}:{{ .Chart.AppVersion }}"
- command: ["/bin/sh", "-c", "yarn prod:migrate init"]
- envFrom:
- - configMapRef:
- name: configmap-{{ .Release.Name }}-backend
- - secretRef:
- name: secret-{{ .Release.Name }}-backend
\ No newline at end of file
diff --git a/deployment/src/kubernetes/templates/jobs/job-db-migrate.yaml b/deployment/src/kubernetes/templates/jobs/job-db-migrate.yaml
deleted file mode 100644
index 950793db4..000000000
--- a/deployment/src/kubernetes/templates/jobs/job-db-migrate.yaml
+++ /dev/null
@@ -1,29 +0,0 @@
-kind: Job
-apiVersion: batch/v1
-metadata:
- name: job-{{ .Release.Name }}-db-migrate
- labels:
- app.kubernetes.io/name: "{{ .Chart.Name }}"
- app.kubernetes.io/instance: "{{ .Release.Name }}"
- app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
- app.kubernetes.io/component: "job-db-migrate"
- app.kubernetes.io/part-of: "{{ .Chart.Name }}"
- app.kubernetes.io/managed-by: "{{ .Release.Service }}"
- helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
- annotations:
- "helm.sh/hook": post-install, post-upgrade
- "helm.sh/hook-delete-policy": hook-succeeded, hook-failed
- "helm.sh/hook-weight": "5"
-spec:
- template:
- spec:
- restartPolicy: Never
- containers:
- - name: job-{{ .Release.Name }}-db-migrations
- image: "{{ .Values.BACKEND.DOCKER_IMAGE_REPO }}:{{ .Chart.AppVersion }}"
- command: ["/bin/sh", "-c", "yarn prod:migrate up"]
- envFrom:
- - configMapRef:
- name: configmap-{{ .Release.Name }}-backend
- - secretRef:
- name: secret-{{ .Release.Name }}-backend
\ No newline at end of file
diff --git a/deployment/src/kubernetes/templates/maintenance/ConfigMap.yml b/deployment/src/kubernetes/templates/maintenance/ConfigMap.yml
deleted file mode 100644
index fe29afbfe..000000000
--- a/deployment/src/kubernetes/templates/maintenance/ConfigMap.yml
+++ /dev/null
@@ -1,14 +0,0 @@
-kind: ConfigMap
-apiVersion: v1
-metadata:
- name: configmap-{{ .Release.Name }}-maintenance
- labels:
- app.kubernetes.io/name: "{{ .Chart.Name }}"
- app.kubernetes.io/instance: "{{ .Release.Name }}"
- app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
- app.kubernetes.io/component: "configmap-maintenance"
- app.kubernetes.io/part-of: "{{ .Chart.Name }}"
- app.kubernetes.io/managed-by: "{{ .Release.Service }}"
- helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
-data:
- HOST: "0.0.0.0"
\ No newline at end of file
diff --git a/deployment/src/kubernetes/templates/maintenance/Deployment.yaml b/deployment/src/kubernetes/templates/maintenance/Deployment.yaml
deleted file mode 100644
index ec37552d1..000000000
--- a/deployment/src/kubernetes/templates/maintenance/Deployment.yaml
+++ /dev/null
@@ -1,45 +0,0 @@
-kind: Deployment
-apiVersion: apps/v1
-metadata:
- name: {{ .Release.Name }}-maintenance
- labels:
- app.kubernetes.io/name: "{{ .Chart.Name }}"
- app.kubernetes.io/instance: "{{ .Release.Name }}"
- app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
- app.kubernetes.io/component: "deployment-maintenance"
- app.kubernetes.io/part-of: "{{ .Chart.Name }}"
- app.kubernetes.io/managed-by: "{{ .Release.Service }}"
- helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
-spec:
- revisionHistoryLimit: {{ .Values.MAINTENANCE.REVISIONS_HISTORY_LIMIT }}
- strategy:
- rollingUpdate:
- maxUnavailable: 1
- selector:
- matchLabels:
- app: {{ .Release.Name }}-maintenance
- template:
- metadata:
- labels:
- app: {{ .Release.Name }}-maintenance
- # make sure the pod is redeployed
- rollme: {{ randAlphaNum 5 | quote }}
- spec:
- containers:
- - name: container-{{ .Release.Name }}-maintenance
- image: "{{ .Values.MAINTENANCE.DOCKER_IMAGE_REPO }}:{{ .Chart.AppVersion }}"
- imagePullPolicy: {{ .Values.MAINTENANCE.DOCKER_IMAGE_PULL_POLICY }}
- envFrom:
- - configMapRef:
- name: configmap-{{ .Release.Name }}-webapp
- - secretRef:
- name: secret-{{ .Release.Name }}-webapp
- resources:
- requests:
- memory: {{ .Values.MAINTENANCE.RESOURCE_REQUESTS_MEMORY | default "500M" | quote }}
- limits:
- memory: {{ .Values.MAINTENANCE.RESOURCE_LIMITS_MEMORY | default "1G" | quote }}
- ports:
- - containerPort: 80
- restartPolicy: {{ .Values.MAINTENANCE.CONTAINER_RESTART_POLICY }}
- terminationGracePeriodSeconds: {{ .Values.MAINTENANCE.CONTAINER_TERMINATION_GRACE_PERIOD_SECONDS }}
diff --git a/deployment/src/kubernetes/templates/maintenance/Secret.yaml b/deployment/src/kubernetes/templates/maintenance/Secret.yaml
deleted file mode 100644
index b4752e552..000000000
--- a/deployment/src/kubernetes/templates/maintenance/Secret.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-kind: Secret
-apiVersion: v1
-metadata:
- name: secret-{{ .Release.Name }}-maintenance
- labels:
- app.kubernetes.io/name: "{{ .Chart.Name }}"
- app.kubernetes.io/instance: "{{ .Release.Name }}"
- app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
- app.kubernetes.io/component: "secret-maintenance"
- app.kubernetes.io/part-of: "{{ .Chart.Name }}"
- app.kubernetes.io/managed-by: "{{ .Release.Service }}"
- helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
-stringData:
diff --git a/deployment/src/kubernetes/templates/maintenance/Service.yaml b/deployment/src/kubernetes/templates/maintenance/Service.yaml
deleted file mode 100644
index 95f042df5..000000000
--- a/deployment/src/kubernetes/templates/maintenance/Service.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-kind: Service
-apiVersion: v1
-metadata:
- name: {{ .Release.Name }}-maintenance
- labels:
- app.kubernetes.io/name: "{{ .Chart.Name }}"
- app.kubernetes.io/instance: "{{ .Release.Name }}"
- app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
- app.kubernetes.io/component: "service-maintenance"
- app.kubernetes.io/part-of: "{{ .Chart.Name }}"
- app.kubernetes.io/managed-by: "{{ .Release.Service }}"
- helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
-spec:
- ports:
- - name: {{ .Release.Name }}-http
- port: 80
- targetPort: 80
- protocol: TCP
- selector:
- app: {{ .Release.Name }}-maintenance
diff --git a/deployment/src/kubernetes/templates/neo4j/ConfigMap.yml b/deployment/src/kubernetes/templates/neo4j/ConfigMap.yml
deleted file mode 100644
index 677218c16..000000000
--- a/deployment/src/kubernetes/templates/neo4j/ConfigMap.yml
+++ /dev/null
@@ -1,24 +0,0 @@
-kind: ConfigMap
-apiVersion: v1
-metadata:
- name: configmap-{{ .Release.Name }}-neo4j
- labels:
- app.kubernetes.io/name: "{{ .Chart.Name }}"
- app.kubernetes.io/instance: "{{ .Release.Name }}"
- app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
- app.kubernetes.io/component: "configmap-neo4j"
- app.kubernetes.io/part-of: "{{ .Chart.Name }}"
- app.kubernetes.io/managed-by: "{{ .Release.Service }}"
- helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
-data:
- NEO4J_ACCEPT_LICENSE_AGREEMENT: "{{ .Values.NEO4J.ACCEPT_LICENSE_AGREEMENT }}"
- NEO4J_AUTH: "{{ .Values.NEO4J.AUTH }}"
- NEO4J_dbms_connector_bolt_thread__pool__max__size: "{{ .Values.NEO4J.DBMS_CONNECTOR_BOLT_THREAD_POOL_MAX_SIZE }}"
- NEO4J_dbms_memory_heap_initial__size: "{{ .Values.NEO4J.DBMS_MEMORY_HEAP_INITIAL_SIZE }}"
- NEO4J_dbms_memory_heap_max__size: "{{ .Values.NEO4J.DBMS_MEMORY_HEAP_MAX_SIZE }}"
- NEO4J_dbms_memory_pagecache_size: "{{ .Values.NEO4J.DBMS_MEMORY_PAGECACHE_SIZE }}"
- NEO4J_dbms_security_procedures_unrestricted: "{{ .Values.NEO4J.DBMS_SECURITY_PROCEDURES_UNRESTRICTED }}"
- NEO4J_dbms_allow__format__migration: "true"
- NEO4J_dbms_allow__upgrade: "true"
- NEO4J_dbms_default__database: "{{ .Values.NEO4J.DBMS_DEFAULT_DATABASE }}"
- NEO4J_apoc_import_file_enabled: "{{ .Values.NEO4J.APOC_IMPORT_FILE_ENABLED }}"
diff --git a/deployment/src/kubernetes/templates/neo4j/Deployment.yaml b/deployment/src/kubernetes/templates/neo4j/Deployment.yaml
deleted file mode 100644
index 96867dbb5..000000000
--- a/deployment/src/kubernetes/templates/neo4j/Deployment.yaml
+++ /dev/null
@@ -1,57 +0,0 @@
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: {{ .Release.Name }}-neo4j
- labels:
- app.kubernetes.io/name: "{{ .Chart.Name }}"
- app.kubernetes.io/instance: "{{ .Release.Name }}"
- app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
- app.kubernetes.io/component: "deployment-neo4j"
- app.kubernetes.io/part-of: "{{ .Chart.Name }}"
- app.kubernetes.io/managed-by: "{{ .Release.Service }}"
- helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
-spec:
- replicas: 1
- revisionHistoryLimit: {{ .Values.NEO4J.REVISIONS_HISTORY_LIMIT }}
- strategy:
- rollingUpdate:
- maxUnavailable: 1
- selector:
- matchLabels:
- app: {{ .Release.Name }}-neo4j
- template:
- metadata:
- name: neo4j
- annotations:
- backup.velero.io/backup-volumes: neo4j-data
- # make sure the pod is redeployed
- rollme: {{ randAlphaNum 5 | quote }}
- labels:
- app: {{ .Release.Name }}-neo4j
- spec:
- containers:
- - name: container-{{ .Release.Name }}-neo4j
- image: "{{ .Values.NEO4J.DOCKER_IMAGE_REPO }}:{{ .Chart.AppVersion }}"
- imagePullPolicy: {{ .Values.NEO4J.DOCKER_IMAGE_PULL_POLICY }}
- ports:
- - containerPort: 7687
- - containerPort: 7474
- resources:
- requests:
- memory: {{ .Values.NEO4J.RESOURCE_REQUESTS_MEMORY | default "1G" | quote }}
- limits:
- memory: {{ .Values.NEO4J.RESOURCE_LIMITS_MEMORY | default "1G" | quote }}
- envFrom:
- - configMapRef:
- name: configmap-{{ .Release.Name }}-neo4j
- - secretRef:
- name: secret-{{ .Release.Name }}-neo4j
- volumeMounts:
- - mountPath: /data/
- name: neo4j-data
- volumes:
- - name: neo4j-data
- persistentVolumeClaim:
- claimName: volume-claim-{{ .Release.Name }}-neo4j
- restartPolicy: {{ .Values.NEO4J.CONTAINER_RESTART_POLICY }}
- terminationGracePeriodSeconds: {{ .Values.NEO4J.CONTAINER_TERMINATION_GRACE_PERIOD_SECONDS }}
diff --git a/deployment/src/kubernetes/templates/neo4j/PersistentVolumeClaim.yaml b/deployment/src/kubernetes/templates/neo4j/PersistentVolumeClaim.yaml
deleted file mode 100644
index 3aab02d9f..000000000
--- a/deployment/src/kubernetes/templates/neo4j/PersistentVolumeClaim.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
-kind: PersistentVolumeClaim
-apiVersion: v1
-metadata:
- name: volume-claim-{{ .Release.Name }}-neo4j
- labels:
- app.kubernetes.io/name: "{{ .Chart.Name }}"
- app.kubernetes.io/instance: "{{ .Release.Name }}"
- app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
- app.kubernetes.io/component: "volume-claim-neo4j"
- app.kubernetes.io/part-of: "{{ .Chart.Name }}"
- app.kubernetes.io/managed-by: "{{ .Release.Service }}"
- helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
-spec:
- storageClassName: storage-{{ .Release.Name }}-persistent
- accessModes:
- - ReadWriteOnce
- resources:
- requests:
- storage: {{ .Values.NEO4J.STORAGE }}
\ No newline at end of file
diff --git a/deployment/src/kubernetes/templates/neo4j/Secret.yaml b/deployment/src/kubernetes/templates/neo4j/Secret.yaml
deleted file mode 100644
index d8b1c17db..000000000
--- a/deployment/src/kubernetes/templates/neo4j/Secret.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-kind: Secret
-apiVersion: v1
-metadata:
- name: secret-{{ .Release.Name }}-neo4j
- labels:
- app.kubernetes.io/name: "{{ .Chart.Name }}"
- app.kubernetes.io/instance: "{{ .Release.Name }}"
- app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
- app.kubernetes.io/component: "secret-neo4j"
- app.kubernetes.io/part-of: "{{ .Chart.Name }}"
- app.kubernetes.io/managed-by: "{{ .Release.Service }}"
- helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
-stringData:
- NEO4J_USERNAME: ""
- NEO4J_PASSWORD: ""
\ No newline at end of file
diff --git a/deployment/src/kubernetes/templates/neo4j/Service.yaml b/deployment/src/kubernetes/templates/neo4j/Service.yaml
deleted file mode 100644
index 4ed56bd3f..000000000
--- a/deployment/src/kubernetes/templates/neo4j/Service.yaml
+++ /dev/null
@@ -1,23 +0,0 @@
-kind: Service
-apiVersion: v1
-metadata:
- name: {{ .Release.Name }}-neo4j
- labels:
- app.kubernetes.io/name: "{{ .Chart.Name }}"
- app.kubernetes.io/instance: "{{ .Release.Name }}"
- app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
- app.kubernetes.io/component: "service-neo4j"
- app.kubernetes.io/part-of: "{{ .Chart.Name }}"
- app.kubernetes.io/managed-by: "{{ .Release.Service }}"
- helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
-spec:
- ports:
- - name: {{ .Release.Name }}-bolt
- port: 7687
- targetPort: 7687
- protocol: TCP
- #- name: {{ .Release.Name }}-http
- # port: 7474
- # targetPort: 7474
- selector:
- app: {{ .Release.Name }}-neo4j
diff --git a/deployment/src/kubernetes/templates/storage/persistent.yml b/deployment/src/kubernetes/templates/storage/persistent.yml
deleted file mode 100644
index 2ac07c5de..000000000
--- a/deployment/src/kubernetes/templates/storage/persistent.yml
+++ /dev/null
@@ -1,16 +0,0 @@
-kind: StorageClass
-apiVersion: storage.k8s.io/v1
-metadata:
- name: storage-{{ .Release.Name }}-persistent
- labels:
- app.kubernetes.io/name: "{{ .Chart.Name }}"
- app.kubernetes.io/instance: "{{ .Release.Name }}"
- app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
- app.kubernetes.io/component: "storage-persistent"
- app.kubernetes.io/part-of: "{{ .Chart.Name }}"
- app.kubernetes.io/managed-by: "{{ .Release.Service }}"
- helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
-provisioner: {{ .Values.STORAGE.PROVISIONER }}
-reclaimPolicy: {{ .Values.STORAGE.RECLAIM_POLICY }}
-volumeBindingMode: {{ .Values.STORAGE.VOLUME_BINDING_MODE }}
-allowVolumeExpansion: {{ .Values.STORAGE.ALLOW_VOLUME_EXPANSION }}
\ No newline at end of file
diff --git a/deployment/src/kubernetes/templates/webapp/ConfigMap.yml b/deployment/src/kubernetes/templates/webapp/ConfigMap.yml
deleted file mode 100644
index 762b355cc..000000000
--- a/deployment/src/kubernetes/templates/webapp/ConfigMap.yml
+++ /dev/null
@@ -1,20 +0,0 @@
-kind: ConfigMap
-apiVersion: v1
-metadata:
- name: configmap-{{ .Release.Name }}-webapp
- labels:
- app.kubernetes.io/name: "{{ .Chart.Name }}"
- app.kubernetes.io/instance: "{{ .Release.Name }}"
- app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
- app.kubernetes.io/component: "configmap-webapp"
- app.kubernetes.io/part-of: "{{ .Chart.Name }}"
- app.kubernetes.io/managed-by: "{{ .Release.Service }}"
- helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
-data:
- HOST: "0.0.0.0"
- PUBLIC_REGISTRATION: "{{ .Values.PUBLIC_REGISTRATION }}"
- INVITE_REGISTRATION: "{{ .Values.INVITE_REGISTRATION }}"
- CATEGORIES_ACTIVE: "{{ .Values.CATEGORIES_ACTIVE }}"
- COOKIE_EXPIRE_TIME: "{{ .Values.COOKIE_EXPIRE_TIME }}"
- WEBSOCKETS_URI: "{{ .Values.WEBAPP.WEBSOCKETS_URI }}"
- GRAPHQL_URI: "http://{{ .Release.Name }}-backend:4000"
\ No newline at end of file
diff --git a/deployment/src/kubernetes/templates/webapp/Deployment.yaml b/deployment/src/kubernetes/templates/webapp/Deployment.yaml
deleted file mode 100644
index 41221185c..000000000
--- a/deployment/src/kubernetes/templates/webapp/Deployment.yaml
+++ /dev/null
@@ -1,49 +0,0 @@
-kind: Deployment
-apiVersion: apps/v1
-metadata:
- name: {{ .Release.Name }}-webapp
- labels:
- app.kubernetes.io/name: "{{ .Chart.Name }}"
- app.kubernetes.io/instance: "{{ .Release.Name }}"
- app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
- app.kubernetes.io/component: "deployment-webapp"
- app.kubernetes.io/part-of: "{{ .Chart.Name }}"
- app.kubernetes.io/managed-by: "{{ .Release.Service }}"
- helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
-spec:
- replicas: {{ .Values.WEBAPP.REPLICAS }}
- minReadySeconds: {{ .Values.WEBAPP.MIN_READY_SECONDS }}
- progressDeadlineSeconds: {{ .Values.WEBAPP.PROGRESS_DEADLINE_SECONDS }}
- revisionHistoryLimit: {{ .Values.WEBAPP.REVISIONS_HISTORY_LIMIT }}
- strategy:
- rollingUpdate:
- maxUnavailable: 1
- selector:
- matchLabels:
- app: {{ .Release.Name }}-webapp
- template:
- metadata:
- annotations:
- # make sure the pod is redeployed
- rollme: {{ randAlphaNum 5 | quote }}
- labels:
- app: {{ .Release.Name }}-webapp
- spec:
- containers:
- - name: container-{{ .Release.Name }}-webapp
- image: "{{ .Values.WEBAPP.DOCKER_IMAGE_REPO }}:{{ .Chart.AppVersion }}"
- imagePullPolicy: {{ .Values.WEBAPP.DOCKER_IMAGE_PULL_POLICY }}
- ports:
- - containerPort: 3000
- envFrom:
- - configMapRef:
- name: configmap-{{ .Release.Name }}-webapp
- - secretRef:
- name: secret-{{ .Release.Name }}-webapp
- resources:
- requests:
- memory: {{ .Values.WEBAPP.RESOURCE_REQUESTS_MEMORY | default "500M" | quote }}
- limits:
- memory: {{ .Values.WEBAPP.RESOURCE_LIMITS_MEMORY | default "1G" | quote }}
- restartPolicy: {{ .Values.WEBAPP.CONTAINER_RESTART_POLICY }}
- terminationGracePeriodSeconds: {{ .Values.WEBAPP.CONTAINER_TERMINATION_GRACE_PERIOD_SECONDS }}
\ No newline at end of file
diff --git a/deployment/src/kubernetes/templates/webapp/Ingress.yaml b/deployment/src/kubernetes/templates/webapp/Ingress.yaml
deleted file mode 100644
index d7b12bdc8..000000000
--- a/deployment/src/kubernetes/templates/webapp/Ingress.yaml
+++ /dev/null
@@ -1,36 +0,0 @@
-kind: Ingress
-apiVersion: networking.k8s.io/v1
-metadata:
- name: ingress-{{ .Release.Name }}-webapp
- labels:
- app.kubernetes.io/name: "{{ .Chart.Name }}"
- app.kubernetes.io/instance: "{{ .Release.Name }}"
- app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
- app.kubernetes.io/component: "ingress-webapp"
- app.kubernetes.io/part-of: "{{ .Chart.Name }}"
- app.kubernetes.io/managed-by: "{{ .Release.Service }}"
- helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
- annotations:
- kubernetes.io/ingress.class: "nginx"
- cert-manager.io/cluster-issuer: {{ .Values.LETSENCRYPT.ISSUER }}
- nginx.ingress.kubernetes.io/proxy-body-size: {{ .Values.NGINX.PROXY_BODY_SIZE }}
-spec:
- tls:
- - hosts:
- {{- range .Values.LETSENCRYPT.DOMAINS }}
- - {{ . }}
- {{- end }}
- secretName: tls
- rules:
- {{- range .Values.LETSENCRYPT.DOMAINS }}
- - host: {{ . }}
- http:
- paths:
- - path: /
- pathType: ImplementationSpecific
- backend:
- service:
- name: {{ $.Release.Name }}-webapp
- port:
- number: 3000
- {{- end }}
\ No newline at end of file
diff --git a/deployment/src/kubernetes/templates/webapp/Secret.yaml b/deployment/src/kubernetes/templates/webapp/Secret.yaml
deleted file mode 100644
index 8c0fd9d39..000000000
--- a/deployment/src/kubernetes/templates/webapp/Secret.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-kind: Secret
-apiVersion: v1
-metadata:
- name: secret-{{ .Release.Name }}-webapp
- labels:
- app.kubernetes.io/name: "{{ .Chart.Name }}"
- app.kubernetes.io/instance: "{{ .Release.Name }}"
- app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
- app.kubernetes.io/component: "secret-webapp"
- app.kubernetes.io/part-of: "{{ .Chart.Name }}"
- app.kubernetes.io/managed-by: "{{ .Release.Service }}"
- helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
-stringData:
- MAPBOX_TOKEN: "{{ .Values.MAPBOX_TOKEN }}"
\ No newline at end of file
diff --git a/deployment/src/kubernetes/templates/webapp/Service.yaml b/deployment/src/kubernetes/templates/webapp/Service.yaml
deleted file mode 100644
index 0c3112e77..000000000
--- a/deployment/src/kubernetes/templates/webapp/Service.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-kind: Service
-apiVersion: v1
-metadata:
- name: {{ .Release.Name }}-webapp
- labels:
- app.kubernetes.io/name: "{{ .Chart.Name }}"
- app.kubernetes.io/instance: "{{ .Release.Name }}"
- app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
- app.kubernetes.io/component: "service-webapp"
- app.kubernetes.io/part-of: "{{ .Chart.Name }}"
- app.kubernetes.io/managed-by: "{{ .Release.Service }}"
- helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
-spec:
- ports:
- - name: {{ .Release.Name }}-http
- port: 3000
- targetPort: 3000
- protocol: TCP
- selector:
- app: {{ .Release.Name }}-webapp
diff --git a/deployment/src/old/Maintenance.md b/deployment/src/old/Maintenance.md
deleted file mode 100644
index 08a177e65..000000000
--- a/deployment/src/old/Maintenance.md
+++ /dev/null
@@ -1,45 +0,0 @@
-# Maintenance mode
-
-> Despite our best efforts, systems sometimes require downtime for a variety of reasons.
-
-Quote from [here](https://www.nrmitchi.com/2017/11/easy-maintenance-mode-in-kubernetes/)
-
-We use our maintenance mode for manual database backup and restore. Also we
-bring the database into maintenance mode for manual database migrations.
-
-## Deploy the service
-
-We prepared sample configuration, so you can simply run:
-
-```sh
-# in folder deployment/
-$ kubectl apply -f ./ocelot-social/maintenance/
-```
-
-This will fire up a maintenance service.
-
-## Bring application into maintenance mode
-
-Now if you want to have a controlled downtime and you want to bring your
-application into maintenance mode, you can edit your global ingress server.
-
-E.g. copy file [`deployment/digital-ocean/https/templates/ingress.template.yaml`](../../digital-ocean/https/templates/ingress.template.yaml) to new file `deployment/digital-ocean/https/ingress.yaml` and change the following:
-
-```yaml
-...
-
- - host: develop-k8s.ocelot.social
- http:
- paths:
- - path: /
- backend:
- # serviceName: web
- serviceName: maintenance
- # servicePort: 3000
- servicePort: 80
-```
-
-Then run `$ kubectl apply -f deployment/digital-ocean/https/ingress.yaml`. If you
-want to deactivate the maintenance server, just undo the edit and apply the
-configuration again.
-
diff --git a/deployment/src/old/digital-ocean/README.md b/deployment/src/old/digital-ocean/README.md
deleted file mode 100644
index c5893f645..000000000
--- a/deployment/src/old/digital-ocean/README.md
+++ /dev/null
@@ -1,39 +0,0 @@
-# DigitalOcean
-
-As a start, read the [introduction into Kubernetes](https://www.digitalocean.com/community/tutorials/an-introduction-to-kubernetes) by the folks at DigitalOcean. The following section should enable you to deploy ocelot.social to your Kubernetes cluster.
-
-## Connect to your local cluster
-
-1. Create a cluster at [DigitalOcean](https://www.digitalocean.com/).
-2. Download the `***-kubeconfig.yaml` from the Web UI.
-3. Move the file to the default location where kubectl expects it to be: `mv ***-kubeconfig.yaml ~/.kube/config`. Alternatively you can set the config on every command: `--kubeconfig ***-kubeconfig.yaml`
-4. Now check if you can connect to the cluster and if its your newly created one by running: `kubectl get nodes`
-
-The output should look about like this:
-
-```sh
-$ kubectl get nodes
-NAME STATUS ROLES AGE VERSION
-nifty-driscoll-uu1w Ready 69d v1.13.2
-nifty-driscoll-uuiw Ready 69d v1.13.2
-nifty-driscoll-uusn Ready 69d v1.13.2
-```
-
-If you got the steps right above and see your nodes you can continue.
-
-DigitalOcean Kubernetes clusters don't have a graphical interface, so I suggest
-to setup the [Kubernetes dashboard](./dashboard/README.md) as a next step.
-Configuring [HTTPS](./https/README.md) is bit tricky and therefore I suggest to
-do this as a last step.
-
-## Spaces
-
-We are storing our images in the s3-compatible [DigitalOcean Spaces](https://www.digitalocean.com/docs/spaces/).
-
-We still want to take backups of our images in case something happens to the images in the cloud. See these [instructions](https://www.digitalocean.com/docs/spaces/resources/s3cmd-usage/) about getting set up with `s3cmd` to take a copy of all images in a `Spaces` namespace, i.e. `ocelot-social-uploads`.
-
-After configuring `s3cmd` with your credentials, etc. you should be able to make a backup with this command.
-
-```sh
-s3cmg get --recursive --skip-existing s3://ocelot-social-uploads
-```
diff --git a/deployment/src/old/digital-ocean/dashboard/README.md b/deployment/src/old/digital-ocean/dashboard/README.md
deleted file mode 100644
index 5f66afe0b..000000000
--- a/deployment/src/old/digital-ocean/dashboard/README.md
+++ /dev/null
@@ -1,55 +0,0 @@
-# Install Kubernetes Dashboard
-
-The kubernetes dashboard is optional but very helpful for debugging. If you want to install it, you have to do so only **once** per cluster:
-
-```bash
-# in folder deployment/digital-ocean/
-$ kubectl apply -f dashboard/
-$ kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-beta4/aio/deploy/recommended.yaml
-```
-
-### Login to your dashboard
-
-Proxy the remote kubernetes dashboard to localhost:
-
-```bash
-$ kubectl proxy
-```
-
-Visit:
-
-[http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/](http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/)
-
-You should see a login screen.
-
-To get your token for the dashboard you can run this command:
-
-```bash
-$ kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')
-```
-
-It should print something like:
-
-```text
-Name: admin-user-token-6gl6l
-Namespace: kube-system
-Labels:
-Annotations: kubernetes.io/service-account.name=admin-user
- kubernetes.io/service-account.uid=b16afba9-dfec-11e7-bbb9-901b0e532516
-
-Type: kubernetes.io/service-account-token
-
-Data
-====
-ca.crt: 1025 bytes
-namespace: 11 bytes
-token: eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLTZnbDZsIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiJiMTZhZmJhOS1kZmVjLTExZTctYmJiOS05MDFiMGU1MzI1MTYiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWRtaW4tdXNlciJ9.M70CU3lbu3PP4OjhFms8PVL5pQKj-jj4RNSLA4YmQfTXpPUuxqXjiTf094_Rzr0fgN_IVX6gC4fiNUL5ynx9KU-lkPfk0HnX8scxfJNzypL039mpGt0bbe1IXKSIRaq_9VW59Xz-yBUhycYcKPO9RM2Qa1Ax29nqNVko4vLn1_1wPqJ6XSq3GYI8anTzV8Fku4jasUwjrws6Cn6_sPEGmL54sq5R4Z5afUtv-mItTmqZZdxnkRqcJLlg2Y8WbCPogErbsaCDJoABQ7ppaqHetwfM_0yMun6ABOQbIwwl8pspJhpplKwyo700OSpvTT9zlBsu-b35lzXGBRHzv5g_RA
-```
-
-Grab the token from above and paste it into the [login screen](http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/)
-
-When you are logged in, you should see sth. like:
-
-
-
-Feel free to save the login token from above in your password manager. Unlike the `kubeconfig` file, this token does not expire.
diff --git a/deployment/src/old/digital-ocean/dashboard/admin-user.yaml b/deployment/src/old/digital-ocean/dashboard/admin-user.yaml
deleted file mode 100644
index 27b6bb802..000000000
--- a/deployment/src/old/digital-ocean/dashboard/admin-user.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: admin-user
- namespace: kube-system
diff --git a/deployment/src/old/digital-ocean/dashboard/dashboard-screenshot.png b/deployment/src/old/digital-ocean/dashboard/dashboard-screenshot.png
deleted file mode 100644
index 6aefb5414..000000000
Binary files a/deployment/src/old/digital-ocean/dashboard/dashboard-screenshot.png and /dev/null differ
diff --git a/deployment/src/old/digital-ocean/dashboard/role-binding.yaml b/deployment/src/old/digital-ocean/dashboard/role-binding.yaml
deleted file mode 100644
index faa8927a2..000000000
--- a/deployment/src/old/digital-ocean/dashboard/role-binding.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
- name: admin-user
-roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: ClusterRole
- name: cluster-admin
-subjects:
-- kind: ServiceAccount
- name: admin-user
- namespace: kube-system
diff --git a/deployment/src/old/digital-ocean/https/README.md b/deployment/src/old/digital-ocean/https/README.md
deleted file mode 100644
index 2393f3a0f..000000000
--- a/deployment/src/old/digital-ocean/https/README.md
+++ /dev/null
@@ -1,124 +0,0 @@
-## Create Letsencrypt Issuers and Ingress Services
-
-Copy the configuration templates and change the file according to your needs.
-
-```bash
-# in folder deployment/digital-ocean/https/
-cp templates/issuer.template.yaml ./issuer.yaml
-cp templates/ingress.template.yaml ./ingress.yaml
-```
-
-At least, **change email addresses** in `issuer.yaml`. For sure you also want
-to _change the domain name_ in `ingress.yaml`.
-
-Once you are done, apply the configuration:
-
-```bash
-# in folder deployment/digital-ocean/https/
-$ kubectl apply -f .
-```
-
-{% hint style="info" %}
-CAUTION: It seems that the behaviour of DigitalOcean has changed and the load balancer is not created automatically anymore.
-And to create a load balancer costs money. Please refine the following documentation if required.
-{% endhint %}
-
-::: tabs
-@tab:active Without Load Balancer
-
-A solution without a load balance you can find [here](../no-loadbalancer/README.md).
-
-@tab With DigitalOcean Load Balancer
-
-{% hint style="info" %}
-CAUTION: It seems that the behaviour of DigitalOcean has changed and the load balancer is not created automatically anymore.
-Please refine the following documentation if required.
-{% endhint %}
-
-In earlier days by now, your cluster should have a load balancer assigned with an external IP
-address. On DigitalOcean, this is how it should look like:
-
-
-
-If the load balancer isn't created automatically you have to create it your self on DigitalOcean under Networks.
-In case you don't need a DigitalOcean load balancer (which costs money by the way) have a look in the tab `Without Load Balancer`.
-
-:::
-
-Check the ingress server is working correctly:
-
-```bash
-$ curl -kivL -H 'Host: ' 'https://'
-
-```
-
-If the response looks good, configure your domain registrar for the new IP address and the domain.
-
-Now let's get a valid HTTPS certificate. According to the tutorial above, check your tls certificate for staging:
-
-```bash
-$ kubectl -n ocelot-social describe certificate tls
-<
-...
-Spec:
- ...
- Issuer Ref:
- Group: cert-manager.io
- Kind: ClusterIssuer
- Name: letsencrypt-staging
-...
-Events:
-
->
-$ kubectl -n ocelot-social describe secret tls
-<
-...
-Annotations: ...
- cert-manager.io/issuer-kind: ClusterIssuer
- cert-manager.io/issuer-name: letsencrypt-staging
-...
->
-```
-
-If everything looks good, update the cluster-issuer of your ingress. Change the annotation `cert-manager.io/cluster-issuer` from `letsencrypt-staging` (for testing by getting a dummy certificate – no blocking by letsencrypt, because of to many request cycles) to `letsencrypt-prod` (for production with a real certificate – possible blocking by letsencrypt for several days, because of to many request cycles) in your ingress configuration in `ingress.yaml`.
-
-```bash
-# in folder deployment/digital-ocean/https/
-$ kubectl apply -f ingress.yaml
-```
-
-Take a minute and have a look if the certificate is now newly generated by `letsencrypt-prod`, the cluster-issuer for production:
-
-```bash
-$ kubectl -n ocelot-social describe certificate tls
-<
-...
-Spec:
- ...
- Issuer Ref:
- Group: cert-manager.io
- Kind: ClusterIssuer
- Name: letsencrypt-prod
-...
-Events:
-
->
-$ kubectl -n ocelot-social describe secret tls
-<
-...
-Annotations: ...
- cert-manager.io/issuer-kind: ClusterIssuer
- cert-manager.io/issuer-name: letsencrypt-prod
-...
->
-```
-
-In case the certificate is not newly created delete the former secret to force a refresh:
-
-```bash
-$ kubectl -n ocelot-social delete secret tls
-```
-
-Now, HTTPS should be configured on your domain. Congrats!
-
-For troubleshooting have a look at the cert-manager's [Troubleshooting](https://cert-manager.io/docs/faq/troubleshooting/) or [Troubleshooting Issuing ACME Certificates](https://cert-manager.io/docs/faq/acme/).
diff --git a/deployment/src/old/digital-ocean/https/ip-address.png b/deployment/src/old/digital-ocean/https/ip-address.png
deleted file mode 100644
index db523156a..000000000
Binary files a/deployment/src/old/digital-ocean/https/ip-address.png and /dev/null differ
diff --git a/deployment/src/old/legacy-migration/README.md b/deployment/src/old/legacy-migration/README.md
deleted file mode 100644
index 66100a3c8..000000000
--- a/deployment/src/old/legacy-migration/README.md
+++ /dev/null
@@ -1,85 +0,0 @@
-# Legacy data migration
-
-This setup is **completely optional** and only required if you have data on a
-server which is running our legacy code and you want to import that data. It
-will import the uploads folder and migrate a dump of the legacy Mongo database
-into our new Neo4J graph database.
-
-## Configure Maintenance-Worker Pod
-
-Create a configmap with the specific connection data of your legacy server:
-
-```bash
-$ kubectl create configmap maintenance-worker \
- -n ocelot-social \
- --from-literal=SSH_USERNAME=someuser \
- --from-literal=SSH_HOST=yourhost \
- --from-literal=MONGODB_USERNAME=hc-api \
- --from-literal=MONGODB_PASSWORD=secretpassword \
- --from-literal=MONGODB_AUTH_DB=hc_api \
- --from-literal=MONGODB_DATABASE=hc_api \
- --from-literal=UPLOADS_DIRECTORY=/var/www/api/uploads
-```
-
-Create a secret with your public and private ssh keys. As the [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/secret/#use-case-pod-with-ssh-keys) points out, you should be careful with your ssh keys. Anyone with access to your cluster will have access to your ssh keys. Better create a new pair with `ssh-keygen` and copy the public key to your legacy server with `ssh-copy-id`:
-
-```bash
-$ kubectl create secret generic ssh-keys \
- -n ocelot-social \
- --from-file=id_rsa=/path/to/.ssh/id_rsa \
- --from-file=id_rsa.pub=/path/to/.ssh/id_rsa.pub \
- --from-file=known_hosts=/path/to/.ssh/known_hosts
-```
-
-## Deploy a Temporary Maintenance-Worker Pod
-
-Bring the application into maintenance mode.
-
-{% hint style="info" %} TODO: implement maintenance mode {% endhint %}
-
-
-Then temporarily delete backend and database deployments
-
-```bash
-$ kubectl -n ocelot-social get deployments
-NAME READY UP-TO-DATE AVAILABLE AGE
-backend 1/1 1 1 3d11h
-neo4j 1/1 1 1 3d11h
-webapp 2/2 2 2 73d
-$ kubectl -n ocelot-social delete deployment neo4j
-deployment.extensions "neo4j" deleted
-$ kubectl -n ocelot-social delete deployment backend
-deployment.extensions "backend" deleted
-```
-
-Deploy one-time develop-maintenance-worker pod:
-
-```bash
-# in deployment/legacy-migration/
-$ kubectl apply -f maintenance-worker.yaml
-pod/develop-maintenance-worker created
-```
-
-Import legacy database and uploads:
-
-```bash
-$ kubectl -n ocelot-social exec -it develop-maintenance-worker bash
-$ import_legacy_db
-$ import_legacy_uploads
-$ exit
-```
-
-Delete the pod when you're done:
-
-```bash
-$ kubectl -n ocelot-social delete pod develop-maintenance-worker
-```
-
-Oh, and of course you have to get those deleted deployments back. One way of
-doing it would be:
-
-```bash
-# in folder deployment/
-$ kubectl apply -f human-connection/deployment-backend.yaml -f human-connection/deployment-neo4j.yaml
-```
-
diff --git a/deployment/src/old/legacy-migration/maintenance-worker.yaml b/deployment/src/old/legacy-migration/maintenance-worker.yaml
deleted file mode 100644
index d8b118b67..000000000
--- a/deployment/src/old/legacy-migration/maintenance-worker.yaml
+++ /dev/null
@@ -1,40 +0,0 @@
----
- kind: Pod
- apiVersion: v1
- metadata:
- name: develop-maintenance-worker
- namespace: ocelot-social
- spec:
- containers:
- - name: develop-maintenance-worker
- image: ocelotsocialnetwork/develop-maintenance-worker:latest
- imagePullPolicy: Always
- resources:
- requests:
- memory: "2G"
- limits:
- memory: "8G"
- envFrom:
- - configMapRef:
- name: maintenance-worker
- - configMapRef:
- name: configmap
- volumeMounts:
- - name: secret-volume
- readOnly: false
- mountPath: /root/.ssh
- - name: uploads
- mountPath: /uploads
- - name: neo4j-data
- mountPath: /data/
- volumes:
- - name: secret-volume
- secret:
- secretName: ssh-keys
- defaultMode: 0400
- - name: uploads
- persistentVolumeClaim:
- claimName: uploads-claim
- - name: neo4j-data
- persistentVolumeClaim:
- claimName: neo4j-data-claim
diff --git a/deployment/src/old/legacy-migration/maintenance-worker/.dockerignore b/deployment/src/old/legacy-migration/maintenance-worker/.dockerignore
deleted file mode 100644
index 59ba63a8b..000000000
--- a/deployment/src/old/legacy-migration/maintenance-worker/.dockerignore
+++ /dev/null
@@ -1 +0,0 @@
-.ssh/
diff --git a/deployment/src/old/legacy-migration/maintenance-worker/.gitignore b/deployment/src/old/legacy-migration/maintenance-worker/.gitignore
deleted file mode 100644
index 485bc00e6..000000000
--- a/deployment/src/old/legacy-migration/maintenance-worker/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-.ssh/
-ssh/
\ No newline at end of file
diff --git a/deployment/src/old/legacy-migration/maintenance-worker/Dockerfile b/deployment/src/old/legacy-migration/maintenance-worker/Dockerfile
deleted file mode 100644
index 760cc06c8..000000000
--- a/deployment/src/old/legacy-migration/maintenance-worker/Dockerfile
+++ /dev/null
@@ -1,21 +0,0 @@
-FROM ocelotsocialnetwork/develop-neo4j:latest
-
-ENV NODE_ENV=maintenance
-EXPOSE 7687 7474
-
-ENV BUILD_DEPS="gettext" \
- RUNTIME_DEPS="libintl"
-
-RUN set -x && \
- apk add --update $RUNTIME_DEPS && \
- apk add --virtual build_deps $BUILD_DEPS && \
- cp /usr/bin/envsubst /usr/local/bin/envsubst && \
- apk del build_deps
-
-
-RUN apk upgrade --update
-RUN apk add --no-cache mongodb-tools openssh nodejs yarn rsync
-
-COPY known_hosts /root/.ssh/known_hosts
-COPY migration /migration
-COPY ./binaries/* /usr/local/bin/
diff --git a/deployment/src/old/legacy-migration/maintenance-worker/binaries/idle b/deployment/src/old/legacy-migration/maintenance-worker/binaries/idle
deleted file mode 100755
index f5b1b2454..000000000
--- a/deployment/src/old/legacy-migration/maintenance-worker/binaries/idle
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/usr/bin/env bash
-tail -f /dev/null
diff --git a/deployment/src/old/legacy-migration/maintenance-worker/binaries/import_legacy_db b/deployment/src/old/legacy-migration/maintenance-worker/binaries/import_legacy_db
deleted file mode 100755
index 6ffdf8e3f..000000000
--- a/deployment/src/old/legacy-migration/maintenance-worker/binaries/import_legacy_db
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/usr/bin/env bash
-set -e
-for var in "SSH_USERNAME" "SSH_HOST" "MONGODB_USERNAME" "MONGODB_PASSWORD" "MONGODB_DATABASE" "MONGODB_AUTH_DB"
-do
- if [[ -z "${!var}" ]]; then
- echo "${var} is undefined"
- exit 1
- fi
-done
-
-/migration/mongo/export.sh
-/migration/neo4j/import.sh
diff --git a/deployment/src/old/legacy-migration/maintenance-worker/binaries/import_legacy_uploads b/deployment/src/old/legacy-migration/maintenance-worker/binaries/import_legacy_uploads
deleted file mode 100755
index 5c0b67d74..000000000
--- a/deployment/src/old/legacy-migration/maintenance-worker/binaries/import_legacy_uploads
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/usr/bin/env bash
-set -e
-
-# import .env config
-set -o allexport
-source $(dirname "$0")/.env
-set +o allexport
-
-for var in "SSH_USERNAME" "SSH_HOST" "UPLOADS_DIRECTORY"
-do
- if [[ -z "${!var}" ]]; then
- echo "${var} is undefined"
- exit 1
- fi
-done
-
-rsync --archive --update --verbose ${SSH_USERNAME}@${SSH_HOST}:${UPLOADS_DIRECTORY}/ ${OUTPUT_DIRECTORY}
diff --git a/deployment/src/old/legacy-migration/maintenance-worker/known_hosts b/deployment/src/old/legacy-migration/maintenance-worker/known_hosts
deleted file mode 100644
index 947840cb2..000000000
--- a/deployment/src/old/legacy-migration/maintenance-worker/known_hosts
+++ /dev/null
@@ -1,3 +0,0 @@
-|1|GuOYlVEhTowidPs18zj9p5F2j3o=|sDHJYLz9Ftv11oXeGEjs7SpVyg0= ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBM5N29bI5CeKu1/RBPyM2fwyf7fuajOO+tyhKe1+CC2sZ1XNB5Ff6t6MtCLNRv2mUuvzTbW/HkisDiA5tuXUHOk=
-|1|2KP9NV+Q5g2MrtjAeFSVcs8YeOI=|nf3h4wWVwC4xbBS1kzgzE2tBldk= ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBNhRK6BeIEUxXlS0z/pOfkUkSPfn33g4J1U3L+MyUQYHm+7agT08799ANJhnvELKE1tt4Vx80I9UR81oxzZcy3E=
-|1|HonYIRNhKyroUHPKU1HSZw0+Qzs=|5T1btfwFBz2vNSldhqAIfTbfIgQ= ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBNhRK6BeIEUxXlS0z/pOfkUkSPfn33g4J1U3L+MyUQYHm+7agT08799ANJhnvELKE1tt4Vx80I9UR81oxzZcy3E=
diff --git a/deployment/src/old/legacy-migration/maintenance-worker/migration/mongo/export.sh b/deployment/src/old/legacy-migration/maintenance-worker/migration/mongo/export.sh
deleted file mode 100755
index b56ace87a..000000000
--- a/deployment/src/old/legacy-migration/maintenance-worker/migration/mongo/export.sh
+++ /dev/null
@@ -1,53 +0,0 @@
-#!/usr/bin/env bash
-set -e
-
-# import .env config
-set -o allexport
-source $(dirname "$0")/.env
-set +o allexport
-
-# Export collection function defintion
-function export_collection () {
- "${EXPORT_MONGOEXPORT_BIN}" --db ${MONGODB_DATABASE} --host localhost -d ${MONGODB_DATABASE} --port 27018 --username ${MONGODB_USERNAME} --password ${MONGODB_PASSWORD} --authenticationDatabase ${MONGODB_AUTH_DB} --collection $1 --out "${EXPORT_PATH}$1.json"
- mkdir -p ${EXPORT_PATH}splits/$1/
- split -l ${MONGO_EXPORT_SPLIT_SIZE} -a 3 ${EXPORT_PATH}$1.json ${EXPORT_PATH}splits/$1/
-}
-
-# Export collection with query function defintion
-function export_collection_query () {
- "${EXPORT_MONGOEXPORT_BIN}" --db ${MONGODB_DATABASE} --host localhost -d ${MONGODB_DATABASE} --port 27018 --username ${MONGODB_USERNAME} --password ${MONGODB_PASSWORD} --authenticationDatabase ${MONGODB_AUTH_DB} --collection $1 --out "${EXPORT_PATH}$1_$3.json" --query "$2"
- mkdir -p ${EXPORT_PATH}splits/$1_$3/
- split -l ${MONGO_EXPORT_SPLIT_SIZE} -a 3 ${EXPORT_PATH}$1_$3.json ${EXPORT_PATH}splits/$1_$3/
-}
-
-# Delete old export & ensure directory
-rm -rf ${EXPORT_PATH}*
-mkdir -p ${EXPORT_PATH}
-
-# Open SSH Tunnel
-ssh -4 -M -S my-ctrl-socket -fnNT -L 27018:localhost:27017 -l ${SSH_USERNAME} ${SSH_HOST}
-
-# Export all Data from the Alpha to json and split them up
-export_collection "badges"
-export_collection "categories"
-export_collection "comments"
-export_collection_query "contributions" '{"type": "DELETED"}' "DELETED"
-export_collection_query "contributions" '{"type": "post"}' "post"
-# export_collection_query "contributions" '{"type": "cando"}' "cando"
-export_collection "emotions"
-# export_collection_query "follows" '{"foreignService": "organizations"}' "organizations"
-export_collection_query "follows" '{"foreignService": "users"}' "users"
-# export_collection "invites"
-# export_collection "organizations"
-# export_collection "pages"
-# export_collection "projects"
-# export_collection "settings"
-export_collection "shouts"
-# export_collection "status"
-export_collection_query "users" '{"isVerified": true }' "verified"
-# export_collection "userscandos"
-# export_collection "usersettings"
-
-# Close SSH Tunnel
-ssh -S my-ctrl-socket -O check -l ${SSH_USERNAME} ${SSH_HOST}
-ssh -S my-ctrl-socket -O exit -l ${SSH_USERNAME} ${SSH_HOST}
diff --git a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/badges/badges.cql b/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/badges/badges.cql
deleted file mode 100644
index adf63dc1f..000000000
--- a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/badges/badges.cql
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
-// Alpha Model
-// [ ] Not modeled in Nitro
-// [X] Modeled in Nitro
-// [-] Omitted in Nitro
-// [?] Unclear / has work to be done for Nitro
- {
-[?] image: {
-[?] path: { // Path is incorrect in Nitro - is icon the correct name for this field?
-[X] type: String,
-[X] required: true
- },
-[ ] alt: { // If we use an image - should we not have an alt?
-[ ] type: String,
-[ ] required: true
- }
- },
-[?] status: {
-[X] type: String,
-[X] enum: ['permanent', 'temporary'],
-[ ] default: 'permanent', // Default value is missing in Nitro
-[X] required: true
- },
-[?] type: {
-[?] type: String, // in nitro this is a defined enum - seems good for now
-[X] required: true
- },
-[X] id: {
-[X] type: String,
-[X] required: true
- },
-[?] createdAt: {
-[?] type: Date, // Type is modeled as string in Nitro which is incorrect
-[ ] default: Date.now // Default value is missing in Nitro
- },
-[?] updatedAt: {
-[?] type: Date, // Type is modeled as string in Nitro which is incorrect
-[ ] default: Date.now // Default value is missing in Nitro
- }
- }
-*/
-
-CALL apoc.load.json("file:${IMPORT_CHUNK_PATH_CQL_FILE}") YIELD value as badge
-MERGE(b:Badge {id: badge._id["$oid"]})
-ON CREATE SET
-b.id = badge.key,
-b.type = badge.type,
-b.icon = replace(badge.image.path, 'https://api-alpha.human-connection.org', ''),
-b.status = badge.status,
-b.createdAt = badge.createdAt.`$date`,
-b.updatedAt = badge.updatedAt.`$date`
-;
diff --git a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/badges/delete.cql b/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/badges/delete.cql
deleted file mode 100644
index 2a6f8c244..000000000
--- a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/badges/delete.cql
+++ /dev/null
@@ -1 +0,0 @@
-MATCH (n:Badge) DETACH DELETE n;
\ No newline at end of file
diff --git a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/categories/categories.cql b/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/categories/categories.cql
deleted file mode 100644
index 5d4958876..000000000
--- a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/categories/categories.cql
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
-// Alpha Model
-// [ ] Not modeled in Nitro
-// [X] Modeled in Nitro
-// [-] Omitted in Nitro
-// [?] Unclear / has work to be done for Nitro
- {
-[X] title: {
-[X] type: String,
-[X] required: true
- },
-[?] slug: {
-[X] type: String,
-[ ] required: true, // Not required in Nitro
-[ ] unique: true // Unique value is not enforced in Nitro?
- },
-[?] icon: { // Nitro adds required: true
-[X] type: String,
-[ ] unique: true // Unique value is not enforced in Nitro?
- },
-[?] createdAt: {
-[?] type: Date, // Type is modeled as string in Nitro which is incorrect
-[ ] default: Date.now // Default value is missing in Nitro
- },
-[?] updatedAt: {
-[?] type: Date, // Type is modeled as string in Nitro which is incorrect
-[ ] default: Date.now // Default value is missing in Nitro
- }
- }
-*/
-
-CALL apoc.load.json("file:${IMPORT_CHUNK_PATH_CQL_FILE}") YIELD value as category
-MERGE(c:Category {id: category._id["$oid"]})
-ON CREATE SET
-c.name = category.title,
-c.slug = category.slug,
-c.icon = category.icon,
-c.createdAt = category.createdAt.`$date`,
-c.updatedAt = category.updatedAt.`$date`
-;
-
-// Transform icon names
-MATCH (c:Category)
-WHERE (c.icon = "categories-justforfun")
-SET c.icon = 'smile'
-SET c.slug = 'just-for-fun'
-;
-
-MATCH (c:Category)
-WHERE (c.icon = "categories-luck")
-SET c.icon = 'heart-o'
-SET c.slug = 'happiness-values'
-;
-
-MATCH (c:Category)
-WHERE (c.icon = "categories-health")
-SET c.icon = 'medkit'
-;
-
-MATCH (c:Category)
-WHERE (c.icon = "categories-environment")
-SET c.icon = 'tree'
-;
-
-MATCH (c:Category)
-WHERE (c.icon = "categories-animal-justice")
-SET c.icon = 'paw'
-SET c.slug = 'animal-protection'
-;
-
-MATCH (c:Category)
-WHERE (c.icon = "categories-human-rights")
-SET c.icon = 'balance-scale'
-SET c.slug = 'human-rights-justice'
-
-;
-
-MATCH (c:Category)
-WHERE (c.icon = "categories-education")
-SET c.icon = 'graduation-cap'
-;
-
-MATCH (c:Category)
-WHERE (c.icon = "categories-cooperation")
-SET c.icon = 'users'
-;
-
-MATCH (c:Category)
-WHERE (c.icon = "categories-politics")
-SET c.icon = 'university'
-;
-
-MATCH (c:Category)
-WHERE (c.icon = "categories-economy")
-SET c.icon = 'money'
-;
-
-MATCH (c:Category)
-WHERE (c.icon = "categories-technology")
-SET c.icon = 'flash'
-;
-
-MATCH (c:Category)
-WHERE (c.icon = "categories-internet")
-SET c.icon = 'mouse-pointer'
-SET c.slug = 'it-internet-data-privacy'
-;
-
-MATCH (c:Category)
-WHERE (c.icon = "categories-art")
-SET c.icon = 'paint-brush'
-;
-
-MATCH (c:Category)
-WHERE (c.icon = "categories-freedom-of-speech")
-SET c.icon = 'bullhorn'
-SET c.slug = 'freedom-of-speech'
-;
-
-MATCH (c:Category)
-WHERE (c.icon = "categories-sustainability")
-SET c.icon = 'shopping-cart'
-;
-
-MATCH (c:Category)
-WHERE (c.icon = "categories-peace")
-SET c.icon = 'angellist'
-SET c.slug = 'global-peace-nonviolence'
-;
diff --git a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/categories/delete.cql b/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/categories/delete.cql
deleted file mode 100644
index c06b5ef2b..000000000
--- a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/categories/delete.cql
+++ /dev/null
@@ -1 +0,0 @@
-MATCH (n:Category) DETACH DELETE n;
\ No newline at end of file
diff --git a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/comments/comments.cql b/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/comments/comments.cql
deleted file mode 100644
index 083f9f762..000000000
--- a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/comments/comments.cql
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
-// Alpha Model
-// [ ] Not modeled in Nitro
-// [X] Modeled in Nitro
-// [-] Omitted in Nitro
-// [?] Unclear / has work to be done for Nitro
- {
-[?] userId: {
-[X] type: String,
-[ ] required: true, // Not required in Nitro
-[-] index: true
- },
-[?] contributionId: {
-[X] type: String,
-[ ] required: true, // Not required in Nitro
-[-] index: true
- },
-[X] content: {
-[X] type: String,
-[X] required: true
- },
-[?] contentExcerpt: { // Generated from content
-[X] type: String,
-[ ] required: true // Not required in Nitro
- },
-[ ] hasMore: { type: Boolean },
-[ ] upvotes: {
-[ ] type: Array,
-[ ] default: []
- },
-[ ] upvoteCount: {
-[ ] type: Number,
-[ ] default: 0
- },
-[?] deleted: {
-[X] type: Boolean,
-[ ] default: false, // Default value is missing in Nitro
-[-] index: true
- },
-[ ] createdAt: {
-[ ] type: Date,
-[ ] default: Date.now
- },
-[ ] updatedAt: {
-[ ] type: Date,
-[ ] default: Date.now
- },
-[ ] wasSeeded: { type: Boolean }
- }
-*/
-
-CALL apoc.load.json("file:${IMPORT_CHUNK_PATH_CQL_FILE}") YIELD value as comment
-MERGE (c:Comment {id: comment._id["$oid"]})
-ON CREATE SET
-c.content = comment.content,
-c.contentExcerpt = comment.contentExcerpt,
-c.deleted = comment.deleted,
-c.createdAt = comment.createdAt.`$date`,
-c.updatedAt = comment.updatedAt.`$date`,
-c.disabled = false
-WITH c, comment, comment.contributionId as postId
-MATCH (post:Post {id: postId})
-WITH c, post, comment.userId as userId
-MATCH (author:User {id: userId})
-MERGE (c)-[:COMMENTS]->(post)
-MERGE (author)-[:WROTE]->(c)
-;
diff --git a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/comments/delete.cql b/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/comments/delete.cql
deleted file mode 100644
index c4a7961c5..000000000
--- a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/comments/delete.cql
+++ /dev/null
@@ -1 +0,0 @@
-MATCH (n:Comment) DETACH DELETE n;
\ No newline at end of file
diff --git a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/contributions/contributions.cql b/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/contributions/contributions.cql
deleted file mode 100644
index f09b5ad71..000000000
--- a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/contributions/contributions.cql
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
-// Alpha Model
-// [ ] Not modeled in Nitro
-// [X] Modeled in Nitro
-// [-] Omitted in Nitro
-// [?] Unclear / has work to be done for Nitro
-[?] { //Modeled incorrect as Post
-[?] userId: {
-[X] type: String,
-[ ] required: true, // Not required in Nitro
-[-] index: true
- },
-[ ] organizationId: {
-[ ] type: String,
-[-] index: true
- },
-[X] categoryIds: {
-[X] type: Array,
-[-] index: true
- },
-[X] title: {
-[X] type: String,
-[X] required: true
- },
-[?] slug: { // Generated from title
-[X] type: String,
-[ ] required: true, // Not required in Nitro
-[?] unique: true, // Unique value is not enforced in Nitro?
-[-] index: true
- },
-[ ] type: { // db.getCollection('contributions').distinct('type') -> 'DELETED', 'cando', 'post'
-[ ] type: String,
-[ ] required: true,
-[-] index: true
- },
-[ ] cando: {
-[ ] difficulty: {
-[ ] type: String,
-[ ] enum: ['easy', 'medium', 'hard']
- },
-[ ] reasonTitle: { type: String },
-[ ] reason: { type: String }
- },
-[X] content: {
-[X] type: String,
-[X] required: true
- },
-[?] contentExcerpt: { // Generated from content
-[X] type: String,
-[?] required: true // Not required in Nitro
- },
-[ ] hasMore: { type: Boolean },
-[X] teaserImg: { type: String },
-[ ] language: {
-[ ] type: String,
-[ ] required: true,
-[-] index: true
- },
-[ ] shoutCount: {
-[ ] type: Number,
-[ ] default: 0,
-[-] index: true
- },
-[ ] meta: {
-[ ] hasVideo: {
-[ ] type: Boolean,
-[ ] default: false
- },
-[ ] embedds: {
-[ ] type: Object,
-[ ] default: {}
- }
- },
-[?] visibility: {
-[X] type: String,
-[X] enum: ['public', 'friends', 'private'],
-[ ] default: 'public', // Default value is missing in Nitro
-[-] index: true
- },
-[?] isEnabled: {
-[X] type: Boolean,
-[ ] default: true, // Default value is missing in Nitro
-[-] index: true
- },
-[?] tags: { type: Array }, // ensure this is working properly
-[ ] emotions: {
-[ ] type: Object,
-[-] index: true,
-[ ] default: {
-[ ] angry: {
-[ ] count: 0,
-[ ] percent: 0
-[ ] },
-[ ] cry: {
-[ ] count: 0,
-[ ] percent: 0
-[ ] },
-[ ] surprised: {
-[ ] count: 0,
-[ ] percent: 0
- },
-[ ] happy: {
-[ ] count: 0,
-[ ] percent: 0
- },
-[ ] funny: {
-[ ] count: 0,
-[ ] percent: 0
- }
- }
- },
-[?] deleted: { // THis field is not always present in the alpha-data
-[?] type: Boolean,
-[ ] default: false, // Default value is missing in Nitro
-[-] index: true
- },
-[?] createdAt: {
-[?] type: Date, // Type is modeled as string in Nitro which is incorrect
-[ ] default: Date.now // Default value is missing in Nitro
- },
-[?] updatedAt: {
-[?] type: Date, // Type is modeled as string in Nitro which is incorrect
-[ ] default: Date.now // Default value is missing in Nitro
- },
-[ ] wasSeeded: { type: Boolean }
- }
-*/
-CALL apoc.load.json("file:${IMPORT_CHUNK_PATH_CQL_FILE}") YIELD value as post
-MERGE (p:Post {id: post._id["$oid"]})
-ON CREATE SET
-p.title = post.title,
-p.slug = post.slug,
-p.image = replace(post.teaserImg, 'https://api-alpha.human-connection.org', ''),
-p.content = post.content,
-p.contentExcerpt = post.contentExcerpt,
-p.visibility = toLower(post.visibility),
-p.createdAt = post.createdAt.`$date`,
-p.updatedAt = post.updatedAt.`$date`,
-p.deleted = COALESCE(post.deleted, false),
-p.disabled = COALESCE(NOT post.isEnabled, false)
-WITH p, post
-MATCH (u:User {id: post.userId})
-MERGE (u)-[:WROTE]->(p)
-WITH p, post, post.categoryIds as categoryIds
-UNWIND categoryIds AS categoryId
-MATCH (c:Category {id: categoryId})
-MERGE (p)-[:CATEGORIZED]->(c)
-WITH p, post.tags AS tags
-UNWIND tags AS tag
-WITH apoc.text.replace(tag, '[^\\p{L}0-9]', '') as tagNoSpacesAllowed
-CALL apoc.when(tagNoSpacesAllowed =~ '^((\\p{L}+[\\p{L}0-9]*)|([0-9]+\\p{L}+[\\p{L}0-9]*))$', 'RETURN tagNoSpacesAllowed', '', {tagNoSpacesAllowed: tagNoSpacesAllowed})
-YIELD value as validated
-WHERE validated.tagNoSpacesAllowed IS NOT NULL
-MERGE (t:Tag { id: validated.tagNoSpacesAllowed, disabled: false, deleted: false })
-MERGE (p)-[:TAGGED]->(t)
-;
diff --git a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/contributions/delete.cql b/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/contributions/delete.cql
deleted file mode 100644
index 70adad664..000000000
--- a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/contributions/delete.cql
+++ /dev/null
@@ -1,2 +0,0 @@
-MATCH (n:Post) DETACH DELETE n;
-MATCH (n:Tag) DETACH DELETE n;
\ No newline at end of file
diff --git a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/delete_all.cql b/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/delete_all.cql
deleted file mode 100644
index d01871300..000000000
--- a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/delete_all.cql
+++ /dev/null
@@ -1 +0,0 @@
-MATCH (n) DETACH DELETE n;
\ No newline at end of file
diff --git a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/emotions/delete.cql b/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/emotions/delete.cql
deleted file mode 100644
index 18fb6699f..000000000
--- a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/emotions/delete.cql
+++ /dev/null
@@ -1 +0,0 @@
-MATCH (u:User)-[e:EMOTED]->(c:Post) DETACH DELETE e;
\ No newline at end of file
diff --git a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/emotions/emotions.cql b/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/emotions/emotions.cql
deleted file mode 100644
index 06341f277..000000000
--- a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/emotions/emotions.cql
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
-// Alpha Model
-// [ ] Not modeled in Nitro
-// [X] Modeled in Nitro
-// [-] Omitted in Nitro
-// [?] Unclear / has work to be done for Nitro
- {
-[X] userId: {
-[X] type: String,
-[X] required: true,
-[-] index: true
- },
-[X] contributionId: {
-[X] type: String,
-[X] required: true,
-[-] index: true
- },
-[?] rated: {
-[X] type: String,
-[ ] required: true,
-[?] enum: ['funny', 'happy', 'surprised', 'cry', 'angry']
- },
-[X] createdAt: {
-[X] type: Date,
-[X] default: Date.now
- },
-[X] updatedAt: {
-[X] type: Date,
-[X] default: Date.now
- },
-[-] wasSeeded: { type: Boolean }
- }
-*/
-
-CALL apoc.load.json("file:${IMPORT_CHUNK_PATH_CQL_FILE}") YIELD value as emotion
-MATCH (u:User {id: emotion.userId}),
- (c:Post {id: emotion.contributionId})
-MERGE (u)-[e:EMOTED {
- id: emotion._id["$oid"],
- emotion: emotion.rated,
- createdAt: datetime(emotion.createdAt.`$date`),
- updatedAt: datetime(emotion.updatedAt.`$date`)
- }]->(c)
-RETURN e;
-/*
- // Queries
- // user sets an emotion emotion:
- // MERGE (u)-[e:EMOTED {id: ..., emotion: "funny", createdAt: ..., updatedAt: ...}]->(c)
- // user removes emotion
- // MATCH (u)-[e:EMOTED]->(c) DELETE e
- // contribution distributions over every `emotion` property value for one post
- // MATCH (u:User)-[e:EMOTED]->(c:Post {id: "5a70bbc8508f5b000b443b1a"}) RETURN e.emotion,COUNT(e.emotion)
- // contribution distributions over every `emotion` property value for one user (advanced - "whats the primary emotion used by the user?")
- // MATCH (u:User{id:"5a663b1ac64291000bf302a1"})-[e:EMOTED]->(c:Post) RETURN e.emotion,COUNT(e.emotion)
- // contribution distributions over every `emotion` property value for all posts created by one user (advanced - "how do others react to my contributions?")
- // MATCH (u:User)-[e:EMOTED]->(c:Post)<-[w:WROTE]-(a:User{id:"5a663b1ac64291000bf302a1"}) RETURN e.emotion,COUNT(e.emotion)
- // if we can filter the above an a variable timescale that would be great (should be possible on createdAt and updatedAt fields)
-*/
\ No newline at end of file
diff --git a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/follows/delete.cql b/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/follows/delete.cql
deleted file mode 100644
index 3de01f8ea..000000000
--- a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/follows/delete.cql
+++ /dev/null
@@ -1 +0,0 @@
-MATCH (u1:User)-[f:FOLLOWS]->(u2:User) DETACH DELETE f;
\ No newline at end of file
diff --git a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/follows/follows.cql b/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/follows/follows.cql
deleted file mode 100644
index fac858a9a..000000000
--- a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/follows/follows.cql
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
-// Alpha Model
-// [ ] Not modeled in Nitro
-// [X] Modeled in Nitro
-// [-] Omitted in Nitro
-// [?] Unclear / has work to be done for Nitro
- {
-[?] userId: {
-[-] type: String,
-[ ] required: true,
-[-] index: true
- },
-[?] foreignId: {
-[ ] type: String,
-[ ] required: true,
-[-] index: true
- },
-[?] foreignService: { // db.getCollection('follows').distinct('foreignService') returns 'organizations' and 'users'
-[ ] type: String,
-[ ] required: true,
-[ ] index: true
- },
-[ ] createdAt: {
-[ ] type: Date,
-[ ] default: Date.now
- },
-[ ] wasSeeded: { type: Boolean }
- }
- index:
-[?] { userId: 1, foreignId: 1, foreignService: 1 },{ unique: true } // is the unique constrain modeled?
-*/
-
-CALL apoc.load.json("file:${IMPORT_CHUNK_PATH_CQL_FILE}") YIELD value as follow
-MATCH (u1:User {id: follow.userId}), (u2:User {id: follow.foreignId})
-MERGE (u1)-[:FOLLOWS]->(u2)
-;
diff --git a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/import.sh b/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/import.sh
deleted file mode 100755
index ccb22dafb..000000000
--- a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/import.sh
+++ /dev/null
@@ -1,108 +0,0 @@
-#!/usr/bin/env bash
-set -e
-
-# import .env config
-set -o allexport
-source $(dirname "$0")/.env
-set +o allexport
-
-# Delete collection function defintion
-function delete_collection () {
- # Delete from Database
- echo "Delete $2"
- "${IMPORT_CYPHERSHELL_BIN}" < $(dirname "$0")/$1/delete.cql > /dev/null
- # Delete index file
- rm -f "${IMPORT_PATH}splits/$2.index"
-}
-
-# Import collection function defintion
-function import_collection () {
- # index file of those chunks we have already imported
- INDEX_FILE="${IMPORT_PATH}splits/$1.index"
- # load index file
- if [ -f "$INDEX_FILE" ]; then
- readarray -t IMPORT_INDEX <$INDEX_FILE
- else
- declare -a IMPORT_INDEX
- fi
- # for each chunk import data
- for chunk in ${IMPORT_PATH}splits/$1/*
- do
- CHUNK_FILE_NAME=$(basename "${chunk}")
- # does the index not contain the chunk file name?
- if [[ ! " ${IMPORT_INDEX[@]} " =~ " ${CHUNK_FILE_NAME} " ]]; then
- # calculate the path of the chunk
- export IMPORT_CHUNK_PATH_CQL_FILE="${IMPORT_CHUNK_PATH_CQL}$1/${CHUNK_FILE_NAME}"
- # load the neo4j command and replace file variable with actual path
- NEO4J_COMMAND="$(envsubst '${IMPORT_CHUNK_PATH_CQL_FILE}' < $(dirname "$0")/$2)"
- # run the import of the chunk
- echo "Import $1 ${CHUNK_FILE_NAME} (${chunk})"
- echo "${NEO4J_COMMAND}" | "${IMPORT_CYPHERSHELL_BIN}" > /dev/null
- # add file to array and file
- IMPORT_INDEX+=("${CHUNK_FILE_NAME}")
- echo "${CHUNK_FILE_NAME}" >> ${INDEX_FILE}
- else
- echo "Skipping $1 ${CHUNK_FILE_NAME} (${chunk})"
- fi
- done
-}
-
-# Time variable
-SECONDS=0
-
-# Delete all Neo4J Database content
-echo "Deleting Database Contents"
-delete_collection "badges" "badges"
-delete_collection "categories" "categories"
-delete_collection "users" "users"
-delete_collection "follows" "follows_users"
-delete_collection "contributions" "contributions_post"
-delete_collection "contributions" "contributions_cando"
-delete_collection "shouts" "shouts"
-delete_collection "comments" "comments"
-delete_collection "emotions" "emotions"
-
-#delete_collection "invites"
-#delete_collection "notifications"
-#delete_collection "organizations"
-#delete_collection "pages"
-#delete_collection "projects"
-#delete_collection "settings"
-#delete_collection "status"
-#delete_collection "systemnotifications"
-#delete_collection "userscandos"
-#delete_collection "usersettings"
-echo "DONE"
-
-# Import Data
-echo "Start Importing Data"
-import_collection "badges" "badges/badges.cql"
-import_collection "categories" "categories/categories.cql"
-import_collection "users_verified" "users/users.cql"
-import_collection "follows_users" "follows/follows.cql"
-#import_collection "follows_organizations" "follows/follows.cql"
-import_collection "contributions_post" "contributions/contributions.cql"
-#import_collection "contributions_cando" "contributions/contributions.cql"
-#import_collection "contributions_DELETED" "contributions/contributions.cql"
-import_collection "shouts" "shouts/shouts.cql"
-import_collection "comments" "comments/comments.cql"
-import_collection "emotions" "emotions/emotions.cql"
-
-# import_collection "invites"
-# import_collection "notifications"
-# import_collection "organizations"
-# import_collection "pages"
-# import_collection "systemnotifications"
-# import_collection "userscandos"
-# import_collection "usersettings"
-
-# does only contain dummy data
-# import_collection "projects"
-
-# does only contain alpha specifc data
-# import_collection "status
-# import_collection "settings""
-
-echo "DONE"
-
-echo "Time elapsed: $SECONDS seconds"
diff --git a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/invites/invites.cql b/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/invites/invites.cql
deleted file mode 100644
index f4a5bf006..000000000
--- a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/invites/invites.cql
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
-// Alpha Model
-// [ ] Not modeled in Nitro
-// [X] Modeled in Nitro
-// [-] Omitted in Nitro
-// [?] Unclear / has work to be done for Nitro
- {
-[ ] email: {
-[ ] type: String,
-[ ] required: true,
-[-] index: true,
-[ ] unique: true
- },
-[ ] code: {
-[ ] type: String,
-[-] index: true,
-[ ] required: true
- },
-[ ] role: {
-[ ] type: String,
-[ ] enum: ['admin', 'moderator', 'manager', 'editor', 'user'],
-[ ] default: 'user'
- },
-[ ] invitedByUserId: { type: String },
-[ ] language: { type: String },
-[ ] badgeIds: [],
-[ ] wasUsed: {
-[ ] type: Boolean,
-[-] index: true
- },
-[ ] createdAt: {
-[ ] type: Date,
-[ ] default: Date.now
- },
-[ ] wasSeeded: { type: Boolean }
- }
-*/
-
-CALL apoc.load.json("file:${IMPORT_CHUNK_PATH_CQL_FILE}") YIELD value as invite;
diff --git a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/notifications/notifications.cql b/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/notifications/notifications.cql
deleted file mode 100644
index aa6ac8eb9..000000000
--- a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/notifications/notifications.cql
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
-// Alpha Model
-// [ ] Not modeled in Nitro
-// [X] Modeled in Nitro
-// [-] Omitted in Nitro
-// [?] Unclear / has work to be done for Nitro
- {
-[ ] userId: { // User this notification is sent to
-[ ] type: String,
-[ ] required: true,
-[-] index: true
- },
-[ ] type: {
-[ ] type: String,
-[ ] required: true,
-[ ] enum: ['comment','comment-mention','contribution-mention','following-contribution']
- },
-[ ] relatedUserId: {
-[ ] type: String,
-[-] index: true
- },
-[ ] relatedContributionId: {
-[ ] type: String,
-[-] index: true
- },
-[ ] relatedOrganizationId: {
-[ ] type: String,
-[-] index: true
- },
-[ ] relatedCommentId: {type: String },
-[ ] unseen: {
-[ ] type: Boolean,
-[ ] default: true,
-[-] index: true
- },
-[ ] createdAt: {
-[ ] type: Date,
-[ ] default: Date.now
- },
-[ ] updatedAt: {
-[ ] type: Date,
-[ ] default: Date.now
- },
-[ ] wasSeeded: { type: Boolean }
- }
-*/
-
-CALL apoc.load.json("file:${IMPORT_CHUNK_PATH_CQL_FILE}") YIELD value as notification;
diff --git a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/organizations/delete.cql b/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/organizations/delete.cql
deleted file mode 100644
index e69de29bb..000000000
diff --git a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/organizations/organizations.cql b/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/organizations/organizations.cql
deleted file mode 100644
index e473e697c..000000000
--- a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/organizations/organizations.cql
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
-// Alpha Model
-// [ ] Not modeled in Nitro
-// [X] Modeled in Nitro
-// [-] Omitted in Nitro
-// [?] Unclear / has work to be done for Nitro
- {
-[ ] name: {
-[ ] type: String,
-[ ] required: true,
-[-] index: true
- },
-[ ] slug: {
-[ ] type: String,
-[ ] required: true,
-[ ] unique: true,
-[-] index: true
- },
-[ ] followersCounts: {
-[ ] users: {
-[ ] type: Number,
-[ ] default: 0
- },
-[ ] organizations: {
-[ ] type: Number,
-[ ] default: 0
- },
-[ ] projects: {
-[ ] type: Number,
-[ ] default: 0
- }
- },
-[ ] followingCounts: {
-[ ] users: {
-[ ] type: Number,
-[ ] default: 0
- },
-[ ] organizations: {
-[ ] type: Number,
-[ ] default: 0
- },
-[ ] projects: {
-[ ] type: Number,
-[ ] default: 0
- }
- },
-[ ] categoryIds: {
-[ ] type: Array,
-[ ] required: true,
-[-] index: true
- },
-[ ] logo: { type: String },
-[ ] coverImg: { type: String },
-[ ] userId: {
-[ ] type: String,
-[ ] required: true,
-[-] index: true
- },
-[ ] description: {
-[ ] type: String,
-[ ] required: true
- },
-[ ] descriptionExcerpt: { type: String }, // will be generated automatically
-[ ] publicEmail: { type: String },
-[ ] url: { type: String },
-[ ] type: {
-[ ] type: String,
-[-] index: true,
-[ ] enum: ['ngo', 'npo', 'goodpurpose', 'ev', 'eva']
- },
-[ ] language: {
-[ ] type: String,
-[ ] required: true,
-[ ] default: 'de',
-[-] index: true
- },
-[ ] addresses: {
-[ ] type: [{
-[ ] street: {
-[ ] type: String,
-[ ] required: true
- },
-[ ] zipCode: {
-[ ] type: String,
-[ ] required: true
- },
-[ ] city: {
-[ ] type: String,
-[ ] required: true
- },
-[ ] country: {
-[ ] type: String,
-[ ] required: true
- },
-[ ] lat: {
-[ ] type: Number,
-[ ] required: true
- },
-[ ] lng: {
-[ ] type: Number,
-[ ] required: true
- }
- }],
-[ ] default: []
- },
-[ ] createdAt: {
-[ ] type: Date,
-[ ] default: Date.now
- },
-[ ] updatedAt: {
-[ ] type: Date,
-[ ] default: Date.now
- },
-[ ] isEnabled: {
-[ ] type: Boolean,
-[ ] default: false,
-[-] index: true
- },
-[ ] reviewedBy: {
-[ ] type: String,
-[ ] default: null,
-[-] index: true
- },
-[ ] tags: {
-[ ] type: Array,
-[-] index: true
- },
-[ ] deleted: {
-[ ] type: Boolean,
-[ ] default: false,
-[-] index: true
- },
-[ ] wasSeeded: { type: Boolean }
- }
-*/
-
-CALL apoc.load.json("file:${IMPORT_CHUNK_PATH_CQL_FILE}") YIELD value as organisation;
diff --git a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/pages/delete.cql b/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/pages/delete.cql
deleted file mode 100644
index e69de29bb..000000000
diff --git a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/pages/pages.cql b/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/pages/pages.cql
deleted file mode 100644
index 18223136b..000000000
--- a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/pages/pages.cql
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
-// Alpha Model
-// [ ] Not modeled in Nitro
-// [X] Modeled in Nitro
-// [-] Omitted in Nitro
-// [?] Unclear / has work to be done for Nitro
- {
-[ ] title: {
-[ ] type: String,
-[ ] required: true
- },
-[ ] slug: {
-[ ] type: String,
-[ ] required: true,
-[-] index: true
- },
-[ ] type: {
-[ ] type: String,
-[ ] required: true,
-[ ] default: 'page'
- },
-[ ] key: {
-[ ] type: String,
-[ ] required: true,
-[-] index: true
- },
-[ ] content: {
-[ ] type: String,
-[ ] required: true
- },
-[ ] language: {
-[ ] type: String,
-[ ] required: true,
-[-] index: true
- },
-[ ] active: {
-[ ] type: Boolean,
-[ ] default: true,
-[-] index: true
- },
-[ ] createdAt: {
-[ ] type: Date,
-[ ] default: Date.now
- },
-[ ] updatedAt: {
-[ ] type: Date,
-[ ] default: Date.now
- },
-[ ] wasSeeded: { type: Boolean }
- }
- index:
-[ ] { slug: 1, language: 1 },{ unique: true }
-*/
-
-CALL apoc.load.json("file:${IMPORT_CHUNK_PATH_CQL_FILE}") YIELD value as page;
diff --git a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/projects/delete.cql b/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/projects/delete.cql
deleted file mode 100644
index e69de29bb..000000000
diff --git a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/projects/projects.cql b/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/projects/projects.cql
deleted file mode 100644
index ed859c157..000000000
--- a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/projects/projects.cql
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
-// Alpha Model
-// [ ] Not modeled in Nitro
-// [X] Modeled in Nitro
-// [-] Omitted in Nitro
-// [?] Unclear / has work to be done for Nitro
- {
-[ ] name: {
-[ ] type: String,
-[ ] required: true
- },
-[ ] slug: { type: String },
-[ ] followerIds: [],
-[ ] categoryIds: { type: Array },
-[ ] logo: { type: String },
-[ ] userId: {
-[ ] type: String,
-[ ] required: true
- },
-[ ] description: {
-[ ] type: String,
-[ ] required: true
- },
-[ ] content: {
-[ ] type: String,
-[ ] required: true
- },
-[ ] addresses: {
-[ ] type: Array,
-[ ] default: []
- },
-[ ] createdAt: {
-[ ] type: Date,
-[ ] default: Date.now
- },
-[ ] updatedAt: {
-[ ] type: Date,
-[ ] default: Date.now
- },
-[ ] wasSeeded: { type: Boolean }
- }
-*/
-
-CALL apoc.load.json("file:${IMPORT_CHUNK_PATH_CQL_FILE}") YIELD value as project;
diff --git a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/settings/delete.cql b/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/settings/delete.cql
deleted file mode 100644
index e69de29bb..000000000
diff --git a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/settings/settings.cql b/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/settings/settings.cql
deleted file mode 100644
index 1d557d30c..000000000
--- a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/settings/settings.cql
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
-// Alpha Model
-// [ ] Not modeled in Nitro
-// [X] Modeled in Nitro
-// [-] Omitted in Nitro
-// [?] Unclear / has work to be done for Nitro
- {
-[ ] key: {
-[ ] type: String,
-[ ] default: 'system',
-[-] index: true,
-[ ] unique: true
- },
-[ ] invites: {
-[ ] userCanInvite: {
-[ ] type: Boolean,
-[ ] required: true,
-[ ] default: false
- },
-[ ] maxInvitesByUser: {
-[ ] type: Number,
-[ ] required: true,
-[ ] default: 1
- },
-[ ] onlyUserWithBadgesCanInvite: {
-[ ] type: Array,
-[ ] default: []
- }
- },
-[ ] maintenance: false
- }, {
-[ ] timestamps: true
- }
-*/
-
-CALL apoc.load.json("file:${IMPORT_CHUNK_PATH_CQL_FILE}") YIELD value as setting;
diff --git a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/shouts/delete.cql b/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/shouts/delete.cql
deleted file mode 100644
index 21c2e1f90..000000000
--- a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/shouts/delete.cql
+++ /dev/null
@@ -1 +0,0 @@
-// this is just a relation between users and contributions - no need to delete
\ No newline at end of file
diff --git a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/shouts/shouts.cql b/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/shouts/shouts.cql
deleted file mode 100644
index d370b4b4a..000000000
--- a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/shouts/shouts.cql
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
-// Alpha Model
-// [ ] Not modeled in Nitro
-// [X] Modeled in Nitro
-// [-] Omitted in Nitro
-// [?] Unclear / has work to be done for Nitro
- {
-[?] userId: {
-[X] type: String,
-[ ] required: true, // Not required in Nitro
-[-] index: true
- },
-[?] foreignId: {
-[X] type: String,
-[ ] required: true, // Not required in Nitro
-[-] index: true
- },
-[?] foreignService: { // db.getCollection('shots').distinct('foreignService') returns 'contributions'
-[X] type: String,
-[ ] required: true, // Not required in Nitro
-[-] index: true
- },
-[ ] createdAt: {
-[ ] type: Date,
-[ ] default: Date.now
- },
-[ ] wasSeeded: { type: Boolean }
- }
- index:
-[?] { userId: 1, foreignId: 1 },{ unique: true } // is the unique constrain modeled?
-*/
-
-CALL apoc.load.json("file:${IMPORT_CHUNK_PATH_CQL_FILE}") YIELD value as shout
-MATCH (u:User {id: shout.userId}), (p:Post {id: shout.foreignId})
-MERGE (u)-[:SHOUTED]->(p)
-;
diff --git a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/status/delete.cql b/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/status/delete.cql
deleted file mode 100644
index e69de29bb..000000000
diff --git a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/status/status.cql b/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/status/status.cql
deleted file mode 100644
index 010c2ca09..000000000
--- a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/status/status.cql
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
-// Alpha Model
-// [ ] Not modeled in Nitro
-// [X] Modeled in Nitro
-// [-] Omitted in Nitro
-// [?] Unclear / has work to be done for Nitro
- {
-[ ] maintenance: {
-[ ] type: Boolean,
-[ ] default: false
- },
-[ ] updatedAt: {
-[ ] type: Date,
-[ ] default: Date.now
- }
- }
-*/
-
-CALL apoc.load.json("file:${IMPORT_CHUNK_PATH_CQL_FILE}") YIELD value as status;
diff --git a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/systemnotifications/delete.cql b/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/systemnotifications/delete.cql
deleted file mode 100644
index e69de29bb..000000000
diff --git a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/systemnotifications/systemnotifications.cql b/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/systemnotifications/systemnotifications.cql
deleted file mode 100644
index 4bd33eb7c..000000000
--- a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/systemnotifications/systemnotifications.cql
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
-// Alpha Model
-// [ ] Not modeled in Nitro
-// [X] Modeled in Nitro
-// [-] Omitted in Nitro
-// [?] Unclear / has work to be done for Nitro
- {
-[ ] type: {
-[ ] type: String,
-[ ] default: 'info',
-[ ] required: true,
-[-] index: true
- },
-[ ] title: {
-[ ] type: String,
-[ ] required: true
- },
-[ ] content: {
-[ ] type: String,
-[ ] required: true
- },
-[ ] slot: {
-[ ] type: String,
-[ ] required: true,
-[-] index: true
- },
-[ ] language: {
-[ ] type: String,
-[ ] required: true,
-[-] index: true
- },
-[ ] permanent: {
-[ ] type: Boolean,
-[ ] default: false
- },
-[ ] requireConfirmation: {
-[ ] type: Boolean,
-[ ] default: false
- },
-[ ] active: {
-[ ] type: Boolean,
-[ ] default: true,
-[-] index: true
- },
-[ ] totalCount: {
-[ ] type: Number,
-[ ] default: 0
- },
-[ ] createdAt: {
-[ ] type: Date,
-[ ] default: Date.now
- },
-[ ] updatedAt: {
-[ ] type: Date,
-[ ] default: Date.now
- },
-[ ] wasSeeded: { type: Boolean }
- }
-*/
-
-CALL apoc.load.json("file:${IMPORT_CHUNK_PATH_CQL_FILE}") YIELD value as systemnotification;
diff --git a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/users/delete.cql b/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/users/delete.cql
deleted file mode 100644
index 32679f6c8..000000000
--- a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/users/delete.cql
+++ /dev/null
@@ -1,2 +0,0 @@
-MATCH (n:User) DETACH DELETE n;
-MATCH (e:EmailAddress) DETACH DELETE e;
diff --git a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/users/users.cql b/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/users/users.cql
deleted file mode 100644
index 02dff089f..000000000
--- a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/users/users.cql
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
-// Alpha Model
-// [ ] Not modeled in Nitro
-// [X] Modeled in Nitro
-// [-] Omitted in Nitro
-// [?] Unclear / has work to be done for Nitro
- {
-[?] email: {
-[X] type: String,
-[-] index: true,
-[X] required: true,
-[?] unique: true //unique constrain missing in Nitro
- },
-[?] password: { // Not required in Alpha -> verify if always present
-[X] type: String
- },
-[X] name: { type: String },
-[X] slug: {
-[X] type: String,
-[-] index: true
- },
-[ ] gender: { type: String },
-[ ] followersCounts: {
-[ ] users: {
-[ ] type: Number,
-[ ] default: 0
- },
-[ ] organizations: {
-[ ] type: Number,
-[ ] default: 0
- },
-[ ] projects: {
-[ ] type: Number,
-[ ] default: 0
- }
- },
-[ ] followingCounts: {
-[ ] users: {
-[ ] type: Number,
-[ ] default: 0
- },
-[ ] organizations: {
-[ ] type: Number,
-[ ] default: 0
- },
-[ ] projects: {
-[ ] type: Number,
-[ ] default: 0
- }
- },
-[ ] timezone: { type: String },
-[X] avatar: { type: String },
-[X] coverImg: { type: String },
-[ ] doiToken: { type: String },
-[ ] confirmedAt: { type: Date },
-[?] badgeIds: [], // Verify this is working properly
-[?] deletedAt: { type: Date }, // The Date of deletion is not saved in Nitro
-[?] createdAt: {
-[?] type: Date, // Modeled as String in Nitro
-[ ] default: Date.now // Default value is missing in Nitro
- },
-[?] updatedAt: {
-[?] type: Date, // Modeled as String in Nitro
-[ ] default: Date.now // Default value is missing in Nitro
- },
-[ ] lastActiveAt: {
-[ ] type: Date,
-[ ] default: Date.now
- },
-[ ] isVerified: { type: Boolean },
-[?] role: {
-[X] type: String,
-[-] index: true,
-[?] enum: ['admin', 'moderator', 'manager', 'editor', 'user'], // missing roles manager & editor in Nitro
-[ ] default: 'user' // Default value is missing in Nitro
- },
-[ ] verifyToken: { type: String },
-[ ] verifyShortToken: { type: String },
-[ ] verifyExpires: { type: Date },
-[ ] verifyChanges: { type: Object },
-[ ] resetToken: { type: String },
-[ ] resetShortToken: { type: String },
-[ ] resetExpires: { type: Date },
-[X] wasSeeded: { type: Boolean },
-[X] wasInvited: { type: Boolean },
-[ ] language: {
-[ ] type: String,
-[ ] default: 'en'
- },
-[ ] termsAndConditionsAccepted: { type: Date }, // we display the terms and conditions on registration
-[ ] systemNotificationsSeen: {
-[ ] type: Array,
-[ ] default: []
- }
- }
-*/
-CALL apoc.load.json("file:${IMPORT_CHUNK_PATH_CQL_FILE}") YIELD value as user
-MERGE(u:User {id: user._id["$oid"]})
-ON CREATE SET
-u.name = user.name,
-u.slug = COALESCE(user.slug, apoc.text.random(20, "[A-Za-z]")),
-u.email = user.email,
-u.encryptedPassword = user.password,
-u.avatar = replace(user.avatar, 'https://api-alpha.human-connection.org', ''),
-u.coverImg = replace(user.coverImg, 'https://api-alpha.human-connection.org', ''),
-u.wasInvited = user.wasInvited,
-u.wasSeeded = user.wasSeeded,
-u.role = toLower(user.role),
-u.createdAt = user.createdAt.`$date`,
-u.updatedAt = user.updatedAt.`$date`,
-u.deleted = user.deletedAt IS NOT NULL,
-u.disabled = false
-MERGE (e:EmailAddress {
- email: user.email,
- createdAt: toString(datetime()),
- verifiedAt: toString(datetime())
-})
-MERGE (e)-[:BELONGS_TO]->(u)
-MERGE (u)-[:PRIMARY_EMAIL]->(e)
-WITH u, user, user.badgeIds AS badgeIds
-UNWIND badgeIds AS badgeId
-MATCH (b:Badge {id: badgeId})
-MERGE (b)-[:REWARDED]->(u)
-;
diff --git a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/userscandos/delete.cql b/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/userscandos/delete.cql
deleted file mode 100644
index e69de29bb..000000000
diff --git a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/userscandos/userscandos.cql b/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/userscandos/userscandos.cql
deleted file mode 100644
index 55f58f171..000000000
--- a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/userscandos/userscandos.cql
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
-// Alpha Model
-// [ ] Not modeled in Nitro
-// [X] Modeled in Nitro
-// [-] Omitted in Nitro
-// [?] Unclear / has work to be done for Nitro
- {
-[ ] userId: {
-[ ] type: String,
-[ ] required: true
- },
-[ ] contributionId: {
-[ ] type: String,
-[ ] required: true
- },
-[ ] done: {
-[ ] type: Boolean,
-[ ] default: false
- },
-[ ] doneAt: { type: Date },
-[ ] createdAt: {
-[ ] type: Date,
-[ ] default: Date.now
- },
-[ ] updatedAt: {
-[ ] type: Date,
-[ ] default: Date.now
- },
-[ ] wasSeeded: { type: Boolean }
- }
- index:
-[ ] { userId: 1, contributionId: 1 },{ unique: true }
-*/
-
-CALL apoc.load.json("file:${IMPORT_CHUNK_PATH_CQL_FILE}") YIELD value as usercando;
diff --git a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/usersettings/delete.cql b/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/usersettings/delete.cql
deleted file mode 100644
index e69de29bb..000000000
diff --git a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/usersettings/usersettings.cql b/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/usersettings/usersettings.cql
deleted file mode 100644
index 722625944..000000000
--- a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/usersettings/usersettings.cql
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
-// Alpha Model
-// [ ] Not modeled in Nitro
-// [X] Modeled in Nitro
-// [-] Omitted in Nitro
-// [?] Unclear / has work to be done for Nitro
- {
-[ ] userId: {
-[ ] type: String,
-[ ] required: true,
-[ ] unique: true
- },
-[ ] blacklist: {
-[ ] type: Array,
-[ ] default: []
- },
-[ ] uiLanguage: {
-[ ] type: String,
-[ ] required: true
- },
-[ ] contentLanguages: {
-[ ] type: Array,
-[ ] default: []
- },
-[ ] filter: {
-[ ] categoryIds: {
-[ ] type: Array,
-[ ] index: true
- },
-[ ] emotions: {
-[ ] type: Array,
-[ ] index: true
- }
- },
-[ ] hideUsersWithoutTermsOfUseSigniture: {type: Boolean},
-[ ] updatedAt: {
-[ ] type: Date,
-[ ] default: Date.now
- }
- }
-*/
-
-CALL apoc.load.json("file:${IMPORT_CHUNK_PATH_CQL_FILE}") YIELD value as usersetting;
diff --git a/deployment/src/old/mailserver/Deployment.yaml b/deployment/src/old/mailserver/Deployment.yaml
deleted file mode 100644
index a36e1652e..000000000
--- a/deployment/src/old/mailserver/Deployment.yaml
+++ /dev/null
@@ -1,40 +0,0 @@
-{{- if .Values.developmentMailserverDomain }}
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: {{ .Release.Name }}-mailserver
- labels:
- app.kubernetes.io/instance: {{ .Release.Name }}
- app.kubernetes.io/managed-by: {{ .Release.Service }}
- app.kubernetes.io/name: ocelot-social
- app.kubernetes.io/version: {{ .Chart.AppVersion }}
- helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
-spec:
- replicas: 1
- minReadySeconds: 15
- progressDeadlineSeconds: 60
- selector:
- matchLabels:
- ocelot.social/selector: deployment-mailserver
- template:
- metadata:
- labels:
- ocelot.social/selector: deployment-mailserver
- name: mailserver
- spec:
- containers:
- - name: mailserver
- image: djfarrelly/maildev
- imagePullPolicy: {{ .Values.image.pullPolicy }}
- ports:
- - containerPort: 80
- - containerPort: 25
- envFrom:
- - configMapRef:
- name: {{ .Release.Name }}-configmap
- - secretRef:
- name: {{ .Release.Name }}-secrets
- restartPolicy: Always
- terminationGracePeriodSeconds: 30
-status: {}
-{{- end}}
\ No newline at end of file
diff --git a/deployment/src/old/mailserver/README.md b/deployment/src/old/mailserver/README.md
deleted file mode 100644
index ed9292d5c..000000000
--- a/deployment/src/old/mailserver/README.md
+++ /dev/null
@@ -1,18 +0,0 @@
-# Development Mail Server
-
-You can deploy a fake smtp server which captures all send mails and displays
-them in a web interface. The [sample configuration](../templates/configmap.template.yaml)
-is assuming such a dummy server in the `SMTP_HOST` configuration and points to
-a cluster-internal SMTP server.
-
-To deploy the SMTP server just uncomment the relevant code in the
-[ingress server configuration](../../https/templates/ingress.template.yaml) and
-run the following:
-
-```bash
-# in folder deployment/ocelot-social
-$ kubectl apply -f mailserver/
-```
-
-You might need to refresh the TLS secret to enable HTTPS on the publicly
-available web interface.
diff --git a/deployment/src/old/mailserver/Service.yaml b/deployment/src/old/mailserver/Service.yaml
deleted file mode 100644
index bba734967..000000000
--- a/deployment/src/old/mailserver/Service.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
-{{- if .Values.developmentMailserverDomain }}
-apiVersion: v1
-kind: Service
-metadata:
- name: {{ .Release.Name }}-mailserver
- labels:
- app.kubernetes.io/instance: {{ .Release.Name }}
- app.kubernetes.io/managed-by: {{ .Release.Service }}
- app.kubernetes.io/name: ocelot-social
- app.kubernetes.io/version: {{ .Chart.AppVersion }}
- helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
-spec:
- ports:
- - name: web
- port: 80
- targetPort: 80
- - name: smtp
- port: 25
- targetPort: 25
- selector:
- ocelot.social/selector: deployment-mailserver
-{{- end}}
\ No newline at end of file
diff --git a/deployment/src/old/mailserver/ingress.yaml b/deployment/src/old/mailserver/ingress.yaml
deleted file mode 100644
index 1ea9c58be..000000000
--- a/deployment/src/old/mailserver/ingress.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
-kind: Ingress
-apiVersion: networking.k8s.io/v1
-metadata:
- name: ingress-{{ .Release.Name }}-webapp
- labels:
- app.kubernetes.io/name: "{{ .Chart.Name }}"
- app.kubernetes.io/instance: "{{ .Release.Name }}"
- app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
- app.kubernetes.io/component: "ingress webapp"
- app.kubernetes.io/part-of: "{{ .Chart.Name }}"
- app.kubernetes.io/managed-by: "{{ .Release.Service }}"
- helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
- annotations:
- kubernetes.io/ingress.class: "nginx"
- cert-manager.io/cluster-issuer: {{ .Values.LETSENCRYPT.ISSUER }}
- nginx.ingress.kubernetes.io/proxy-body-size: {{ .Values.NGINX.PROXY_BODY_SIZE }}
-spec:
- tls:
- - hosts:
- - {{ .Values.LETSENCRYPT.DOMAIN }}
- secretName: tls
- rules:
- - host: {{ .Values.LETSENCRYPT.DOMAIN }}
- http:
- paths:
- - path: /
- pathType: ImplementationSpecific
- backend:
- service:
- name: {{ .Release.Name }}-webapp
- port:
- number: 3000
-
-#{{- if .Values.developmentMailserverDomain }}
-# - host: {{ .Values.developmentMailserverDomain }}
-# http:
-# paths:
-# - path: /
-# backend:
-# serviceName: {{ .Release.Name }}-mailserver
-# servicePort: 80
-#{{- end }}
diff --git a/deployment/src/old/monitoring/README.md b/deployment/src/old/monitoring/README.md
deleted file mode 100644
index 46dfb0301..000000000
--- a/deployment/src/old/monitoring/README.md
+++ /dev/null
@@ -1,43 +0,0 @@
-# Metrics
-
-You can optionally setup [prometheus](https://prometheus.io/) and
-[grafana](https://grafana.com/) for metrics.
-
-We follow this tutorial [here](https://medium.com/@chris_linguine/how-to-monitor-your-kubernetes-cluster-with-prometheus-and-grafana-2d5704187fc8):
-
-```bash
-kubectl proxy # proxy to your kubernetes dashboard
-
-helm repo list
-# If using helm v3, the stable repository is not set, so you need to manually add it.
-helm repo add stable https://kubernetes-charts.storage.googleapis.com
-# Create a monitoring namespace for your cluster
-kubectl create namespace monitoring
-helm --namespace monitoring install prometheus stable/prometheus
-kubectl -n monitoring get pods # look for 'server'
-kubectl port-forward -n monitoring 9090
-# You can now see your prometheus server on: http://localhost:9090
-
-# Make sure you are in folder `deployment/`
-kubectl apply -f monitoring/grafana/config.yml
-helm --namespace monitoring install grafana stable/grafana -f monitoring/grafana/values.yml
-# Get the admin password for grafana from your kubernetes dashboard.
-kubectl --namespace monitoring port-forward 3000
-# You can now see your grafana dashboard on: http://localhost:3000
-# Login with user 'admin' and the password you just looked up.
-# In your dashboard import this dashboard:
-# https://grafana.com/grafana/dashboards/1860
-# Enter ID 180 and choose "Prometheus" as datasource.
-# You got metrics!
-```
-
-Now you should see something like this:
-
-
-
-You can set up a grafana dashboard, by visiting https://grafana.com/dashboards, finding one that is suitable and copying it's id.
-You then go to the left hand menu in localhost, choose `Dashboard` > `Manage` > `Import`
-Paste in the id, click `Load`, select `Prometheus` for the data source, and click `Import`
-
-When you just installed prometheus and grafana, the data will not be available
-immediately, so wait for a couple of minutes and reload.
diff --git a/deployment/src/old/monitoring/grafana/config.yml b/deployment/src/old/monitoring/grafana/config.yml
deleted file mode 100644
index a338e3480..000000000
--- a/deployment/src/old/monitoring/grafana/config.yml
+++ /dev/null
@@ -1,16 +0,0 @@
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: prometheus-grafana-datasource
- namespace: monitoring
- labels:
- grafana_datasource: '1'
-data:
- datasource.yaml: |-
- apiVersion: 1
- datasources:
- - name: Prometheus
- type: prometheus
- access: proxy
- orgId: 1
- url: http://prometheus-server.monitoring.svc.cluster.local
diff --git a/deployment/src/old/monitoring/grafana/metrics.png b/deployment/src/old/monitoring/grafana/metrics.png
deleted file mode 100644
index cc68f1bad..000000000
Binary files a/deployment/src/old/monitoring/grafana/metrics.png and /dev/null differ
diff --git a/deployment/src/old/monitoring/grafana/values.yml b/deployment/src/old/monitoring/grafana/values.yml
deleted file mode 100644
index 02004cc1c..000000000
--- a/deployment/src/old/monitoring/grafana/values.yml
+++ /dev/null
@@ -1,4 +0,0 @@
-sidecar:
- datasources:
- enabled: true
- label: grafana_datasource
diff --git a/deployment/src/old/volumes/README.md b/deployment/src/old/volumes/README.md
deleted file mode 100644
index 00619d33a..000000000
--- a/deployment/src/old/volumes/README.md
+++ /dev/null
@@ -1,37 +0,0 @@
-# Persistent Volumes
-
-At the moment, the application needs two persistent volumes:
-
-* The `/data/` folder where `neo4j` stores its database and
-* the folder `/develop-backend/public/uploads` where the backend stores uploads, in case you don't use DigitalOcean Spaces (an AWS S3 bucket) for this purpose.
-
-As a matter of precaution, the persistent volume claims that setup these volumes
-live in a separate folder. You don't want to accidently loose all your data in
-your database by running
-
-```sh
-kubectl delete -f ocelot-social/
-```
-
-or do you?
-
-## Create Persistent Volume Claims
-
-Run the following:
-
-```sh
-# in folder deployments/
-$ kubectl apply -f volumes
-persistentvolumeclaim/neo4j-data-claim created
-persistentvolumeclaim/uploads-claim created
-```
-
-## Backup And Restore
-
-We tested a couple of options how to do disaster recovery in kubernetes. First,
-there is the [offline backup strategy](./neo4j-offline-backup/README.md) of the
-community edition of Neo4J, which you can also run on a local installation.
-Kubernetes also offers so-called [volume snapshots](./volume-snapshots/README.md).
-Changing the [reclaim policy](./reclaim-policy/README.md) of your persistent
-volumes might be an additional safety measure. Finally, there is also a
-kubernetes specific disaster recovery tool called [Velero](./velero/README.md).
diff --git a/deployment/src/old/volumes/neo4j-offline-backup/README.md b/deployment/src/old/volumes/neo4j-offline-backup/README.md
deleted file mode 100644
index 2d8a848a3..000000000
--- a/deployment/src/old/volumes/neo4j-offline-backup/README.md
+++ /dev/null
@@ -1,88 +0,0 @@
-# Backup (offline)
-
-This tutorial explains how to carry out an offline backup of your Neo4J
-database in a kubernetes cluster.
-
-An offline backup requires the Neo4J database to be stopped. Read
-[the docs](https://neo4j.com/docs/operations-manual/current/tools/dump-load/).
-Neo4J also offers online backups but this is available in enterprise edition
-only.
-
-The tricky part is to stop the Neo4J database *without* stopping the container.
-Neo4J's docker container image starts `neo4j` by default, so we have to override
-this command with sth. that keeps the container spinning but does not terminate
-it.
-
-## Stop and Restart Neo4J Database in Kubernetes
-
-[This tutorial](http://bigdatums.net/2017/11/07/how-to-keep-docker-containers-running/)
-explains how to keep a docker container running. For kubernetes, the way to
-override the docker image `CMD` is explained [here](https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#define-a-command-and-arguments-when-you-create-a-pod).
-
-So, all we have to do is edit the kubernetes deployment of our Neo4J database
-and set a custom `command` every time we have to carry out tasks like backup,
-restore, seed etc.
-
-First bring the application into [maintenance mode](https://github.com/Ocelot-Social-Community/Ocelot-Social/blob/master/deployment/ocelot-social/maintenance/README.md) to ensure there are no
-database connections left and nobody can access the application.
-
-Run the following:
-
-```sh
-$ kubectl -n ocelot-social edit deployment develop-neo4j
-```
-
-Add the following to `spec.template.spec.containers`:
-
-```sh
-["tail", "-f", "/dev/null"]
-```
-
-and write the file which will update the deployment.
-
-The command `tail -f /dev/null` is the equivalent of *sleep forever*. It is a
-hack to keep the container busy and to prevent its shutdown. It will also
-override the default `neo4j` command and the kubernetes pod will not start the
-database.
-
-Now perform your tasks!
-
-When you're done, edit the deployment again and remove the `command`. Write the
-file and trigger an update of the deployment.
-
-## Create a Backup in Kubernetes
-
-First stop your Neo4J database, see above. Then:
-
-```sh
-$ kubectl -n ocelot-social get pods
-# Copy the ID of the pod running Neo4J.
-$ kubectl -n ocelot-social exec -it bash
-# Once you're in the pod, dump the db to a file e.g. `/root/neo4j-backup`.
-> neo4j-admin dump --to=/root/neo4j-backup
-> exit
-# Download the file from the pod to your computer.
-$ kubectl cp human-connection/:/root/neo4j-backup ./neo4j-backup
-```
-
-Revert your changes to deployment `develop-neo4j` which will restart the database.
-
-## Restore a Backup in Kubernetes
-
-First stop your Neo4J database. Then:
-
-```sh
-$ kubectl -n ocelot-social get pods
-# Copy the ID of the pod running Neo4J.
-# Then upload your local backup to the pod. Note that once the pod gets deleted
-# e.g. if you change the deployment, the backup file is gone with it.
-$ kubectl cp ./neo4j-backup human-connection/:/root/
-$ kubectl -n ocelot-social exec -it bash
-# Once you're in the pod restore the backup and overwrite the default database
-# called `neo4j` with `--force`.
-# This will delete all existing data in database `neo4j`!
-> neo4j-admin load --from=/root/neo4j-backup --force
-> exit
-```
-
-Revert your changes to deployment `develop-neo4j` which will restart the database.
diff --git a/deployment/src/old/volumes/neo4j-online-backup/README.md b/deployment/src/old/volumes/neo4j-online-backup/README.md
deleted file mode 100644
index 602bbd577..000000000
--- a/deployment/src/old/volumes/neo4j-online-backup/README.md
+++ /dev/null
@@ -1,59 +0,0 @@
-# Backup (online)
-
-## Online backups are only avaible with a Neo4j Enterprise and a license, see https://neo4j.com/licensing/ for the different licenses available
-
-This tutorial explains how to carry out an online backup of your Neo4J
-database in a kubernetes cluster.
-
-One of the benefits of doing an online backup is that the Neo4j database does not need to be stopped, so there is no downtime. Read [the docs](https://neo4j.com/docs/operations-manual/current/backup/performing/)
-
-To use Neo4j Enterprise you must add this line to your configmap, if using, or your deployment `develop-neo4j` env.
-
-```sh
-NEO4J_ACCEPT_LICENSE_AGREEMENT: "yes"
-```
-
-## Create a Backup in Kubernetes
-
-```sh
-# Backup the database with one command, this will get the develop-neo4j pod, ssh into it, and run the backup command
-$ kubectl -n=human-connection exec -it $(kubectl -n=human-connection get pods | grep develop-neo4j | awk '{ print $1 }') -- neo4j-admin backup --backup-dir=/var/lib/neo4j --name=neo4j-backup
-# Download the file from the pod to your computer.
-$ kubectl cp human-connection/$(kubectl -n=human-connection get pods | grep develop-neo4j | awk '{ print $1 }'):/var/lib/neo4j/neo4j-backup ./neo4j-backup/
-```
-
-You should now have a backup of the database locally. If you want, you can simulate disaster recovery by sshing into the develop-neo4j pod, deleting all data and restoring from backup
-
-## Disaster where database data is gone somehow
-
-```sh
-$ kubectl -n=human-connection exec -it $(kubectl -n=human-connection get pods | grep develop-neo4j |awk '{ print $1 }') bash
-# Enter cypher-shell
-$ cypher-shell
-# Delete all data
-> MATCH (n) DETACH DELETE (n);
-
-> exit
-```
-
-## Restore a backup in Kubernetes
-
-Restoration must be done while the database is not running, see [our docs](https://docs.human-connection.org/human-connection/deployment/volumes/neo4j-offline-backup#stop-and-restart-neo-4-j-database-in-kubernetes) for how to stop the database, but keep the container running
-
-After, you have stopped the database, and have the pod running, you can restore the database by running these commands:
-
-```sh
-$ kubectl -n ocelot-social get pods
-# Copy the ID of the pod running Neo4J.
-# Then upload your local backup to the pod. Note that once the pod gets deleted
-# e.g. if you change the deployment, the backup file is gone with it.
-$ kubectl cp ./neo4j-backup/ human-connection/:/root/
-$ kubectl -n ocelot-social exec -it bash
-# Once you're in the pod restore the backup and overwrite the default database
-# called `graph.db` with `--force`.
-# This will delete all existing data in database `graph.db`!
-> neo4j-admin restore --from=/root/neo4j-backup --force
-> exit
-```
-
-Revert your changes to deployment `develop-neo4j` which will restart the database.
diff --git a/deployment/src/old/volumes/uploads.yaml b/deployment/src/old/volumes/uploads.yaml
deleted file mode 100644
index 45e1292a8..000000000
--- a/deployment/src/old/volumes/uploads.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
----
- kind: PersistentVolumeClaim
- apiVersion: v1
- metadata:
- name: uploads-claim
- namespace: ocelot-social
- spec:
- accessModes:
- - ReadWriteOnce
- resources:
- requests:
- storage: "10Gi"
diff --git a/deployment/src/old/volumes/velero/README.md b/deployment/src/old/volumes/velero/README.md
deleted file mode 100644
index bf63f13c8..000000000
--- a/deployment/src/old/volumes/velero/README.md
+++ /dev/null
@@ -1,112 +0,0 @@
-# Velero
-
-{% hint style="danger" %}
-I tried Velero and it did not work reliably all the time. Sometimes the
-kubernetes cluster crashes during recovery or data is not fully recovered.
-
-Feel free to test it out and update this documentation once you feel that it's
-working reliably. It is very likely that DigitalOcean had some bugs when I
-tried out the steps below.
-{% endhint %}
-
-We use [velero](https://github.com/heptio/velero) for on premise backups, we
-tested on version `v0.11.0`, you can find their
-documentation [here](https://heptio.github.io/velero/v0.11.0/).
-
-Our kubernets configurations adds some annotations to pods. The annotations
-define the important persistent volumes that need to be backed up. Velero will
-pick them up and store the volumes in the same cluster but in another namespace
-`velero`.
-
-## Prequisites
-
-You have to install the binary `velero` on your computer and get a tarball of
-the latest release. We use `v0.11.0` so visit the
-[release](https://github.com/heptio/velero/releases/tag/v0.11.0) page and
-download and extract e.g. [velero-v0.11.0-linux-arm64.tar.gz](https://github.com/heptio/velero/releases/download/v0.11.0/velero-v0.11.0-linux-amd64.tar.gz).
-
-
-## Setup Velero Namespace
-
-Follow their [getting started](https://heptio.github.io/velero/v0.11.0/get-started)
-instructions to setup the Velero namespace. We use
-[Minio](https://docs.min.io/docs/deploy-minio-on-kubernetes) and
-[restic](https://github.com/restic/restic), so check out Velero's instructions
-how to setup [restic](https://heptio.github.io/velero/v0.11.0/restic):
-
-```sh
-# run from the extracted folder of the tarball
-$ kubectl apply -f config/common/00-prereqs.yaml
-$ kubectl apply -f config/minio/
-```
-
-Once completed, you should see the namespace in your kubernetes dashboard.
-
-## Manually Create an On-Premise Backup
-
-When you create your deployments for Human Connection the required annotations
-should already be in place. So when you create a backup of namespace
-`human-connection`:
-
-```sh
-$ velero backup create hc-backup --include-namespaces=human-connection
-```
-
-That should backup your persistent volumes, too. When you enter:
-
-```sh
-$ velero backup describe hc-backup --details
-```
-
-You should see the persistent volumes at the end of the log:
-
-```sh
-....
-
-Restic Backups:
- Completed:
- human-connection/develop-backend-5b6dd96d6b-q77n6: uploads
- human-connection/develop-neo4j-686d768598-z2vhh: neo4j-data
-```
-
-## Simulate a Disaster
-
-Feel free to try out if you loose any data when you simulate a disaster and try
-to restore the namespace from the backup:
-
-```sh
-$ kubectl delete namespace human-connection
-```
-
-Wait until the wrongdoing has completed, then:
-```sh
-$ velero restore create --from-backup hc-backup
-```
-
-Now, I keep my fingers crossed that everything comes back again. If not, I feel
-very sorry for you.
-
-
-## Schedule a Regular Backup
-
-Check out the [docs](https://heptio.github.io/velero/v0.11.0/get-started). You
-can create a regular schedule e.g. with:
-
-```sh
-$ velero schedule create hc-weekly-backup --schedule="@weekly" --include-namespaces=human-connection
-```
-
-Inspect the created backups:
-
-```sh
-$ velero schedule get
-NAME STATUS CREATED SCHEDULE BACKUP TTL LAST BACKUP SELECTOR
-hc-weekly-backup Enabled 2019-05-08 17:51:31 +0200 CEST @weekly 720h0m0s 6s ago
-
-$ velero backup get
-NAME STATUS CREATED EXPIRES STORAGE LOCATION SELECTOR
-hc-weekly-backup-20190508155132 Completed 2019-05-08 17:51:32 +0200 CEST 29d default
-
-$ velero backup describe hc-weekly-backup-20190508155132 --details
-# see if the persistent volumes are backed up
-```
diff --git a/deployment/src/old/volumes/volume-snapshots/README.md b/deployment/src/old/volumes/volume-snapshots/README.md
deleted file mode 100644
index 010cfc636..000000000
--- a/deployment/src/old/volumes/volume-snapshots/README.md
+++ /dev/null
@@ -1,49 +0,0 @@
-# Kubernetes Volume Snapshots
-
-It is possible to backup persistent volumes through volume snapshots. This is especially handy if you don't want to stop the database to create an [offline backup](../neo4j-offline-backup/README.md) thus having a downtime.
-
-Kubernetes announced this feature in a [blog post](https://kubernetes.io/blog/2018/10/09/introducing-volume-snapshot-alpha-for-kubernetes/). Please make yourself familiar with it before you continue.
-
-## Create a Volume Snapshot
-
-There is an example in this folder how you can e.g. create a volume snapshot for the persistent volume claim of the database `volume-claim-ocelot-neo4j`, or for the uploads of the backend `volume-claim-ocelot-uploads`.
-
-Replace `YYYY-MM-DD` in the `metadata.name` entry in the yaml files with the actual date before you enter the following commands:
-
-```bash
-# in folder deployment/volumes/volume-snapshots/
-kubectl apply -f neo4j-data-snapshot.yaml
-# in case the images are stored on backend and not in S3 storage
-kubectl apply -f backen-uploads-snapshot.yaml
-```
-
-If you are on DigitalOcean the volume snapshot should show up in the Web UI:
-
-
-
-## Provision a Volume based on a Snapshot
-
-Edit your persistent volume claim configuration and add a `dataSource` pointing
-to your volume snapshot. [The blog post](https://kubernetes.io/blog/2018/10/09/introducing-volume-snapshot-alpha-for-kubernetes/) has an example in section "Provision a new volume from a snapshot with
-Kubernetes".
-
-There is also an example in this folder how the configuration could look like. If you apply the configuration new persistent volume claim will be provisioned with the data from the volume snapshot:
-
-```bash
-# in folder deployment/volumes/volume-snapshots/
-kubectl apply -f neo4j-data-provision-snapshot.yaml
-```
-
-## Data Consistency Warning
-
-Note that volume snapshots do not guarantee data consistency. Quote from the
-[blog post](https://kubernetes.io/blog/2018/10/09/introducing-volume-snapshot-alpha-for-kubernetes/):
-
-> Please note that the alpha release of Kubernetes Snapshot does not provide
-> any consistency guarantees. You have to prepare your application (pause
-> application, freeze filesystem etc.) before taking the snapshot for data
-> consistency.
-
-In case of Neo4J this probably means that enterprise edition is required which
-supports [online backups](https://neo4j.com/docs/operations-manual/current/backup/).
-
diff --git a/deployment/src/old/volumes/volume-snapshots/backen-uploads-snapshot.yaml b/deployment/src/old/volumes/volume-snapshots/backen-uploads-snapshot.yaml
deleted file mode 100644
index 697346c82..000000000
--- a/deployment/src/old/volumes/volume-snapshots/backen-uploads-snapshot.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-apiVersion: snapshot.storage.k8s.io/v1beta1
-kind: VolumeSnapshot
-metadata:
- name: YYYY-MM-DD-uploads-snapshot
-spec:
- source:
- persistentVolumeClaimName: volume-claim-ocelot-uploads
diff --git a/deployment/src/old/volumes/volume-snapshots/digital-ocean-volume-snapshots.png b/deployment/src/old/volumes/volume-snapshots/digital-ocean-volume-snapshots.png
deleted file mode 100644
index cb6599616..000000000
Binary files a/deployment/src/old/volumes/volume-snapshots/digital-ocean-volume-snapshots.png and /dev/null differ
diff --git a/deployment/src/old/volumes/volume-snapshots/neo4j-data-provision-snapshot.yaml b/deployment/src/old/volumes/volume-snapshots/neo4j-data-provision-snapshot.yaml
deleted file mode 100644
index cd8552bda..000000000
--- a/deployment/src/old/volumes/volume-snapshots/neo4j-data-provision-snapshot.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
----
- kind: PersistentVolumeClaim
- apiVersion: v1
- metadata:
- name: neo4j-data-claim
- namespace: ocelot-social
- labels:
- app: ocelot-social
- spec:
- dataSource:
- name: neo4j-data-snapshot
- kind: VolumeSnapshot
- apiGroup: snapshot.storage.k8s.io
- accessModes:
- - ReadWriteOnce
- resources:
- requests:
- storage: 1Gi
diff --git a/deployment/src/old/volumes/volume-snapshots/neo4j-data-snapshot.yaml b/deployment/src/old/volumes/volume-snapshots/neo4j-data-snapshot.yaml
deleted file mode 100644
index 6ac15cc05..000000000
--- a/deployment/src/old/volumes/volume-snapshots/neo4j-data-snapshot.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-apiVersion: snapshot.storage.k8s.io/v1beta1
-kind: VolumeSnapshot
-metadata:
- name: YYYY-MM-DD-neo4j-data-snapshot
-spec:
- source:
- persistentVolumeClaimName: volume-claim-ocelot-neo4j
\ No newline at end of file
diff --git a/docker-compose.base.yml b/docker-compose.base.yml
new file mode 100644
index 000000000..b0e46d08c
--- /dev/null
+++ b/docker-compose.base.yml
@@ -0,0 +1,20 @@
+services:
+
+ webapp:
+ image: ghcr.io/ocelot-social-community/ocelot-social/webapp-base:${OCELOT_VERSION:-latest}
+ build:
+ target: base
+ context: webapp
+
+ backend:
+ image: ghcr.io/ocelot-social-community/ocelot-social/backend-base:${OCELOT_VERSION:-latest}
+ build:
+ target: base
+ context: backend
+
+ maintenance:
+ image: ghcr.io/ocelot-social-community/ocelot-social/maintenance-base:${OCELOT_VERSION:-latest}
+ build:
+ target: base
+ context: webapp
+ dockerfile: ./Dockerfile.maintenance
diff --git a/docker-compose.build.yml b/docker-compose.build.yml
new file mode 100644
index 000000000..2f6e4c17b
--- /dev/null
+++ b/docker-compose.build.yml
@@ -0,0 +1,20 @@
+services:
+
+ webapp:
+ image: ghcr.io/ocelot-social-community/ocelot-social/webapp-build:${OCELOT_VERSION:-latest}
+ build:
+ target: build
+ context: webapp
+
+ backend:
+ image: ghcr.io/ocelot-social-community/ocelot-social/backend-build:${OCELOT_VERSION:-latest}
+ build:
+ target: build
+ context: backend
+
+ maintenance:
+ image: ghcr.io/ocelot-social-community/ocelot-social/maintenance-build:${OCELOT_VERSION:-latest}
+ build:
+ target: build
+ context: webapp
+ dockerfile: ./Dockerfile.maintenance
diff --git a/docker-compose.maintenance.yml b/docker-compose.maintenance.yml
index 1f2a2f5b4..e2cd1e515 100644
--- a/docker-compose.maintenance.yml
+++ b/docker-compose.maintenance.yml
@@ -1,9 +1,8 @@
# Todo: !!! This file seems related to our old maintenance worker for MongoDB and has to be refactored in case of using it !!!
services:
-
maintenance-worker:
- image: ocelotsocialnetwork/develop-maintenance-worker:latest
+ image: ghcr.io/ocelot-social-community/ocelot-social/develop-maintenance-worker:latest
build:
context: deployment/legacy-migration/maintenance-worker
volumes:
@@ -11,8 +10,6 @@ services:
- neo4j-data:/data
- ./deployment/legacy-migration/maintenance-worker/migration/:/migration
- ./deployment/legacy-migration/maintenance-worker/ssh/:/root/.ssh
- networks:
- - hc-network
environment:
- NEO4J_dbms_security_auth__enabled=false
- NEO4J_dbms_memory_heap_max__size=2G
@@ -33,11 +30,6 @@ services:
- 7687:7687
- 7474:7474
-networks:
- hc-network:
-
volumes:
- webapp_node_modules:
- backend_node_modules:
neo4j-data:
uploads:
diff --git a/docker-compose.override.yml b/docker-compose.override.yml
index d99ef9cfb..fed2ae70a 100644
--- a/docker-compose.override.yml
+++ b/docker-compose.override.yml
@@ -1,11 +1,7 @@
services:
- ########################################################
- # WEBAPP ###############################################
- ########################################################
webapp:
- # name the image so that it cannot be found in a DockerHub repository, otherwise it will not be built locally from the 'dockerfile' but pulled from there
- image: ocelotsocialnetwork/webapp:local-development
+ image: ghcr.io/ocelot-social-community/ocelot-social/webapp:local-development
build:
target: development
environment:
@@ -13,18 +9,10 @@ services:
# - DEBUG=true
- NUXT_BUILD=/tmp/nuxt # avoid file permission issues when `rm -rf .nuxt/`
volumes:
- # This makes sure the docker container has its own node modules.
- # Therefore it is possible to have a different node version on the host machine
- - webapp_node_modules:/app/node_modules
- # bind the local folder to the docker to allow live reload
- ./webapp:/app
- ########################################################
- # FRONTEND #############################################
- ########################################################
frontend:
- # name the image so that it cannot be found in a DockerHub repository, otherwise it will not be built locally from the 'dockerfile' but pulled from there
- image: ocelotsocialnetwork/frontend:local-development
+ image: ghcr.io/ocelot-social-community/ocelot-social/frontend:local-development
build:
target: development
environment:
@@ -33,67 +21,25 @@ services:
# port required for npm run dev
- 24678:24678
volumes:
- # This makes sure the docker container has its own node modules.
- # Therefore it is possible to have a different node version on the host machine
- - frontend_node_modules:/app/node_modules
- # bind the local folder to the docker to allow live reload
- ./frontend:/app
- ########################################################
- # BACKEND ##############################################
- ########################################################
backend:
- # name the image so that it cannot be found in a DockerHub repository, otherwise it will not be built locally from the 'dockerfile' but pulled from there
- image: ocelotsocialnetwork/backend:local-development
+ image: ghcr.io/ocelot-social-community/ocelot-social/backend:local-development
build:
target: development
environment:
- NODE_ENV="development"
- DEBUG=true
volumes:
- # This makes sure the docker container has its own node modules.
- # Therefore it is possible to have a different node version on the host machine
- - backend_node_modules:/app/node_modules
- # bind the local folder to the docker to allow live reload
- ./backend:/app
- ########################################################
- # MAINTENANCE ##########################################
- ########################################################
- maintenance:
- # name the image so that it cannot be found in a DockerHub repository, otherwise it will not be built locally from the 'dockerfile' but pulled from there
- image: ocelotsocialnetwork/maintenance:local-development
-
- ########################################################
- # NEO4J ################################################
- ########################################################
neo4j:
- # name the image so that it cannot be found in a DockerHub repository, otherwise it will not be built locally from the 'dockerfile' but pulled from there
- image: ocelotsocialnetwork/neo4j-community:local-development
ports:
# Also expose the neo4j query browser
- 7474:7474
- networks:
- # So we can access the neo4j query browser from our host machine
- - external-net
- ########################################################
- # MAILSERVER TO FAKE SMTP ##############################
- ########################################################
mailserver:
image: djfarrelly/maildev
container_name: mailserver
ports:
- 1080:80
- networks:
- - external-net
-
-# the following network from the main YAML gives the warning `WARNING: Some networks were defined but are not used by any service: internal-net` and should be removed
-# but removing is not possible yet, it seems: https://github.com/docker/compose/issues/3729#issuecomment-623154878
-# networks:
-# internal-net:
-
-volumes:
- webapp_node_modules:
- frontend_node_modules:
- backend_node_modules:
diff --git a/docker-compose.test.yml b/docker-compose.test.yml
index 9ee94801d..542a6c187 100644
--- a/docker-compose.test.yml
+++ b/docker-compose.test.yml
@@ -1,44 +1,32 @@
services:
- ########################################################
- # WEBAPP ###############################################
- ########################################################
webapp:
# name the image so that it cannot be found in a DockerHub repository, otherwise it will not be built locally from the 'dockerfile' but pulled from there
- image: ocelotsocialnetwork/webapp:test
+ image: ghcr.io/ocelot-social-community/ocelot-social/webapp:test
build:
target: test
environment:
- NODE_ENV="test"
volumes:
- - ./coverage:/app/coverage
+ - ./coverage:/app/coverage
- ########################################################
- # BACKEND ##############################################
- ########################################################
backend:
# name the image so that it cannot be found in a DockerHub repository, otherwise it will not be built locally from the 'dockerfile' but pulled from there
- image: ocelotsocialnetwork/backend:test
+ image: ghcr.io/ocelot-social-community/ocelot-social/backend:test
build:
target: test
environment:
- NODE_ENV="test"
volumes:
- - ./coverage:/app/coverage
+ - ./coverage:/app/coverage
- ########################################################
- # MAINTENANCE ##########################################
- ########################################################
maintenance:
# name the image so that it cannot be found in a DockerHub repository, otherwise it will not be built locally from the 'dockerfile' but pulled from there
- image: ocelotsocialnetwork/maintenance:test
+ image: ghcr.io/ocelot-social-community/ocelot-social/maintenance:test
- ########################################################
- # NEO4J ################################################
- ########################################################
neo4j:
# name the image so that it cannot be found in a DockerHub repository, otherwise it will not be built locally from the 'dockerfile' but pulled from there
- image: ocelotsocialnetwork/neo4j-community:test
+ image: ghcr.io/ocelot-social-community/ocelot-social/neo4j-community:test
#environment:
# - NEO4J_dbms_connector_bolt_enabled=true
# - NEO4J_dbms_connector_bolt_tls__level=OPTIONAL
@@ -47,20 +35,8 @@ services:
# - NEO4J_dbms_connectors_default__listen__address=0.0.0.0
# - NEO4J_dbms_connector_http_listen__address=0.0.0.0:7474
# - NEO4J_dbms_connector_https_listen__address=0.0.0.0:7473
- networks:
- # So we can access the neo4j query browser from our host machine
- - external-net
- ########################################################
- # MAILSERVER TO FAKE SMTP ##############################
- ########################################################
mailserver:
image: djfarrelly/maildev
ports:
- 1080:80
- networks:
- - external-net
-
-volumes:
- webapp_node_modules:
- backend_node_modules:
diff --git a/docker-compose.yml b/docker-compose.yml
index ed69e7f01..d46b5cd29 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -1,22 +1,14 @@
# This file defines the production settings. It is overwritten by docker-compose.override.yml,
# which defines the development settings. The override.yml is loaded by default. Therefore it
# is required to explicitly define if you want an production build:
-# > docker-compose -f docker-compose.yml up
+# > docker-compose -f docker-compose.yml up
services:
-
- ########################################################
- # WEBAPP ###############################################
- ########################################################
webapp:
- # name the image so that it cannot be found in a DockerHub repository, otherwise it will not be built locally from the 'dockerfile' but pulled from there
- image: ocelotsocialnetwork/webapp:local-production
- container_name: webapp
+ image: ghcr.io/ocelot-social-community/ocelot-social/webapp:${OCELOT_VERSION:-latest}
build:
context: ./webapp
target: production
- networks:
- - external-net
depends_on:
- backend
ports:
@@ -27,9 +19,6 @@ services:
# Envs used in Dockerfile
# - DOCKER_WORKDIR="/app"
# - PORT="3000"
- - BUILD_DATE
- - BUILD_VERSION
- - BUILD_COMMIT
- NODE_ENV="production"
# Application only envs
- HOST=0.0.0.0 # This is nuxt specific, alternative value is HOST=webapp
@@ -37,18 +26,11 @@ services:
env_file:
- ./webapp/.env
- ########################################################
- # FRONTEND #############################################
- ########################################################
frontend:
- # name the image so that it cannot be found in a DockerHub repository, otherwise it will not be built locally from the 'dockerfile' but pulled from there
- image: ocelotsocialnetwork/frontend:local-production
- container_name: frontend
+ image: ghcr.io/ocelot-social-community/ocelot-social/frontend:${OCELOT_VERSION:-latest}
build:
context: ./frontend
target: production
- networks:
- - external-net
depends_on:
- backend
ports:
@@ -57,9 +39,6 @@ services:
# Envs used in Dockerfile
# - DOCKER_WORKDIR="/app"
# - PORT="3002"
- - BUILD_DATE
- - BUILD_VERSION
- - BUILD_COMMIT
- NODE_ENV="production"
# Application only envs
#- HOST=0.0.0.0 # This is nuxt specific, alternative value is HOST=webapp
@@ -67,19 +46,11 @@ services:
env_file:
- ./frontend/.env
- ########################################################
- # BACKEND ##############################################
- ########################################################
backend:
- # name the image so that it cannot be found in a DockerHub repository, otherwise it will not be built locally from the 'dockerfile' but pulled from there
- image: ocelotsocialnetwork/backend:local-production
- container_name: backend
+ image: ghcr.io/ocelot-social-community/ocelot-social/backend:${OCELOT_VERSION:-latest}
build:
context: ./backend
target: production
- networks:
- - external-net
- - internal-net
depends_on:
- neo4j
ports:
@@ -90,9 +61,6 @@ services:
# Envs used in Dockerfile
# - DOCKER_WORKDIR="/app"
# - PORT="4000"
- - BUILD_DATE
- - BUILD_VERSION
- - BUILD_COMMIT
- NODE_ENV="production"
# Application only envs
- DEBUG=false
@@ -102,35 +70,20 @@ services:
env_file:
- ./backend/.env
- ########################################################
- # MAINTENANCE ##########################################
- ########################################################
maintenance:
- # name the image so that it cannot be found in a DockerHub repository, otherwise it will not be built locally from the 'dockerfile' but pulled from there
- image: ocelotsocialnetwork/maintenance:local-production
- container_name: maintenance
+ image: ghcr.io/ocelot-social-community/ocelot-social/maintenance:${OCELOT_VERSION:-latest}
build:
- # TODO: Separate from webapp, this must be independent
context: ./webapp
dockerfile: Dockerfile.maintenance
- networks:
- - external-net
ports:
- 3001:80
- ########################################################
- # NEO4J ################################################
- ########################################################
neo4j:
- # name the image so that it cannot be found in a DockerHub repository, otherwise it will not be built locally from the 'dockerfile' but pulled from there
- image: ocelotsocialnetwork/neo4j-community:local-production
- container_name: neo4j
+ image: ghcr.io/ocelot-social-community/ocelot-social/neo4j:${OCELOT_VERSION:-latest}
build:
context: ./neo4j
# community edition 👆🏼, because we have no enterprise licence 👇🏼 at the moment
target: community
- networks:
- - internal-net
ports:
- 7687:7687
volumes:
@@ -152,11 +105,6 @@ services:
# bring the database in offline mode to export or load dumps
# command: ["tail", "-f", "/dev/null"]
-networks:
- external-net:
- internal-net:
- internal: true
-
volumes:
backend_uploads:
neo4j_data:
diff --git a/neo4j/Dockerfile b/neo4j/Dockerfile
index b1d07080e..a7f52ad36 100644
--- a/neo4j/Dockerfile
+++ b/neo4j/Dockerfile
@@ -1,45 +1,27 @@
-##################################################################################
-# COMMUNITY ######################################################################
-##################################################################################
FROM amd64/neo4j:4.4-community AS community
-
-# ENVs
-## We Cannot do `$(date -u +'%Y-%m-%dT%H:%M:%SZ')` here so we use unix timestamp=0
-ARG BBUILD_DATE="1970-01-01T00:00:00.00Z"
-ENV BUILD_DATE=$BBUILD_DATE
-## We cannot do $(yarn run version)-${BUILD_NUMBER} here so we default to 0.0.0-0
-ARG BBUILD_VERSION="0.0.0-0"
-ENV BUILD_VERSION=$BBUILD_VERSION
-## We cannot do `$(git rev-parse --short HEAD)` here so we default to 0000000
-ARG BBUILD_COMMIT="0000000"
-ENV BUILD_COMMIT=$BBUILD_COMMIT
-
-# Labels
-LABEL org.label-schema.build-date="${BUILD_DATE}"
LABEL org.label-schema.name="ocelot.social:neo4j"
LABEL org.label-schema.description="Neo4J database of the Social Network Software ocelot.social with preinstalled database constraints and indices"
LABEL org.label-schema.usage="https://github.com/Ocelot-Social-Community/Ocelot-Social/blob/master/README.md"
LABEL org.label-schema.url="https://ocelot.social"
LABEL org.label-schema.vcs-url="https://github.com/Ocelot-Social-Community/Ocelot-Social/tree/master/neo4j"
-LABEL org.label-schema.vcs-ref="${BUILD_COMMIT}"
LABEL org.label-schema.vendor="ocelot.social Community"
-LABEL org.label-schema.version="${BUILD_VERSION}"
LABEL org.label-schema.schema-version="1.0"
LABEL maintainer="devops@ocelot.social"
-
-# Install Additional Software
## install: wget, htop (TODO: why do we need htop?)
RUN apt-get update && apt-get -y install wget htop
## install: apoc plugin for neo4j
RUN wget https://github.com/neo4j-contrib/neo4j-apoc-procedures/releases/download/4.4.0.17/apoc-4.4.0.17-all.jar -P plugins/
-##################################################################################
-# ENTERPRISE #####################################################################
-##################################################################################
FROM neo4j:4.4-enterprise AS enterprise
-
-# Install Additional Software
+LABEL org.label-schema.name="ocelot.social:neo4j"
+LABEL org.label-schema.description="Neo4J database of the Social Network Software ocelot.social with preinstalled database constraints and indices"
+LABEL org.label-schema.usage="https://github.com/Ocelot-Social-Community/Ocelot-Social/blob/master/README.md"
+LABEL org.label-schema.url="https://ocelot.social"
+LABEL org.label-schema.vcs-url="https://github.com/Ocelot-Social-Community/Ocelot-Social/tree/master/neo4j"
+LABEL org.label-schema.vendor="ocelot.social Community"
+LABEL org.label-schema.schema-version="1.0"
+LABEL maintainer="devops@ocelot.social"
## install: wget, htop (TODO: why do we need htop?)
RUN apt-get update && apt-get -y install wget htop
## install: apoc plugin for neo4j
-RUN wget https://github.com/neo4j-contrib/neo4j-apoc-procedures/releases/download/4.4.0.17/apoc-4.4.0.17-all.jar -P plugins/
\ No newline at end of file
+RUN wget https://github.com/neo4j-contrib/neo4j-apoc-procedures/releases/download/4.4.0.17/apoc-4.4.0.17-all.jar -P plugins/
diff --git a/webapp/Dockerfile b/webapp/Dockerfile
index f610713fc..7ec65cbf9 100644
--- a/webapp/Dockerfile
+++ b/webapp/Dockerfile
@@ -1,108 +1,46 @@
-##################################################################################
-# BASE (Is pushed to DockerHub for rebranding) ###################################
-##################################################################################
FROM node:20.12.1-alpine3.19 AS base
-
-# ENVs
-## DOCKER_WORKDIR would be a classical ARG, but that is not multi layer persistent - shame
-ENV DOCKER_WORKDIR="/app"
-## We Cannot do `$(date -u +'%Y-%m-%dT%H:%M:%SZ')` here so we use unix timestamp=0
-ARG BBUILD_DATE="1970-01-01T00:00:00.00Z"
-ENV BUILD_DATE=$BBUILD_DATE
-## We cannot do $(yarn run version)-${BUILD_NUMBER} here so we default to 0.0.0-0
-ARG BBUILD_VERSION="0.0.0-0"
-ENV BUILD_VERSION=$BBUILD_VERSION
-## We cannot do `$(git rev-parse --short HEAD)` here so we default to 0000000
-ARG BBUILD_COMMIT="0000000"
-ENV BUILD_COMMIT=$BBUILD_COMMIT
-## SET NODE_ENV
-ENV NODE_ENV="production"
-## App relevant Envs
-ENV PORT="3000"
-
-# Labels
-LABEL org.label-schema.build-date="${BUILD_DATE}"
LABEL org.label-schema.name="ocelot.social:webapp"
LABEL org.label-schema.description="Web Frontend of the Social Network Software ocelot.social"
LABEL org.label-schema.usage="https://github.com/Ocelot-Social-Community/Ocelot-Social/blob/master/README.md"
LABEL org.label-schema.url="https://ocelot.social"
LABEL org.label-schema.vcs-url="https://github.com/Ocelot-Social-Community/Ocelot-Social/tree/master/webapp"
-LABEL org.label-schema.vcs-ref="${BUILD_COMMIT}"
LABEL org.label-schema.vendor="ocelot.social Community"
-LABEL org.label-schema.version="${BUILD_VERSION}"
LABEL org.label-schema.schema-version="1.0"
LABEL maintainer="devops@ocelot.social"
-
-# Install Additional Software
-## install: git
-RUN apk --no-cache add git python3 make g++
-
-# Settings
-## Expose Container Port
+ENV NODE_ENV="production"
+ENV PORT="3000"
EXPOSE ${PORT}
+RUN apk --no-cache add git python3 make g++ bash jq
+RUN mkdir -p /app
+WORKDIR /app
+CMD ["/bin/bash", "-c", "yarn run start"]
-## Workdir
-RUN mkdir -p ${DOCKER_WORKDIR}
-WORKDIR ${DOCKER_WORKDIR}
-
-##################################################################################
-# DEVELOPMENT (Connected to the local environment, to reload on demand) ##########
-##################################################################################
FROM base AS development
+CMD ["/bin/bash", "-c", "yarn install && yarn run dev"]
-# We don't need to copy or build anything since we gonna bind to the
-# local filesystem which will need a rebuild anyway
-
-# Run command
-# (for development we need to execute yarn install since the
-# node_modules are on another volume and need updating)
-CMD /bin/sh -c "yarn install && yarn run dev"
-
-##################################################################################
-# CODE (Does contain all code files and is pushed to DockerHub for rebranding) ###
-##################################################################################
-FROM base AS code
-
-# copy everything, but do not build.
+FROM base AS build
COPY . .
-
-##################################################################################
-# BUILD (Does contain all files and the compilate and is therefore bloated) ######
-##################################################################################
-FROM code AS build
-
-# yarn install
-RUN yarn install --production=false --frozen-lockfile --non-interactive
-# yarn build
-RUN yarn run build
-
-##################################################################################
-# TEST ###########################################################################
-##################################################################################
-FROM build AS test
-
-# Run command
-CMD /bin/sh -c "yarn run dev"
-
-##################################################################################
-# PRODUCTION (Does contain only "binary"- and static-files to reduce image size) #
-##################################################################################
-FROM base AS production
-
-# TODO - do all copying with one COPY command to have one layer
-# Copy "binary"-files from build image
-COPY --from=build ${DOCKER_WORKDIR}/.nuxt ./.nuxt
-COPY --from=build ${DOCKER_WORKDIR}/node_modules ./node_modules
-COPY --from=build ${DOCKER_WORKDIR}/nuxt.config.js ./nuxt.config.js
+ONBUILD COPY ./branding .
+ONBUILD RUN tools/merge-locales.sh
+ONBUILD RUN yarn install --production=false --frozen-lockfile --non-interactive
+ONBUILD RUN yarn run build
+ONBUILD RUN mkdir /build
+ONBUILD RUN cp -r ./.nuxt /build
+ONBUILD RUN cp -r ./nuxt.config.js /build
# Copy static files
# TODO - this seems not be needed anymore for the new rebranding
# TODO - this should be one Folder containign all stuff needed to be copied
-COPY --from=build ${DOCKER_WORKDIR}/config/ ./config/
-COPY --from=build ${DOCKER_WORKDIR}/constants ./constants
-COPY --from=build ${DOCKER_WORKDIR}/static ./static
-COPY --from=build ${DOCKER_WORKDIR}/locales ./locales
-# Copy package.json for script definitions (lock file should not be needed)
-COPY --from=build ${DOCKER_WORKDIR}/package.json ./package.json
+ONBUILD RUN cp -r ./config/ /build
+ONBUILD RUN cp -r ./constants /build
+ONBUILD RUN cp -r ./static /build
+ONBUILD RUN cp -r ./locales /build
+ONBUILD RUN cp -r ./package.json ./yarn.lock /build
+ONBUILD RUN cd /build && yarn install --production=true --frozen-lockfile --non-interactive
-# Run command
-CMD /bin/sh -c "yarn run start"
+FROM build AS test
+CMD ["/bin/bash", "-c", "yarn run dev"]
+
+FROM build AS production_build
+
+FROM base AS production
+COPY --from=production_build /build .
diff --git a/webapp/Dockerfile.maintenance b/webapp/Dockerfile.maintenance
index 096995dca..93d104fd5 100644
--- a/webapp/Dockerfile.maintenance
+++ b/webapp/Dockerfile.maintenance
@@ -1,66 +1,21 @@
-##################################################################################
-# BASE ###########################################################################
-##################################################################################
-FROM node:20.12.1-alpine3.19 AS base
-
-# ENVs
-## DOCKER_WORKDIR would be a classical ARG, but that is not multi layer persistent - shame
-ENV DOCKER_WORKDIR="/app"
-## We Cannot do `$(date -u +'%Y-%m-%dT%H:%M:%SZ')` here so we use unix timestamp=0
-ARG BBUILD_DATE="1970-01-01T00:00:00.00Z"
-ENV BUILD_DATE=$BBUILD_DATE
-## We cannot do $(yarn run version)-${BUILD_NUMBER} here so we default to 0.0.0-0
-ARG BBUILD_VERSION="0.0.0-0"
-ENV BUILD_VERSION=$BBUILD_VERSION
-## We cannot do `$(git rev-parse --short HEAD)` here so we default to 0000000
-ARG BBUILD_COMMIT="0000000"
-ENV BUILD_COMMIT=$BBUILD_COMMIT
-## SET NODE_ENV
-ENV NODE_ENV="production"
-## App relevant Envs
-ENV PORT="3000"
-
-# Labels
-LABEL org.label-schema.build-date="${BUILD_DATE}"
+FROM nginx:alpine AS base
LABEL org.label-schema.name="ocelot.social:maintenance"
LABEL org.label-schema.description="Maintenance page of the Social Network Software ocelot.social"
LABEL org.label-schema.usage="https://github.com/Ocelot-Social-Community/Ocelot-Social/blob/master/README.md"
LABEL org.label-schema.url="https://ocelot.social"
LABEL org.label-schema.vcs-url="https://github.com/Ocelot-Social-Community/Ocelot-Social/tree/master/webapp"
-LABEL org.label-schema.vcs-ref="${BUILD_COMMIT}"
LABEL org.label-schema.vendor="ocelot.social Community"
-LABEL org.label-schema.version="${BUILD_VERSION}"
LABEL org.label-schema.schema-version="1.0"
LABEL maintainer="devops@ocelot.social"
-# Install Additional Software
-## install: git
-RUN apk --no-cache add git python3 make g++
-
-# Settings
-## Expose Container Port
-EXPOSE ${PORT}
-
-## Workdir
-RUN mkdir -p ${DOCKER_WORKDIR}
-WORKDIR ${DOCKER_WORKDIR}
-
-CMD ["yarn", "run", "start"]
-
-##################################################################################
-# CODE (Does contain all code files and is pushed to DockerHub for rebranding) ###
-##################################################################################
-FROM base AS code
-
-COPY package.json yarn.lock ./
-# yarn install
-RUN yarn install --production=false --frozen-lockfile --non-interactive
-
+FROM node:20.12.1-alpine3.19 AS build
+ENV NODE_ENV="production"
+RUN apk --no-cache add git python3 make g++ bash jq
+RUN mkdir -p /app
+WORKDIR /app
COPY assets assets
-# COPY components/_new/generic/ components/_new/generic
COPY components/LocaleSwitch/ components/LocaleSwitch
COPY components/Dropdown.vue components/Dropdown.vue
-# COPY components/Logo/ components/Logo
COPY layouts/blank.vue layouts/blank.vue
COPY locales locales
COPY mixins mixins
@@ -69,25 +24,18 @@ COPY static static
COPY constants constants
COPY nuxt.config.js nuxt.config.js
COPY config/ config/
-
-# this is needed in rebranding
+COPY tools/ tools/
COPY maintenance/nginx maintenance/nginx
-# this will also ovewrite the existing package.json
-COPY maintenance/source ./
+COPY maintenance/source maintenance/source
+COPY package.json yarn.lock ./
+ONBUILD COPY ./branding .
+ONBUILD RUN tools/merge-locales.sh
+ONBUILD RUN yarn install --production=false --frozen-lockfile --non-interactive
+ONBUILD RUN cp -r maintenance/source/* ./
+ONBUILD RUN yarn run generate
-##################################################################################
-# BUILD ### TODO # TODO # TODO # TODO # TODO # TODO # TODO # TODO # TODO # TODO ##
-##################################################################################
-FROM code AS build
+FROM build AS production_build
-# yarn generate
-RUN yarn run generate
-
-##################################################################################
-# PRODUCTION ### TODO # TODO # TODO # TODO # TODO # TODO # TODO # TODO # TODO ####
-##################################################################################
-FROM nginx:alpine AS production
-
-COPY --from=build ./app/dist/ /usr/share/nginx/html/
-RUN rm /etc/nginx/conf.d/default.conf
-COPY maintenance/nginx/custom.conf /etc/nginx/conf.d/
+FROM base AS production
+COPY --from=production_build ./app/dist/ /usr/share/nginx/html/
+COPY --from=production_build ./app/maintenance/nginx/custom.conf /etc/nginx/conf.d/default.conf
diff --git a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/invites/delete.cql b/webapp/branding/.gitkeep
similarity index 100%
rename from deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/invites/delete.cql
rename to webapp/branding/.gitkeep
diff --git a/deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/notifications/delete.cql b/webapp/locales/tmp/.gitkeep
similarity index 100%
rename from deployment/src/old/legacy-migration/maintenance-worker/migration/neo4j/notifications/delete.cql
rename to webapp/locales/tmp/.gitkeep
diff --git a/webapp/nuxt.config.js b/webapp/nuxt.config.js
index 086a62f1b..4e82e9330 100644
--- a/webapp/nuxt.config.js
+++ b/webapp/nuxt.config.js
@@ -302,7 +302,7 @@ export default {
modules: [
{
preTransformNode(abstractSyntaxTreeElement) {
- if (!ctx.isDev) {
+ if (!ctx.isDev && CONFIG.NODE_ENV !== 'test') {
const { attrsMap, attrsList } = abstractSyntaxTreeElement
tagAttributesForTesting.forEach((attribute) => {
if (attrsMap[attribute]) {
diff --git a/webapp/package.json b/webapp/package.json
index 70f35761c..486937dfa 100644
--- a/webapp/package.json
+++ b/webapp/package.json
@@ -25,6 +25,7 @@
"@mapbox/mapbox-gl-geocoder": "^5.0.2",
"@nuxtjs/apollo": "^4.0.0-rc19",
"@nuxtjs/axios": "~5.9.7",
+ "@nuxtjs/composition-api": "0.32.0",
"@nuxtjs/dotenv": "~1.4.1",
"@nuxtjs/pwa": "^3.0.0-beta.20",
"@nuxtjs/sentry": "^4.0.0",
@@ -72,7 +73,6 @@
"@babel/plugin-syntax-dynamic-import": "^7.8.3",
"@babel/preset-env": "^7.25.8",
"@faker-js/faker": "9.5.0",
- "@nuxtjs/composition-api": "0.32.0",
"@storybook/addon-a11y": "^8.0.8",
"@storybook/addon-actions": "^5.3.21",
"@storybook/addon-notes": "^5.3.18",
diff --git a/deployment/src/tools/merge-locales.sh b/webapp/tools/merge-locales.sh
similarity index 100%
rename from deployment/src/tools/merge-locales.sh
rename to webapp/tools/merge-locales.sh
diff --git a/webapp/yarn.lock b/webapp/yarn.lock
index 76b14180a..55873eb69 100644
--- a/webapp/yarn.lock
+++ b/webapp/yarn.lock
@@ -123,6 +123,15 @@
"@babel/highlight" "^7.25.7"
picocolors "^1.0.0"
+"@babel/code-frame@^7.26.2":
+ version "7.26.2"
+ resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.26.2.tgz#4b5fab97d33338eff916235055f0ebc21e573a85"
+ integrity sha512-RJlIHRueQgwWitWgF8OdFYGZX328Ax5BCemNGlqHfplnRT9ESi8JkFlvaVYbS+UubVY6dpv87Fs2u5M29iNFVQ==
+ dependencies:
+ "@babel/helper-validator-identifier" "^7.25.9"
+ js-tokens "^4.0.0"
+ picocolors "^1.0.0"
+
"@babel/compat-data@^7.22.6", "@babel/compat-data@^7.25.7", "@babel/compat-data@^7.25.8":
version "7.25.8"
resolved "https://registry.yarnpkg.com/@babel/compat-data/-/compat-data-7.25.8.tgz#0376e83df5ab0eb0da18885c0140041f0747a402"
@@ -133,6 +142,11 @@
resolved "https://registry.yarnpkg.com/@babel/compat-data/-/compat-data-7.23.5.tgz#ffb878728bb6bdcb6f4510aa51b1be9afb8cfd98"
integrity sha512-uU27kfDRlhfKl+w1U6vp16IuvSLtjAxdArVXPa9BvLkrr7CYIsxH5adpHObeAGY/41+syctUWOZ140a2Rvkgjw==
+"@babel/compat-data@^7.26.5":
+ version "7.26.8"
+ resolved "https://registry.yarnpkg.com/@babel/compat-data/-/compat-data-7.26.8.tgz#821c1d35641c355284d4a870b8a4a7b0c141e367"
+ integrity sha512-oH5UPLMWR3L2wEFLnFJ1TZXqHufiTKAiLfqw5zkhS4dKXLJ10yVztfil/twG8EDTA4F/tvVNw9nOl4ZMslB8rQ==
+
"@babel/compat-data@^7.9.0":
version "7.9.0"
resolved "https://registry.yarnpkg.com/@babel/compat-data/-/compat-data-7.9.0.tgz#04815556fc90b0c174abd2c0c1bb966faa036a6c"
@@ -142,7 +156,7 @@
invariant "^2.2.4"
semver "^5.5.0"
-"@babel/core@^7.11.6", "@babel/core@^7.12.3", "@babel/core@^7.16.7", "@babel/core@^7.23.9", "@babel/core@^7.9.0":
+"@babel/core@^7.11.6", "@babel/core@^7.12.3", "@babel/core@^7.23.9", "@babel/core@^7.9.0":
version "7.24.4"
resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.24.4.tgz#1f758428e88e0d8c563874741bc4ffc4f71a4717"
integrity sha512-MBVlMXP+kkl5394RBLSxxk/iLTeVGuXTV3cIDXavPpMMqnSnt6apKgan/U8O3USWZCWZT/TbgfEpKa4uMgN4Dg==
@@ -163,6 +177,27 @@
json5 "^2.2.3"
semver "^6.3.1"
+"@babel/core@^7.16.7":
+ version "7.26.9"
+ resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.26.9.tgz#71838542a4b1e49dfed353d7acbc6eb89f4a76f2"
+ integrity sha512-lWBYIrF7qK5+GjY5Uy+/hEgp8OJWOD/rpy74GplYRhEauvbHDeFB8t5hPOZxCZ0Oxf4Cc36tK51/l3ymJysrKw==
+ dependencies:
+ "@ampproject/remapping" "^2.2.0"
+ "@babel/code-frame" "^7.26.2"
+ "@babel/generator" "^7.26.9"
+ "@babel/helper-compilation-targets" "^7.26.5"
+ "@babel/helper-module-transforms" "^7.26.0"
+ "@babel/helpers" "^7.26.9"
+ "@babel/parser" "^7.26.9"
+ "@babel/template" "^7.26.9"
+ "@babel/traverse" "^7.26.9"
+ "@babel/types" "^7.26.9"
+ convert-source-map "^2.0.0"
+ debug "^4.1.0"
+ gensync "^1.0.0-beta.2"
+ json5 "^2.2.3"
+ semver "^6.3.1"
+
"@babel/core@^7.25.8":
version "7.25.8"
resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.25.8.tgz#a57137d2a51bbcffcfaeba43cb4dd33ae3e0e1c6"
@@ -194,7 +229,18 @@
lodash "^4.17.13"
source-map "^0.5.0"
-"@babel/generator@^7.16.8", "@babel/generator@^7.24.1", "@babel/generator@^7.24.4", "@babel/generator@^7.7.2":
+"@babel/generator@^7.16.8", "@babel/generator@^7.26.9":
+ version "7.26.9"
+ resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.26.9.tgz#75a9482ad3d0cc7188a537aa4910bc59db67cbca"
+ integrity sha512-kEWdzjOAUMW4hAyrzJ0ZaTOu9OmpyDIQicIh0zg0EEcEkYXZb2TjtBhnHi2ViX7PKwZqF4xwqfAm299/QMP3lg==
+ dependencies:
+ "@babel/parser" "^7.26.9"
+ "@babel/types" "^7.26.9"
+ "@jridgewell/gen-mapping" "^0.3.5"
+ "@jridgewell/trace-mapping" "^0.3.25"
+ jsesc "^3.0.2"
+
+"@babel/generator@^7.24.1", "@babel/generator@^7.24.4", "@babel/generator@^7.7.2":
version "7.24.4"
resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.24.4.tgz#1fc55532b88adf952025d5d2d1e71f946cb1c498"
integrity sha512-Xd6+v6SnjWVx/nus+y0l1sxMOTOMBkyL4+BIdbALyatQnAe/SRVjANeDPSCYaX+i1iJmuGSKf3Z+E+V/va1Hvw==
@@ -282,6 +328,17 @@
lru-cache "^5.1.1"
semver "^6.3.1"
+"@babel/helper-compilation-targets@^7.26.5":
+ version "7.26.5"
+ resolved "https://registry.yarnpkg.com/@babel/helper-compilation-targets/-/helper-compilation-targets-7.26.5.tgz#75d92bb8d8d51301c0d49e52a65c9a7fe94514d8"
+ integrity sha512-IXuyn5EkouFJscIDuFF5EsiSolseme1s0CZB+QxVugqJLYmKdxI1VfIBOst0SUu4rnk2Z7kqTwmoO1lp3HIfnA==
+ dependencies:
+ "@babel/compat-data" "^7.26.5"
+ "@babel/helper-validator-option" "^7.25.9"
+ browserslist "^4.24.0"
+ lru-cache "^5.1.1"
+ semver "^6.3.1"
+
"@babel/helper-create-class-features-plugin@^7.25.7":
version "7.25.7"
resolved "https://registry.yarnpkg.com/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.25.7.tgz#5d65074c76cae75607421c00d6bd517fe1892d6b"
@@ -449,6 +506,14 @@
"@babel/traverse" "^7.25.7"
"@babel/types" "^7.25.7"
+"@babel/helper-module-imports@^7.25.9":
+ version "7.25.9"
+ resolved "https://registry.yarnpkg.com/@babel/helper-module-imports/-/helper-module-imports-7.25.9.tgz#e7f8d20602ebdbf9ebbea0a0751fb0f2a4141715"
+ integrity sha512-tnUA4RsrmflIM6W6RFTLFSXITtl0wKjgpnLgXyowocVPrbYrLUXSBXDgTs8BlbmIzIdlBySRQjINYs2BAkiLtw==
+ dependencies:
+ "@babel/traverse" "^7.25.9"
+ "@babel/types" "^7.25.9"
+
"@babel/helper-module-transforms@^7.21.5", "@babel/helper-module-transforms@^7.23.3", "@babel/helper-module-transforms@^7.9.0":
version "7.23.3"
resolved "https://registry.yarnpkg.com/@babel/helper-module-transforms/-/helper-module-transforms-7.23.3.tgz#d7d12c3c5d30af5b3c0fcab2a6d5217773e2d0f1"
@@ -470,6 +535,15 @@
"@babel/helper-validator-identifier" "^7.25.7"
"@babel/traverse" "^7.25.7"
+"@babel/helper-module-transforms@^7.26.0":
+ version "7.26.0"
+ resolved "https://registry.yarnpkg.com/@babel/helper-module-transforms/-/helper-module-transforms-7.26.0.tgz#8ce54ec9d592695e58d84cd884b7b5c6a2fdeeae"
+ integrity sha512-xO+xu6B5K2czEnQye6BHA7DolFFmS3LB7stHZFaOLb1pAwO1HWLS8fXA+eh0A2yIvltPVmx3eNNDBJA2SLHXFw==
+ dependencies:
+ "@babel/helper-module-imports" "^7.25.9"
+ "@babel/helper-validator-identifier" "^7.25.9"
+ "@babel/traverse" "^7.25.9"
+
"@babel/helper-optimise-call-expression@^7.25.7":
version "7.25.7"
resolved "https://registry.yarnpkg.com/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.25.7.tgz#1de1b99688e987af723eed44fa7fc0ee7b97d77a"
@@ -619,6 +693,11 @@
resolved "https://registry.yarnpkg.com/@babel/helper-string-parser/-/helper-string-parser-7.25.7.tgz#d50e8d37b1176207b4fe9acedec386c565a44a54"
integrity sha512-CbkjYdsJNHFk8uqpEkpCvRs3YRp9tY6FmFY7wLMSYuGYkrdUi7r2lc4/wqsvlHoMznX3WJ9IP8giGPq68T/Y6g==
+"@babel/helper-string-parser@^7.25.9":
+ version "7.25.9"
+ resolved "https://registry.yarnpkg.com/@babel/helper-string-parser/-/helper-string-parser-7.25.9.tgz#1aabb72ee72ed35789b4bbcad3ca2862ce614e8c"
+ integrity sha512-4A/SCr/2KLd5jrtOMFzaKjVtAei3+2r/NChoBNoZ3EyP/+GlhoaEGoWOZUmFmoITP7zOJyHIMm+DYRd8o3PvHA==
+
"@babel/helper-validator-identifier@^7.22.20":
version "7.22.20"
resolved "https://registry.yarnpkg.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.20.tgz#c4ae002c61d2879e724581d96665583dbc1dc0e0"
@@ -629,6 +708,11 @@
resolved "https://registry.yarnpkg.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.25.7.tgz#77b7f60c40b15c97df735b38a66ba1d7c3e93da5"
integrity sha512-AM6TzwYqGChO45oiuPqwL2t20/HdMC1rTPAesnBCgPCSF1x3oN9MVUwQV2iyz4xqWrctwK5RNC8LV22kaQCNYg==
+"@babel/helper-validator-identifier@^7.25.9":
+ version "7.25.9"
+ resolved "https://registry.yarnpkg.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.25.9.tgz#24b64e2c3ec7cd3b3c547729b8d16871f22cbdc7"
+ integrity sha512-Ed61U6XJc3CVRfkERJWDz4dJwKe7iLmmJsbOGu9wSloNSFttHV0I8g6UAgb7qnK5ly5bGLPd4oXZlxCdANBOWQ==
+
"@babel/helper-validator-option@^7.23.5":
version "7.23.5"
resolved "https://registry.yarnpkg.com/@babel/helper-validator-option/-/helper-validator-option-7.23.5.tgz#907a3fbd4523426285365d1206c423c4c5520307"
@@ -639,6 +723,11 @@
resolved "https://registry.yarnpkg.com/@babel/helper-validator-option/-/helper-validator-option-7.25.7.tgz#97d1d684448228b30b506d90cace495d6f492729"
integrity sha512-ytbPLsm+GjArDYXJ8Ydr1c/KJuutjF2besPNbIZnZ6MKUxi/uTA22t2ymmA4WFjZFpjiAMO0xuuJPqK2nvDVfQ==
+"@babel/helper-validator-option@^7.25.9":
+ version "7.25.9"
+ resolved "https://registry.yarnpkg.com/@babel/helper-validator-option/-/helper-validator-option-7.25.9.tgz#86e45bd8a49ab7e03f276577f96179653d41da72"
+ integrity sha512-e/zv1co8pp55dNdEcCynfj9X7nyUKUXoUEwfXqaZt0omVOmDe9oOTdKStH4GmAw6zxMFs50ZayuMfHDKlO7Tfw==
+
"@babel/helper-wrap-function@^7.25.7":
version "7.25.7"
resolved "https://registry.yarnpkg.com/@babel/helper-wrap-function/-/helper-wrap-function-7.25.7.tgz#9f6021dd1c4fdf4ad515c809967fc4bac9a70fe7"
@@ -675,6 +764,14 @@
"@babel/template" "^7.25.7"
"@babel/types" "^7.25.7"
+"@babel/helpers@^7.26.9":
+ version "7.26.9"
+ resolved "https://registry.yarnpkg.com/@babel/helpers/-/helpers-7.26.9.tgz#28f3fb45252fc88ef2dc547c8a911c255fc9fef6"
+ integrity sha512-Mz/4+y8udxBKdmzt/UjPACs4G3j5SshJJEFFKxlCGPydG4JAHXxjWjAwjd09tf6oINvl1VfMJo+nB7H2YKQ0dA==
+ dependencies:
+ "@babel/template" "^7.26.9"
+ "@babel/types" "^7.26.9"
+
"@babel/highlight@^7.10.4":
version "7.23.4"
resolved "https://registry.yarnpkg.com/@babel/highlight/-/highlight-7.23.4.tgz#edaadf4d8232e1a961432db785091207ead0621b"
@@ -704,11 +801,18 @@
js-tokens "^4.0.0"
picocolors "^1.0.0"
-"@babel/parser@^7.1.0", "@babel/parser@^7.1.3", "@babel/parser@^7.14.7", "@babel/parser@^7.16.4", "@babel/parser@^7.16.8", "@babel/parser@^7.20.7", "@babel/parser@^7.23.9", "@babel/parser@^7.24.0", "@babel/parser@^7.24.1", "@babel/parser@^7.24.4", "@babel/parser@^7.7.0":
+"@babel/parser@^7.1.0", "@babel/parser@^7.1.3", "@babel/parser@^7.14.7", "@babel/parser@^7.20.7", "@babel/parser@^7.23.9", "@babel/parser@^7.24.0", "@babel/parser@^7.24.1", "@babel/parser@^7.24.4", "@babel/parser@^7.7.0":
version "7.24.4"
resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.24.4.tgz#234487a110d89ad5a3ed4a8a566c36b9453e8c88"
integrity sha512-zTvEBcghmeBma9QIGunWevvBAp4/Qu9Bdq+2k0Ot4fVMD6v3dsC9WOcRSKk7tRRyBM/53yKMJko9xOatGQAwSg==
+"@babel/parser@^7.16.8", "@babel/parser@^7.23.5", "@babel/parser@^7.25.3", "@babel/parser@^7.26.9":
+ version "7.26.9"
+ resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.26.9.tgz#d9e78bee6dc80f9efd8f2349dcfbbcdace280fd5"
+ integrity sha512-81NWa1njQblgZbQHxWHpxxCzNsa3ZwvFqpUg7P+NNUU6f3UU2jBEg4OlF/J6rl8+PQGh1q6/zWScd001YwcA5A==
+ dependencies:
+ "@babel/types" "^7.26.9"
+
"@babel/parser@^7.25.7", "@babel/parser@^7.25.8":
version "7.25.8"
resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.25.8.tgz#f6aaf38e80c36129460c1657c0762db584c9d5e2"
@@ -1880,7 +1984,29 @@
"@babel/parser" "^7.25.7"
"@babel/types" "^7.25.7"
-"@babel/traverse@^7.16.8", "@babel/traverse@^7.24.1", "@babel/traverse@^7.7.0", "@babel/traverse@^7.8.3", "@babel/traverse@^7.8.6":
+"@babel/template@^7.26.9":
+ version "7.26.9"
+ resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.26.9.tgz#4577ad3ddf43d194528cff4e1fa6b232fa609bb2"
+ integrity sha512-qyRplbeIpNZhmzOysF/wFMuP9sctmh2cFzRAZOn1YapxBsE1i9bJIY586R/WBLfLcmcBlM8ROBiQURnnNy+zfA==
+ dependencies:
+ "@babel/code-frame" "^7.26.2"
+ "@babel/parser" "^7.26.9"
+ "@babel/types" "^7.26.9"
+
+"@babel/traverse@^7.16.8", "@babel/traverse@^7.25.9", "@babel/traverse@^7.26.9":
+ version "7.26.9"
+ resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.26.9.tgz#4398f2394ba66d05d988b2ad13c219a2c857461a"
+ integrity sha512-ZYW7L+pL8ahU5fXmNbPF+iZFHCv5scFak7MZ9bwaRPLUhHh7QQEMjZUg0HevihoqCM5iSYHN61EyCoZvqC+bxg==
+ dependencies:
+ "@babel/code-frame" "^7.26.2"
+ "@babel/generator" "^7.26.9"
+ "@babel/parser" "^7.26.9"
+ "@babel/template" "^7.26.9"
+ "@babel/types" "^7.26.9"
+ debug "^4.3.1"
+ globals "^11.1.0"
+
+"@babel/traverse@^7.24.1", "@babel/traverse@^7.7.0", "@babel/traverse@^7.8.3", "@babel/traverse@^7.8.6":
version "7.24.1"
resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.24.1.tgz#d65c36ac9dd17282175d1e4a3c49d5b7988f530c"
integrity sha512-xuU6o9m68KeqZbQuDt2TcKSxUw/mrsvavlEqQ1leZ/B+C9tk6E4sRWy97WaXgvq5E+nU3cXMxv3WKOCanVMCmQ==
@@ -1918,7 +2044,7 @@
lodash "^4.17.13"
to-fast-properties "^2.0.0"
-"@babel/types@^7.0.0", "@babel/types@^7.16.8", "@babel/types@^7.18.6", "@babel/types@^7.20.7", "@babel/types@^7.21.5", "@babel/types@^7.22.15", "@babel/types@^7.22.5", "@babel/types@^7.23.0", "@babel/types@^7.24.0", "@babel/types@^7.3.0", "@babel/types@^7.3.3", "@babel/types@^7.4.4", "@babel/types@^7.6.3", "@babel/types@^7.7.0", "@babel/types@^7.8.3", "@babel/types@^7.8.6", "@babel/types@^7.8.7", "@babel/types@^7.9.0":
+"@babel/types@^7.0.0", "@babel/types@^7.18.6", "@babel/types@^7.20.7", "@babel/types@^7.21.5", "@babel/types@^7.22.15", "@babel/types@^7.22.5", "@babel/types@^7.23.0", "@babel/types@^7.24.0", "@babel/types@^7.3.0", "@babel/types@^7.3.3", "@babel/types@^7.4.4", "@babel/types@^7.6.3", "@babel/types@^7.7.0", "@babel/types@^7.8.3", "@babel/types@^7.8.6", "@babel/types@^7.8.7", "@babel/types@^7.9.0":
version "7.24.0"
resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.24.0.tgz#3b951f435a92e7333eba05b7566fd297960ea1bf"
integrity sha512-+j7a5c253RfKh8iABBhywc8NSfP5LURe7Uh4qpsh6jc+aLJguvmIUBdjSdEMQv2bENrCR5MfRdjGo7vzS/ob7w==
@@ -1927,6 +2053,14 @@
"@babel/helper-validator-identifier" "^7.22.20"
to-fast-properties "^2.0.0"
+"@babel/types@^7.16.8", "@babel/types@^7.25.9", "@babel/types@^7.26.9":
+ version "7.26.9"
+ resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.26.9.tgz#08b43dec79ee8e682c2ac631c010bdcac54a21ce"
+ integrity sha512-Y3IR1cRnOxOCDvMmNiym7XpXQ93iGDDPHx+Zj+NM+rg0fBaShfQLkg+hKPaZCEvg5N/LeCo4+Rj/i3FuJsIQaw==
+ dependencies:
+ "@babel/helper-string-parser" "^7.25.9"
+ "@babel/helper-validator-identifier" "^7.25.9"
+
"@babel/types@^7.25.7", "@babel/types@^7.25.8":
version "7.25.8"
resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.25.8.tgz#5cf6037258e8a9bcad533f4979025140cb9993e1"
@@ -2633,6 +2767,11 @@
resolved "https://registry.yarnpkg.com/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.15.tgz#d7c6e6755c78567a951e04ab52ef0fd26de59f32"
integrity sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==
+"@jridgewell/sourcemap-codec@^1.5.0":
+ version "1.5.0"
+ resolved "https://registry.yarnpkg.com/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.0.tgz#3188bcb273a414b0d215fd22a58540b989b9409a"
+ integrity sha512-gv3ZRaISU3fjPAgNsriBRqGWQL6quFx04YMPW/zD8XMLsU32mhCCbfbO6KZFLjvYpCZ8zyDEgqsgf+PwPaM7GQ==
+
"@jridgewell/trace-mapping@^0.3.12", "@jridgewell/trace-mapping@^0.3.9":
version "0.3.17"
resolved "https://registry.yarnpkg.com/@jridgewell/trace-mapping/-/trace-mapping-0.3.17.tgz#793041277af9073b0951a7fe0f0d8c4c98c36985"
@@ -4838,23 +4977,34 @@
semver "^6.1.0"
strip-ansi "^6.0.0"
-"@vue/compiler-core@3.2.45", "@vue/compiler-core@^3.2.26":
- version "3.2.45"
- resolved "https://registry.yarnpkg.com/@vue/compiler-core/-/compiler-core-3.2.45.tgz#d9311207d96f6ebd5f4660be129fb99f01ddb41b"
- integrity sha512-rcMj7H+PYe5wBV3iYeUgbCglC+pbpN8hBLTJvRiK2eKQiWqu+fG9F+8sW99JdL4LQi7Re178UOxn09puSXvn4A==
+"@vue/compiler-core@3.3.13":
+ version "3.3.13"
+ resolved "https://registry.yarnpkg.com/@vue/compiler-core/-/compiler-core-3.3.13.tgz#b3d5f8f84caee5de3f31d95cb568d899fd19c599"
+ integrity sha512-bwi9HShGu7uaZLOErZgsH2+ojsEdsjerbf2cMXPwmvcgZfVPZ2BVZzCVnwZBxTAYd6Mzbmf6izcUNDkWnBBQ6A==
dependencies:
- "@babel/parser" "^7.16.4"
- "@vue/shared" "3.2.45"
+ "@babel/parser" "^7.23.5"
+ "@vue/shared" "3.3.13"
estree-walker "^2.0.2"
- source-map "^0.6.1"
+ source-map-js "^1.0.2"
+
+"@vue/compiler-core@3.5.13", "@vue/compiler-core@^3.2.26":
+ version "3.5.13"
+ resolved "https://registry.yarnpkg.com/@vue/compiler-core/-/compiler-core-3.5.13.tgz#b0ae6c4347f60c03e849a05d34e5bf747c9bda05"
+ integrity sha512-oOdAkwqUfW1WqpwSYJce06wvt6HljgY3fGeM9NcVA1HaYOij3mZG9Rkysn0OHuyUAGMbEbARIpsG+LPVlBJ5/Q==
+ dependencies:
+ "@babel/parser" "^7.25.3"
+ "@vue/shared" "3.5.13"
+ entities "^4.5.0"
+ estree-walker "^2.0.2"
+ source-map-js "^1.2.0"
"@vue/compiler-dom@^3.2.26":
- version "3.2.45"
- resolved "https://registry.yarnpkg.com/@vue/compiler-dom/-/compiler-dom-3.2.45.tgz#c43cc15e50da62ecc16a42f2622d25dc5fd97dce"
- integrity sha512-tyYeUEuKqqZO137WrZkpwfPCdiiIeXYCcJ8L4gWz9vqaxzIQRccTSwSWZ/Axx5YR2z+LvpUbmPNXxuBU45lyRw==
+ version "3.5.13"
+ resolved "https://registry.yarnpkg.com/@vue/compiler-dom/-/compiler-dom-3.5.13.tgz#bb1b8758dbc542b3658dda973b98a1c9311a8a58"
+ integrity sha512-ZOJ46sMOKUjO3e94wPdCzQ6P1Lx/vhp2RSvfaab88Ajexs0AHeV0uasYhi99WPaogmBlRHNRuly8xV75cNTMDA==
dependencies:
- "@vue/compiler-core" "3.2.45"
- "@vue/shared" "3.2.45"
+ "@vue/compiler-core" "3.5.13"
+ "@vue/shared" "3.5.13"
"@vue/component-compiler-utils@^3.1.0":
version "3.1.0"
@@ -4872,9 +5022,9 @@
vue-template-es2015-compiler "^1.9.0"
"@vue/composition-api@^1.4.6":
- version "1.7.1"
- resolved "https://registry.yarnpkg.com/@vue/composition-api/-/composition-api-1.7.1.tgz#aa6831be5a12817d93e89e247460c310dd7a3a32"
- integrity sha512-xDWoEtxGXhH9Ku3ROYX/rzhcpt4v31hpPU5zF3UeVC/qxA3dChmqU8zvTUYoKh3j7rzpNsoFOwqsWG7XPMlaFA==
+ version "1.7.2"
+ resolved "https://registry.yarnpkg.com/@vue/composition-api/-/composition-api-1.7.2.tgz#0b656f3ec39fefc2cf40aaa8c12426bcfeae1b44"
+ integrity sha512-M8jm9J/laYrYT02665HkZ5l2fWTK4dcVg3BsDHm/pfz+MjDYwX+9FUaZyGwEyXEDonQYRCo0H7aLgdklcIELjw==
"@vue/eslint-config-prettier@~6.0.0":
version "6.0.0"
@@ -4884,15 +5034,15 @@
eslint-config-prettier "^6.0.0"
"@vue/reactivity-transform@^3.2.26":
- version "3.2.45"
- resolved "https://registry.yarnpkg.com/@vue/reactivity-transform/-/reactivity-transform-3.2.45.tgz#07ac83b8138550c83dfb50db43cde1e0e5e8124d"
- integrity sha512-BHVmzYAvM7vcU5WmuYqXpwaBHjsS8T63jlKGWVtHxAHIoMIlmaMyurUSEs1Zcg46M4AYT5MtB1U274/2aNzjJQ==
+ version "3.3.13"
+ resolved "https://registry.yarnpkg.com/@vue/reactivity-transform/-/reactivity-transform-3.3.13.tgz#dc8e9be961865dc666e367e1aaaea0716afa5c90"
+ integrity sha512-oWnydGH0bBauhXvh5KXUy61xr9gKaMbtsMHk40IK9M4gMuKPJ342tKFarY0eQ6jef8906m35q37wwA8DMZOm5Q==
dependencies:
- "@babel/parser" "^7.16.4"
- "@vue/compiler-core" "3.2.45"
- "@vue/shared" "3.2.45"
+ "@babel/parser" "^7.23.5"
+ "@vue/compiler-core" "3.3.13"
+ "@vue/shared" "3.3.13"
estree-walker "^2.0.2"
- magic-string "^0.25.7"
+ magic-string "^0.30.5"
"@vue/server-test-utils@~1.0.0-beta.31":
version "1.0.0-beta.32"
@@ -4902,10 +5052,15 @@
"@types/cheerio" "^0.22.10"
cheerio "^1.0.0-rc.2"
-"@vue/shared@3.2.45", "@vue/shared@^3.2.26":
- version "3.2.45"
- resolved "https://registry.yarnpkg.com/@vue/shared/-/shared-3.2.45.tgz#a3fffa7489eafff38d984e23d0236e230c818bc2"
- integrity sha512-Ewzq5Yhimg7pSztDV+RH1UDKBzmtqieXQlpTVm2AwraoRL/Rks96mvd8Vgi7Lj+h+TH8dv7mXD3FRZR3TUvbSg==
+"@vue/shared@3.3.13":
+ version "3.3.13"
+ resolved "https://registry.yarnpkg.com/@vue/shared/-/shared-3.3.13.tgz#4cb73cda958d77ffd389c8640cf7d93a10ac676f"
+ integrity sha512-/zYUwiHD8j7gKx2argXEMCUXVST6q/21DFU0sTfNX0URJroCe3b1UF6vLJ3lQDfLNIiiRl2ONp7Nh5UVWS6QnA==
+
+"@vue/shared@3.5.13", "@vue/shared@^3.2.26":
+ version "3.5.13"
+ resolved "https://registry.yarnpkg.com/@vue/shared/-/shared-3.5.13.tgz#87b309a6379c22b926e696893237826f64339b6f"
+ integrity sha512-/hnE/qP5ZoGpol0a5mDi45bOd7t3tjYJBjsgCsivow7D48cJeV5l05RD82lPqi7gRiphZM37rnhW1l6ZoCNNnQ==
"@vue/test-utils@1.3.4":
version "1.3.4"
@@ -8963,6 +9118,11 @@ entities@^4.4.0:
resolved "https://registry.yarnpkg.com/entities/-/entities-4.4.0.tgz#97bdaba170339446495e653cfd2db78962900174"
integrity sha512-oYp7156SP8LkeGD0GF85ad1X9Ai79WtRsZ2gxJqtBuzH+98YUV6jkHEKlZkMbcrjJjIVJNIDP/3WL9wQkoPbWA==
+entities@^4.5.0:
+ version "4.5.0"
+ resolved "https://registry.yarnpkg.com/entities/-/entities-4.5.0.tgz#5d268ea5e7113ec74c4d033b79ea5a35a488fb48"
+ integrity sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==
+
env-ci@3.2.2:
version "3.2.2"
resolved "https://registry.yarnpkg.com/env-ci/-/env-ci-3.2.2.tgz#06936f1fcfbc999102a2211fc2539df64062b61f"
@@ -13316,11 +13476,18 @@ lru_map@^0.3.3:
integrity sha1-tcg1G5Rky9dQM1p5ZQoOwOVhGN0=
magic-string@^0.25.7:
- version "0.25.7"
- resolved "https://registry.yarnpkg.com/magic-string/-/magic-string-0.25.7.tgz#3f497d6fd34c669c6798dcb821f2ef31f5445051"
- integrity sha512-4CrMT5DOHTDk4HYDlzmwu4FVCcIYI8gauveasrdCu2IKIFOJ3f0v/8MDGJCDL9oD2ppz/Av1b0Nj345H9M+XIA==
+ version "0.25.9"
+ resolved "https://registry.yarnpkg.com/magic-string/-/magic-string-0.25.9.tgz#de7f9faf91ef8a1c91d02c2e5314c8277dbcdd1c"
+ integrity sha512-RmF0AsMzgt25qzqqLc1+MbHmhdx0ojF2Fvs4XnOqz2ZOBXzzkEwc/dJQZCYHAn7v1jbVOjAZfK8msRn4BxO4VQ==
dependencies:
- sourcemap-codec "^1.4.4"
+ sourcemap-codec "^1.4.8"
+
+magic-string@^0.30.5:
+ version "0.30.17"
+ resolved "https://registry.yarnpkg.com/magic-string/-/magic-string-0.30.17.tgz#450a449673d2460e5bbcfba9a61916a1714c7453"
+ integrity sha512-sNPKHvyjVf7gyjwS4xGTaW/mCnF8wnjtifKBEhxfZ7E/S8tQ0rssrwGNn6q8JH/ohItJfSQp9mBtQYuTlH5QnA==
+ dependencies:
+ "@jridgewell/sourcemap-codec" "^1.5.0"
make-dir@^1.0.0:
version "1.3.0"
@@ -17654,6 +17821,11 @@ source-list-map@^2.0.0:
resolved "https://registry.yarnpkg.com/source-map-js/-/source-map-js-1.2.0.tgz#16b809c162517b5b8c3e7dcd315a2a5c2612b2af"
integrity sha512-itJW8lvSA0TXEphiRoawsCksnlf8SyvmFzIhltqAHluXd88pkCd+cXJVHTDwdCr0IzwptSm035IHQktUu1QUMg==
+source-map-js@^1.0.2, source-map-js@^1.2.0:
+ version "1.2.1"
+ resolved "https://registry.yarnpkg.com/source-map-js/-/source-map-js-1.2.1.tgz#1ce5650fddd87abc099eda37dcff024c2667ae46"
+ integrity sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==
+
source-map-resolve@^0.5.0:
version "0.5.3"
resolved "https://registry.yarnpkg.com/source-map-resolve/-/source-map-resolve-0.5.3.tgz#190866bece7553e1f8f267a2ee82c606b5509a1a"
@@ -17701,7 +17873,7 @@ source-map@^0.6.0, source-map@^0.6.1, source-map@~0.6.0, source-map@~0.6.1:
resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.6.1.tgz#74722af32e9614e9c287a8d0bbde48b5e2f1a263"
integrity sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==
-sourcemap-codec@^1.4.4:
+sourcemap-codec@^1.4.8:
version "1.4.8"
resolved "https://registry.yarnpkg.com/sourcemap-codec/-/sourcemap-codec-1.4.8.tgz#ea804bd94857402e6992d05a38ef1ae35a9ab4c4"
integrity sha512-9NykojV5Uih4lgo5So5dtw+f0JgJX30KCNI8gwhz2J9A15wD0Ml6tjHKwf6fTSa6fAdVBdZeNOs9eJ71qCk8vA==
@@ -18853,7 +19025,12 @@ tslib@^1, tslib@^1.10.0, tslib@^1.8.1, tslib@^1.9.0, tslib@^1.9.3:
resolved "https://registry.yarnpkg.com/tslib/-/tslib-1.14.1.tgz#cf2d38bdc34a134bcaf1091c41f6619e2f672d00"
integrity sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg==
-tslib@^2.3.1, tslib@^2.6.2:
+tslib@^2.3.1:
+ version "2.8.1"
+ resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.8.1.tgz#612efe4ed235d567e8aba5f2a5fab70280ade83f"
+ integrity sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==
+
+tslib@^2.6.2:
version "2.6.2"
resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.6.2.tgz#703ac29425e7b37cd6fd456e92404d46d1f3e4ae"
integrity sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q==