Merge branch 'master' into 6179-add-script-to-set-neo4j-in-offline-mode-and-back

This commit is contained in:
Wolfgang Huß 2023-11-01 15:14:49 +01:00 committed by GitHub
commit e8c5fa3696
19 changed files with 195 additions and 80 deletions

View File

@ -26,7 +26,7 @@ In other words, we are interested in a network of networks and in keeping the da
<img src="https://user-images.githubusercontent.com/17728384/218597429-554e4082-3906-4721-8f68-0c13146fc218.png" alt="Post feed" title="Post feed" />
Check out more screenshots [here](/wiki/en:Screenshots).
Check out more screenshots [here](https://github.com/Ocelot-Social-Community/Ocelot-Social/wiki/en:Screenshots).
## Features
@ -40,14 +40,14 @@ Ocelot.social networks feature:
* <strong>filters</strong>
* and more …
Check out the [full feature list](/wiki/en:FAQ#what-are-the-features).
Check out the [full feature list](https://github.com/Ocelot-Social-Community/Ocelot-Social/wiki/en:FAQ#what-are-the-features).
## User Guide and Frequently Asked Questions
In the [wiki](/wiki) you can find more information.
In the [wiki](https://github.com/Ocelot-Social-Community/Ocelot-Social/wiki) you can find more information.
* [User Guide](/wiki/en:User-Guide)
* [Frequently Asked Questions](/wiki/en:FAQ)
* [User Guide](https://github.com/Ocelot-Social-Community/Ocelot-Social/wiki/en:User-Guide)
* [Frequently Asked Questions](https://github.com/Ocelot-Social-Community/Ocelot-Social/wiki/en:FAQ)
## Demo

View File

@ -31,7 +31,8 @@ const { parsed } = dotenv.config({ path: '../backend/.env' })
module.exports = defineConfig({
e2e: {
projectId: "qa7fe2",
defaultCommandTimeout: 10000,
defaultCommandTimeout: 60000,
pageLoadTimeout: 180000,
chromeWebSecurity: false,
baseUrl: "http://localhost:3000",
specPattern: "cypress/e2e/**/*.feature",

View File

@ -0,0 +1,58 @@
#!/bin/bash
# !!! never tested !!!
# base setup
SCRIPT_PATH=$(realpath "$0")
SCRIPT_DIR=$(dirname "${SCRIPT_PATH}")
# check CONFIGURATION
if [ -z "${CONFIGURATION}" ]; then
echo "You must provide a CONFIGURATION environment variable"
exit 1
fi
echo "Using CONFIGURATION=${CONFIGURATION}"
# configuration
KUBECONFIG=${KUBECONFIG:-${SCRIPT_DIR}/../configurations/${CONFIGURATION}/kubeconfig.yaml}
VALUES=${SCRIPT_DIR}/../configurations/${CONFIGURATION}/kubernetes/values.yaml
DOCKERHUB_OCELOT_TAG=${DOCKERHUB_OCELOT_TAG:-"latest"}
## install Ingress-Nginx
helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
helm repo update
helm install \
ingress-nginx ingress-nginx/ingress-nginx \
--kubeconfig=${KUBECONFIG} \
-f ${SCRIPT_DIR}/../src/kubernetes/nginx.values.yaml
## install Cert-Manager
helm repo add jetstack https://charts.jetstack.io
helm repo update
helm install \
cert-manager jetstack/cert-manager \
--kubeconfig=${KUBECONFIG} \
--namespace cert-manager \
--create-namespace \
--version v1.13.1 \
--set installCRDs=true
## install Ocelot with helm
helm install \
ocelot \
--kubeconfig=${KUBECONFIG} \
--values ${VALUES} \
--set appVersion="${DOCKERHUB_OCELOT_TAG}" \
${SCRIPT_DIR}/../src/kubernetes/ \
--timeout 10m
## set Neo4j database indexes, constraints, and the initial admin account, then run migrate up
kubectl --kubeconfig=${KUBECONFIG} \
-n default \
exec -it \
$(kubectl --kubeconfig=${KUBECONFIG} -n default get pods | grep ocelot-backend | awk '{ print $1 }') -- \
/bin/sh -c "yarn prod:migrate init && yarn prod:migrate up"
# /bin/sh -c "node --experimental-repl-await build/src/db/clean.js && node --experimental-repl-await build/src/db/seed.js"
echo "!!! You must install a firewall or similar !!! (for DigitalOcean see: deployment/src/kubernetes/README.md)"

View File

@ -75,7 +75,7 @@ The IPs of the DigitalOcean machines are not necessarily stable, so the cluster'
## Deploy
Yeah, you're done here. Back to [Deployment with Helm for Kubernetes](/deployment/kubernetes/README.md).
Yeah, you're done here. Back to [Deployment with Helm for Kubernetes](/deployment/src/kubernetes/README.md).
## Backups On DigitalOcean

View File

@ -9,7 +9,7 @@ Please contact us if you are interested in options not listed below.
Managed Kubernetes:
- [DigitalOcean](/deployment/kubernetes/DigitalOcean.md)
- [DigitalOcean](/deployment/src/kubernetes/DigitalOcean.md)
## Configuration
@ -46,29 +46,20 @@ Please have a look here:
- [Installing with Helm](https://cert-manager.io/docs/installation/helm/#installing-with-helm)
Our Helm installation is optimized for cert-manager version `v1.9.1` and `kubectl` version `v1.24.2`.
Our Helm installation is optimized for cert-manager version `v1.13.1` and `kubectl` version `v1.28.2`.
Please search here for cert-manager versions that are compatible with your `kubectl` version on the cluster and on the client: [cert-manager Supported Releases](https://cert-manager.io/docs/installation/supported-releases/#supported-releases).
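For reference, a quick way to check which versions are actually in play before picking a cert-manager release (the kubeconfig path below is only an example and depends on your setup):
```bash
# version of the local kubectl client
kubectl version --client

# client and server (cluster) versions, using an example kubeconfig path
kubectl --kubeconfig=./kubeconfig.yaml version
```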
***ATTENTION:*** *When uninstalling cert-manager, be sure to use the same method as for installation! Otherwise, we could end up in a broken state, see [Uninstall](https://cert-manager.io/docs/installation/kubectl/#uninstalling).*
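Since this guide installs cert-manager via Helm, the matching uninstall would also go through Helm; a minimal sketch, assuming the release name and namespace used above:
```bash
# remove the cert-manager Helm release from the namespace it was installed into
helm uninstall cert-manager --namespace cert-manager
```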
<!-- #### 1. Create Namespace
```bash
# kubeconfig.yaml set globally
$ kubectl create namespace cert-manager
# or kubeconfig.yaml in your repo, then adjust
$ kubectl --kubeconfig=/../kubeconfig.yaml create namespace cert-manager
```
#### 2. Add Helm repository and update
<!-- #### 1. Add Helm repository and update
```bash
$ helm repo add jetstack https://charts.jetstack.io
$ helm repo update
```
#### 3. Install Cert-Manager Helm chart
#### 2. Install Cert-Manager Helm chart
```bash
# option 1
@ -76,16 +67,24 @@ $ helm repo update
# $ kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.3.1/cert-manager.crds.yaml
# option 2
# !!! untested for now with the new deployment structure !!!
# in configuration/<deployment-name>
# kubeconfig.yaml set globally
$ helm install cert-manager jetstack/cert-manager \
$ helm install \
cert-manager jetstack/cert-manager \
--namespace cert-manager \
--version v1.9.1 \
--create-namespace \
--version v1.13.1 \
--set installCRDs=true
# or kubeconfig.yaml in your repo, then adjust
$ helm --kubeconfig=/../kubeconfig.yaml \
install cert-manager jetstack/cert-manager \
$ helm install \
cert-manager jetstack/cert-manager \
--kubeconfig ./kubeconfig.yaml \
--namespace cert-manager \
--version v1.9.1 \
--create-namespace \
--version v1.13.1 \
--set installCRDs=true
``` -->
@ -101,10 +100,15 @@ $ helm repo update
#### 2. Install ingress-nginx
```bash
# in configuration/<deployment-name>
# kubeconfig.yaml set globally
$ helm install ingress-nginx ingress-nginx/ingress-nginx -f nginx.values.yaml
helm install ingress-nginx ingress-nginx/ingress-nginx -f ../../src/kubernetes/nginx.values.yaml
# or kubeconfig.yaml in your repo, then adjust
$ helm --kubeconfig=/../kubeconfig.yaml install ingress-nginx ingress-nginx/ingress-nginx -f nginx.values.yaml
helm install \
ingress-nginx ingress-nginx/ingress-nginx -f ../../src/kubernetes/nginx.values.yaml \
--kubeconfig ./kubeconfig.yaml
```
### DigitalOcean Firewall
@ -159,6 +163,8 @@ $ doctl compute firewall get <ID> --context <context-name>
### DNS
***ATTENTION:** This does not seem to work at all, so we leave it out for the moment*
***TODO:** Is this necessary if we use the DigitalOcean DNS management service? See [Manage DNS With DigitalOcean](/deployment/kubernetes/DigitalOcean.md#manage-dns-with-digitalocean)*
This chart is only necessary (more precisely, recommended) if you run DigitalOcean without a load balancer.
@ -174,6 +180,8 @@ $ helm repo update
#### 2. Install DNS
```bash
# !!! untested for now with the new deployment structure !!!
# kubeconfig.yaml set globally
$ helm install dns bitnami/external-dns -f dns.values.yaml
# or kubeconfig.yaml in your repo, then adjust
@ -191,10 +199,22 @@ All commands for ocelot need to be executed in the kubernetes folder. Therefore
Only run once for the first time of installation:
```bash
# in configuration/<deployment-name>
# kubeconfig.yaml set globally
$ helm install ocelot ./
helm install ocelot \
--values ./kubernetes/values.yaml \
--set appVersion="latest" \
../../src/kubernetes/ \
--timeout 10m
# or kubeconfig.yaml in your repo, then adjust
$ helm --kubeconfig=/../kubeconfig.yaml install ocelot ./
helm install ocelot \
--kubeconfig ./kubeconfig.yaml \
--values ./kubernetes/values.yaml \
--set appVersion="latest" \
../../src/kubernetes/ \
--timeout 10m
```
#### Upgrade & Update
@ -202,10 +222,24 @@ $ helm --kubeconfig=/../kubeconfig.yaml install ocelot ./
Run for all upgrades and updates:
```bash
# !!! untested for now with the new deployment structure !!!
# in configuration/<deployment-name>
# kubeconfig.yaml set globally
$ helm upgrade ocelot ./
helm upgrade ocelot \
--values ./kubernetes/values.yaml \
--set appVersion="latest" \
../../src/kubernetes/ \
--timeout 10m
# or kubeconfig.yaml in your repo, then adjust
$ helm --kubeconfig=/../kubeconfig.yaml upgrade ocelot ./
helm upgrade ocelot \
--kubeconfig ./kubeconfig.yaml \
--values ./kubernetes/values.yaml \
--set appVersion="latest" \
../../src/kubernetes/ \
--timeout 10m
```
#### Rollback
@ -213,10 +247,17 @@ $ helm --kubeconfig=/../kubeconfig.yaml upgrade ocelot ./
Run for a rollback, in case something went wrong:
```bash
# !!! untested for now with the new deployment structure !!!
# in configuration/<deployment-name>
# kubeconfig.yaml set globally
$ helm rollback ocelot
helm rollback ocelot --timeout 10m
# or kubeconfig.yaml in your repo, then adjust
$ helm --kubeconfig=/../kubeconfig.yaml rollback ocelot
helm rollback ocelot \
--kubeconfig ./kubeconfig.yaml \
--timeout 10m
```
#### Uninstall
@ -224,10 +265,17 @@ $ helm --kubeconfig=/../kubeconfig.yaml rollback ocelot
Be aware that if you uninstall ocelot, the formerly bound volumes become unbound. Those volumes contain all data from uploads and the database. You have to manually free their claim references in order to bind them again when reinstalling. Once unbound from their former container references, they should automatically be rebound (provided the sizes did not change); see the sketch after the uninstall commands below.
```bash
# !!! untested for now with the new deployment structure !!!
# in configuration/<deployment-name>
# kubeconfig.yaml set globally
$ helm uninstall ocelot
helm uninstall ocelot --timeout 10m
# or kubeconfig.yaml in your repo, then adjust
$ helm --kubeconfig=/../kubeconfig.yaml uninstall ocelot
helm uninstall ocelot \
--kubeconfig ./kubeconfig.yaml \
--timeout 10m
```
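As noted above, the released volumes keep a reference to their former claims. A minimal sketch of how they could be freed again before reinstalling (the volume name is a placeholder; check the output of `kubectl get pv` first):
```bash
# list persistent volumes and look for STATUS "Released"
kubectl --kubeconfig ./kubeconfig.yaml get pv

# clear the stale claim reference so the volume can be bound again
kubectl --kubeconfig ./kubeconfig.yaml patch pv <pv-name> -p '{"spec":{"claimRef":null}}'
```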
## Backups
@ -292,8 +340,11 @@ $ kubectl -n default rollout status deployment/ocelot-neo4j --timeout=240s
# !!! be aware of the correct kube context !!!
$ kubectl config get-contexts
# reset and seed Neo4j database via backend for staging
# for staging: reset and seed Neo4j database via backend
$ kubectl -n default exec -it $(kubectl -n default get pods | grep ocelot-backend | awk '{ print $1 }') -- /bin/sh -c "node --experimental-repl-await build/src/db/clean.js && node --experimental-repl-await build/src/db/seed.js"
# or alternatively
# for production: set Neo4j database indexes, constraints, and initial admin account, then run migrate up via backend
$ kubectl -n default exec -it $(kubectl -n default get pods | grep ocelot-backend | awk '{ print $1 }') -- /bin/sh -c "yarn prod:migrate init && yarn prod:migrate up"
```

View File

@ -46,7 +46,7 @@ describe('DonationInfo.vue', () => {
// it looks like toLocaleString is, for some reason, not working as expected
it.skip('creates a label from the given amounts and a translation string', () => {
expect(mocks.$t).nthCalledWith(1, 'donations.amount-of-total', {
expect(mocks.$t).toHaveBeenNthCalledWith(1, 'donations.amount-of-total', {
amount: '10.000',
total: '50.000',
})
@ -55,7 +55,7 @@ describe('DonationInfo.vue', () => {
describe('given english locale', () => {
it('creates a label from the given amounts and a translation string', () => {
expect(mocks.$t).toBeCalledWith(
expect(mocks.$t).toHaveBeenCalledWith(
'donations.amount-of-total',
expect.objectContaining({
amount: '10,000',

View File

@ -3,7 +3,7 @@
<div class="filter-menu-options">
<div class="filter-header">
<h2 class="title">{{ $t('filter-menu.filter-by') }}</h2>
<div class="item-save-topics">
<div v-if="categoriesActive" class="item-save-topics">
<labeled-button
filled
:label="$t('actions.saveCategories')"
@ -62,18 +62,20 @@ export default {
},
methods: {
saveCategories() {
this.$apollo
.mutate({
mutation: SaveCategories(),
variables: { activeCategories: this.filteredCategoryIds },
})
.then(() => {
this.$emit('showFilterMenu')
this.$toast.success(this.$t('filter-menu.save.success'))
})
.catch(() => {
this.$toast.error(this.$t('filter-menu.save.error'))
})
if (this.categoriesActive) {
this.$apollo
.mutate({
mutation: SaveCategories(),
variables: { activeCategories: this.filteredCategoryIds },
})
.then(() => {
this.$emit('showFilterMenu')
this.$toast.success(this.$t('filter-menu.save.success'))
})
.catch(() => {
this.$toast.error(this.$t('filter-menu.save.error'))
})
}
},
},
}

View File

@ -94,20 +94,20 @@ describe('GroupMember', () => {
describe('with server error', () => {
it('toasts an error message', () => {
expect(toastErrorMock).toBeCalledWith('Oh no!')
expect(toastErrorMock).toHaveBeenCalledWith('Oh no!')
})
})
describe('with server success', () => {
it('calls the API', () => {
expect(apolloMock).toBeCalledWith({
expect(apolloMock).toHaveBeenCalledWith({
mutation: changeGroupMemberRoleMutation(),
variables: { groupId: 'group-id', userId: 'user', roleInGroup: 'admin' },
})
})
it('toasts a success message', () => {
expect(toastSuccessMock).toBeCalledWith('group.changeMemberRole')
expect(toastSuccessMock).toHaveBeenCalledWith('group.changeMemberRole')
})
})
})
@ -150,7 +150,7 @@ describe('GroupMember', () => {
})
it('toasts an error message', () => {
expect(toastErrorMock).toBeCalledWith('Oh no!!')
expect(toastErrorMock).toHaveBeenCalledWith('Oh no!!')
})
it('closes the modal', () => {
@ -165,7 +165,7 @@ describe('GroupMember', () => {
})
it('calls the API', () => {
expect(apolloMock).toBeCalledWith({
expect(apolloMock).toHaveBeenCalledWith({
mutation: removeUserFromGroupMutation(),
variables: { groupId: 'group-id', userId: 'user' },
})
@ -176,7 +176,7 @@ describe('GroupMember', () => {
})
it('toasts a success message', () => {
expect(toastSuccessMock).toBeCalledWith('group.memberRemoved')
expect(toastSuccessMock).toHaveBeenCalledWith('group.memberRemoved')
})
it('closes the modal', () => {

View File

@ -75,8 +75,8 @@ describe('LoginForm', () => {
describe('no categories saved', () => {
it('resets the categories', async () => {
await fillIn(Wrapper())
expect(storeMocks.mutations['posts/RESET_CATEGORIES']).toBeCalled()
expect(storeMocks.mutations['posts/TOGGLE_CATEGORY']).not.toBeCalled()
expect(storeMocks.mutations['posts/RESET_CATEGORIES']).toHaveBeenCalled()
expect(storeMocks.mutations['posts/TOGGLE_CATEGORY']).not.toHaveBeenCalled()
})
})
@ -84,11 +84,11 @@ describe('LoginForm', () => {
it('sets the categories', async () => {
authUserMock.mockReturnValue({ activeCategories: ['cat1', 'cat9', 'cat12'] })
await fillIn(Wrapper())
expect(storeMocks.mutations['posts/RESET_CATEGORIES']).toBeCalled()
expect(storeMocks.mutations['posts/TOGGLE_CATEGORY']).toBeCalledTimes(3)
expect(storeMocks.mutations['posts/TOGGLE_CATEGORY']).toBeCalledWith({}, 'cat1')
expect(storeMocks.mutations['posts/TOGGLE_CATEGORY']).toBeCalledWith({}, 'cat9')
expect(storeMocks.mutations['posts/TOGGLE_CATEGORY']).toBeCalledWith({}, 'cat12')
expect(storeMocks.mutations['posts/RESET_CATEGORIES']).toHaveBeenCalled()
expect(storeMocks.mutations['posts/TOGGLE_CATEGORY']).toHaveBeenCalledTimes(3)
expect(storeMocks.mutations['posts/TOGGLE_CATEGORY']).toHaveBeenCalledWith({}, 'cat1')
expect(storeMocks.mutations['posts/TOGGLE_CATEGORY']).toHaveBeenCalledWith({}, 'cat9')
expect(storeMocks.mutations['posts/TOGGLE_CATEGORY']).toHaveBeenCalledWith({}, 'cat12')
})
})
})

View File

@ -73,7 +73,7 @@ describe('PostTeaser', () => {
it('has no validation errors', () => {
const spy = jest.spyOn(global.console, 'error')
Wrapper()
expect(spy).not.toBeCalled()
expect(spy).not.toHaveBeenCalled()
spy.mockReset()
})

View File

@ -114,11 +114,11 @@ describe('PostIndex', () => {
})
it('resets the category filter', () => {
expect(mutations['posts/RESET_CATEGORIES']).toBeCalled()
expect(mutations['posts/RESET_CATEGORIES']).toHaveBeenCalled()
})
it('sets the category', () => {
expect(mutations['posts/TOGGLE_CATEGORY']).toBeCalledWith({}, 'cat3')
expect(mutations['posts/TOGGLE_CATEGORY']).toHaveBeenCalledWith({}, 'cat3')
})
})
})

View File

@ -19,7 +19,7 @@
</client-only>
</div>
<div>
<div v-if="categoriesActive && SHOW_CONTENT_FILTER_MASONRY_GRID" class="top-filter-menu">
<div v-if="SHOW_CONTENT_FILTER_MASONRY_GRID" class="top-filter-menu">
<div class="filterButtonBox">
<div class="filterButtonMenu" :class="{ 'hide-filter': hideByScroll }">
<base-button

View File

@ -71,7 +71,7 @@ describe('Login.vue', () => {
asyncData = true
tosVersion = '0.0.4'
wrapper = await Wrapper()
expect(redirect).toBeCalledWith('/')
expect(redirect).toHaveBeenCalledWith('/')
})
})
})

View File

@ -36,8 +36,8 @@ describe('logout.vue', () => {
})
it('logs out and redirects to login', () => {
expect(mocks.$store.dispatch).toBeCalledWith('auth/logout')
expect(mocks.$router.replace).toBeCalledWith('/login')
expect(mocks.$store.dispatch).toHaveBeenCalledWith('auth/logout')
expect(mocks.$router.replace).toHaveBeenCalledWith('/login')
})
})
})

View File

@ -114,19 +114,19 @@ describe('map', () => {
})
it('initializes on style load', () => {
expect(mapOnMock).toBeCalledWith('style.load', expect.any(Function))
expect(mapOnMock).toHaveBeenCalledWith('style.load', expect.any(Function))
})
it('initializes on mouseenter', () => {
expect(mapOnMock).toBeCalledWith('mouseenter', 'markers', expect.any(Function))
expect(mapOnMock).toHaveBeenCalledWith('mouseenter', 'markers', expect.any(Function))
})
it('initializes on mouseleave', () => {
expect(mapOnMock).toBeCalledWith('mouseleave', 'markers', expect.any(Function))
expect(mapOnMock).toHaveBeenCalledWith('mouseleave', 'markers', expect.any(Function))
})
it('calls add map control', () => {
expect(mapAddControlMock).toBeCalled()
expect(mapAddControlMock).toHaveBeenCalled()
})
describe('trigger style load event', () => {
@ -137,7 +137,7 @@ describe('map', () => {
})
it('calls loadMarkersIconsAndAddMarkers', () => {
expect(spy).toBeCalled()
expect(spy).toHaveBeenCalled()
})
})

View File

@ -68,7 +68,7 @@ describe('password-reset.vue', () => {
asyncData = true
isLoggedIn = true
wrapper = await Wrapper()
expect(redirect).toBeCalledWith('/')
expect(redirect).toHaveBeenCalledWith('/')
})
})
})

View File

@ -76,7 +76,10 @@ describe('post/_id.vue', () => {
authorId = 'some-author'
userId = 'some-user'
wrapper = await Wrapper()
expect(error).toBeCalledWith({ message: 'error-pages.cannot-edit-post', statusCode: 403 })
expect(error).toHaveBeenCalledWith({
message: 'error-pages.cannot-edit-post',
statusCode: 403,
})
})
it('renders with asyncData of same user', async () => {

View File

@ -327,7 +327,7 @@ describe('Registration', () => {
asyncData = true
isLoggedIn = true
wrapper = await Wrapper()
expect(redirect).toBeCalledWith('/')
expect(redirect).toHaveBeenCalledWith('/')
})
// copied from webapp/components/Registration/Signup.spec.js as testing template

View File

@ -71,7 +71,7 @@ describe('terms-and-conditions-confirm.vue', () => {
asyncData = true
tosAgree = true
wrapper = await Wrapper()
expect(redirect).toBeCalledWith('/')
expect(redirect).toHaveBeenCalledWith('/')
})
})
})