Update docs for deploying new server, env variables

mattwr18 2019-11-11 11:10:02 +01:00
parent 1a685732ce
commit d105739c0b
12 changed files with 270 additions and 221 deletions

View File

@@ -3,77 +3,64 @@ language: generic
 addons:
   apt:
     packages:
-      - libgconf-2-4
+    - libgconf-2-4
   snaps:
-    - docker
-    - chromium
+  - docker
+  - chromium
 before_install:
-  - yarn global add wait-on
-  # Install Codecov
-  - yarn install
-  - cp cypress.env.template.json cypress.env.json
+- yarn global add wait-on
+- yarn install
+- cp cypress.env.template.json cypress.env.json
 install:
-  - docker-compose -f docker-compose.yml build --parallel
-  - docker-compose -f docker-compose.yml -f docker-compose.build-and-test.yml build # just tagging, just be quite fast
-  - docker-compose -f docker-compose.yml -f docker-compose.build-and-test.yml up -d
-  - wait-on http://localhost:7474
-  - docker-compose -f docker-compose.yml -f docker-compose.build-and-test.yml exec neo4j db_setup
+- docker-compose -f docker-compose.yml build --parallel
+- docker-compose -f docker-compose.yml -f docker-compose.build-and-test.yml build
+- docker-compose -f docker-compose.yml -f docker-compose.build-and-test.yml up -d
+- wait-on http://localhost:7474
+- docker-compose -f docker-compose.yml -f docker-compose.build-and-test.yml exec
+  neo4j db_setup
 script:
-  - export CYPRESS_RETRIES=1
-  - export BRANCH=$(if [ "$TRAVIS_PULL_REQUEST" == "false" ]; then echo $TRAVIS_BRANCH; else echo $TRAVIS_PULL_REQUEST_BRANCH; fi)
-  - echo "TRAVIS_BRANCH=$TRAVIS_BRANCH, PR=$PR, BRANCH=$BRANCH"
-  # Backend
-  - docker-compose exec backend yarn run lint
-  - docker-compose exec backend yarn run test --ci --verbose=false --coverage
-  - docker-compose exec backend yarn run db:seed
-  - docker-compose exec backend yarn run db:reset
-  # ActivityPub cucumber testing temporarily disabled because it's too buggy
-  # - docker-compose exec backend yarn run test:cucumber --tags "not @wip"
-  # - docker-compose exec backend yarn run db:reset
-  # - docker-compose exec backend yarn run db:seed
-  # Frontend
-  - docker-compose exec webapp yarn run lint
-  - docker-compose exec webapp yarn run test --ci --verbose=false --coverage
-  # Fullstack
-  - docker-compose down
-  - docker-compose -f docker-compose.yml up -d
-  - wait-on http://localhost:7474
-  - yarn run cypress:run
-  # Coverage
-  - yarn run codecov
+- export CYPRESS_RETRIES=1
+- export BRANCH=$(if [ "$TRAVIS_PULL_REQUEST" == "false" ]; then echo $TRAVIS_BRANCH;
+  else echo $TRAVIS_PULL_REQUEST_BRANCH; fi)
+- echo "TRAVIS_BRANCH=$TRAVIS_BRANCH, PR=$PR, BRANCH=$BRANCH"
+- docker-compose exec backend yarn run lint
+- docker-compose exec backend yarn run test --ci --verbose=false --coverage
+- docker-compose exec backend yarn run db:seed
+- docker-compose exec backend yarn run db:reset
+- docker-compose exec webapp yarn run lint
+- docker-compose exec webapp yarn run test --ci --verbose=false --coverage
+- docker-compose down
+- docker-compose -f docker-compose.yml up -d
+- wait-on http://localhost:7474
+- yarn run cypress:run
+- yarn run codecov
 after_success:
-  - wget https://raw.githubusercontent.com/DiscordHooks/travis-ci-discord-webhook/master/send.sh
-  - chmod +x send.sh
-  - ./send.sh success $WEBHOOK_URL
-  - if [ $TRAVIS_BRANCH == "master" ] && [ $TRAVIS_EVENT_TYPE == "push" ]; then
-    wget https://raw.githubusercontent.com/Human-Connection/Discord-Bot/develop/tester.sh &&
-    chmod +x tester.sh &&
-    ./tester.sh staging $WEBHOOK_URL;
-    fi
+- wget https://raw.githubusercontent.com/DiscordHooks/travis-ci-discord-webhook/master/send.sh
+- chmod +x send.sh
+- "./send.sh success $WEBHOOK_URL"
+- if [ $TRAVIS_BRANCH == "master" ] && [ $TRAVIS_EVENT_TYPE == "push" ]; then wget
+  https://raw.githubusercontent.com/Human-Connection/Discord-Bot/develop/tester.sh
+  && chmod +x tester.sh && ./tester.sh staging $WEBHOOK_URL; fi
 after_failure:
-  - wget https://raw.githubusercontent.com/DiscordHooks/travis-ci-discord-webhook/master/send.sh
-  - chmod +x send.sh
-  - ./send.sh failure $WEBHOOK_URL
+- wget https://raw.githubusercontent.com/DiscordHooks/travis-ci-discord-webhook/master/send.sh
+- chmod +x send.sh
+- "./send.sh failure $WEBHOOK_URL"
 before_deploy:
-  - go get -u github.com/tcnksm/ghr
-  - ./scripts/setup_kubernetes.sh
+- go get -u github.com/tcnksm/ghr
+- "./scripts/setup_kubernetes.sh"
 deploy:
-  - provider: script
-    script: scripts/docker_push.sh
-    on:
-      branch: master
-  - provider: script
-    script: scripts/deploy.sh
-    on:
-      branch: master
-  - provider: script
-    script: scripts/github_release.sh
-    on:
-      branch: master
+- provider: script
+  script: scripts/docker_push.sh
+  on:
+    branch: master
+- provider: script
+  script: scripts/deploy.sh
+  on:
+    branch: master
+- provider: script
+  script: scripts/github_release.sh
+  on:
+    branch: master
 env:
   global:
     secure: FBhnnSKb1iA4/RrvN/k5aXRaC570cLZMcTKMV7yB6X93JmVR6HWD0iCVkkqfoqyCf07ijQO5bzf051O6LzbFjIFGN4sMcfJEVZ4z/VvuljBT3aLvKrH0M89U0a9F1hAswffvynkLuw53EdLF7ZfaMpFtxyUJe+uTA11xYOhbSUg7pdVAQ0x4aGwe35YxRkXC/V0DKU91qHi/FPRin9oJSNj/QQusN2wCvEZdrBX+PhzFfZKAqie6NcoYcqZoFbs/8ZjkhzWlkr77yt9/n8WfaWftPG0VpB6UT4mFy8iSLqKkOVSNHMoWsjjuuDcuOVT2pAY0FXM8iuALBSzLEP8QCntex8rkOn4CtMRo0TvOB8jtD/QCuz7K5YwDhHRkh6kiAvxcIGmecDgSswX4iiVdLXf0P3gDvBWj/4TTmgv6YqEwFjnxC/QCAg04JHqiXwrwifKsqGZ2E7tu7l/hJ916IjfVw9jvjsFJsMCihoxLIWprBbIfyrfP/eBnXrYRtBy8wJw7asVMya8uKpuJPIWYVXDy95Nnr3cLtCZH2gJ4UUo5cWGWT/SsGhqynxidTrafJoWtgyDm/gOIgFE3zkYyIS0esAu7xX9aw/bsPUSlXUwaXuWB8ehWhT5gFali/D9ipQh5kzd1L+yYOg1PXvOFqab4JnrMLmARIzhBUbjmr8w=
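
The reformatting leaves the pipeline logic untouched: build the stack, wait for Neo4j, set up the database, then lint and test each service before the Cypress end-to-end run. A minimal sketch of replaying the same sequence locally, assuming the repository's docker-compose files and yarn scripts:

```bash
# Sketch: replay the CI checks locally (assumes the repo's compose files
# and a globally installed wait-on, as in before_install above).
set -e
docker-compose -f docker-compose.yml build --parallel
docker-compose -f docker-compose.yml -f docker-compose.build-and-test.yml up -d
wait-on http://localhost:7474   # block until Neo4j responds
docker-compose -f docker-compose.yml -f docker-compose.build-and-test.yml exec neo4j db_setup
docker-compose exec backend yarn run lint
docker-compose exec backend yarn run test --ci --verbose=false --coverage
docker-compose exec webapp yarn run lint
docker-compose exec webapp yarn run test --ci --verbose=false --coverage
```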

View File

@@ -5,7 +5,7 @@ The kubernetes dashboard is optional but very helpful for debugging. If you want
 ```bash
 # in folder deployment/digital-ocean/
 $ kubectl apply -f dashboard/
-$ kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/master/aio/deploy/recommended/kubernetes-dashboard.yaml
+$ kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-beta4/aio/deploy/recommended.yaml
 ```

 ### Login to your dashboard
@@ -18,7 +18,7 @@ $ kubectl proxy
 Visit:
-[http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/](http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/)
+[http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/](http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/)
 You should see a login screen.
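
Dashboard v2.0.0-beta4 installs into its own `kubernetes-dashboard` namespace instead of `kube-system`, which is why the proxy URL changes here. The login screen accepts a bearer token; a sketch for printing one, assuming an `admin-user` ServiceAccount was created alongside the dashboard (the account name is illustrative, adjust it to whatever the `dashboard/` manifests create):

```bash
# Sketch: print a bearer token for the dashboard login screen.
# Assumes a ServiceAccount named "admin-user" exists in the
# kubernetes-dashboard namespace.
kubectl -n kubernetes-dashboard describe secret \
  $(kubectl -n kubernetes-dashboard get secret | grep admin-user | awk '{print $1}')
```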

View File

@@ -1,15 +1,16 @@
 # Setup Ingress and HTTPS
 Follow [this quick start guide](https://docs.cert-manager.io/en/latest/tutorials/acme/quick-start/index.html) and install certmanager via helm and tiller:
+[This resource was also helpful](https://docs.cert-manager.io/en/latest/getting-started/install/kubernetes.html#installing-with-helm)
-```text
+```bash
 $ kubectl create serviceaccount tiller --namespace=kube-system
 $ kubectl create clusterrolebinding tiller-admin --serviceaccount=kube-system:tiller --clusterrole=cluster-admin
 $ helm init --service-account=tiller
 $ helm repo add jetstack https://charts.jetstack.io
 $ helm repo update
 $ helm install stable/nginx-ingress
-$ kubectl apply -f https://raw.githubusercontent.com/jetstack/cert-manager/release-0.6/deploy/manifests/00-crds.yaml
-$ helm install --name cert-manager --namespace cert-manager stable/cert-manager
+$ kubectl apply -f https://raw.githubusercontent.com/jetstack/cert-manager/release-0.11/deploy/manifests/00-crds.yaml
+$ helm install --name cert-manager --namespace cert-manager --version v0.11.0 jetstack/cert-manager
 ```

 ## Create Letsencrypt Issuers and Ingress Services
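
The issuer and ingress manifests live alongside these docs; for orientation, a minimal cert-manager v0.11 ClusterIssuer looks roughly like the sketch below. The name, email, and ingress class are placeholders, and the ACME HTTP-01 solver is assumed to run against the nginx ingress installed above:

```bash
# Sketch: a minimal ClusterIssuer for cert-manager v0.11 (placeholders marked).
kubectl apply -f - <<'EOF'
apiVersion: cert-manager.io/v1alpha2
kind: ClusterIssuer
metadata:
  name: letsencrypt-prod            # placeholder name
spec:
  acme:
    # Use the Let's Encrypt staging URL while testing to avoid rate limits.
    server: https://acme-v02.api.letsencrypt.org/directory
    email: admin@example.org        # placeholder contact address
    privateKeySecretRef:
      name: letsencrypt-prod        # secret that will hold the ACME account key
    solvers:
    - http01:
        ingress:
          class: nginx              # matches the nginx-ingress controller
EOF
```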

View File

@@ -12,20 +12,20 @@ spec:
   tls:
   - hosts:
     # - nitro-mailserver.human-connection.org
-    - nitro-staging.human-connection.org
+    - develop.human-connection.org
     secretName: tls
   rules:
-  - host: nitro-staging.human-connection.org
+  - host: develop.human-connection.org
     http:
       paths:
       - path: /
         backend:
-          serviceName: nitro-web
+          serviceName: web
           servicePort: 3000
-  # - host: nitro-mailserver.human-connection.org
-  #   http:
-  #     paths:
-  #       - path: /
-  #         backend:
-  #           serviceName: mailserver
-  #           servicePort: 80
+  - host: mailserver.human-connection.org
+    http:
+      paths:
+      - path: /
+        backend:
+          serviceName: mailserver
+          servicePort: 80
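
The `serviceName` values here must match the `metadata.name` of the Service objects renamed later in this commit (`web`, `mailserver`). A quick sanity check after applying, sketched under the assumption that kubectl already points at the cluster:

```bash
# Sketch: confirm the renamed backends behind the ingress actually resolve.
kubectl -n human-connection get svc web mailserver
kubectl -n human-connection get endpoints web mailserver
kubectl -n human-connection describe ingress    # backends should list pod IPs
```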

View File

@@ -1,47 +1,60 @@
 ---
-apiVersion: extensions/v1beta1
-kind: Deployment
-metadata:
-  name: nitro-backend
-  namespace: human-connection
-spec:
-  replicas: 1
-  minReadySeconds: 15
-  progressDeadlineSeconds: 60
-  strategy:
-    rollingUpdate:
-      maxSurge: 0
-      maxUnavailable: "100%"
-  selector:
-    matchLabels:
-      human-connection.org/selector: deployment-human-connection-backend
-  template:
-    metadata:
-      annotations:
-        backup.velero.io/backup-volumes: uploads
-      labels:
-        human-connection.org/commit: COMMIT
-        human-connection.org/selector: deployment-human-connection-backend
-      name: "nitro-backend"
-    spec:
-      containers:
-        - name: nitro-backend
-          image: humanconnection/nitro-backend:latest
-          imagePullPolicy: Always
-          ports:
-            - containerPort: 4000
-          envFrom:
-            - configMapRef:
-                name: configmap
-            - secretRef:
-                name: human-connection
-          volumeMounts:
-            - mountPath: /nitro-backend/public/uploads
-              name: uploads
-      volumes:
-        - name: uploads
-          persistentVolumeClaim:
-            claimName: uploads-claim
-      restartPolicy: Always
-      terminationGracePeriodSeconds: 30
-status: {}
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  creationTimestamp: null
+  labels:
+    human-connection.org/commit: COMMIT
+    human-connection.org/selector: deployment-human-connection-backend
+  name: backend
+  namespace: human-connection
+spec:
+  minReadySeconds: 15
+  progressDeadlineSeconds: 60
+  replicas: 1
+  revisionHistoryLimit: 2147483647
+  selector:
+    matchLabels:
+      human-connection.org/selector: deployment-human-connection-backend
+  strategy:
+    rollingUpdate:
+      maxSurge: 0
+      maxUnavailable: 100%
+    type: RollingUpdate
+  template:
+    metadata:
+      annotations:
+        backup.velero.io/backup-volumes: uploads
+      creationTimestamp: null
+      labels:
+        human-connection.org/commit: COMMIT
+        human-connection.org/selector: deployment-human-connection-backend
+      name: backend
+    spec:
+      containers:
+      - envFrom:
+        - configMapRef:
+            name: configmap
+        - secretRef:
+            name: human-connection
+        image: humanconnection/nitro-backend:latest
+        imagePullPolicy: Always
+        name: nitro-backend
+        ports:
+        - containerPort: 4000
+          protocol: TCP
+        resources: {}
+        terminationMessagePath: /dev/termination-log
+        terminationMessagePolicy: File
+        volumeMounts:
+        - mountPath: /nitro-backend/public/uploads
+          name: uploads
+      dnsPolicy: ClusterFirst
+      restartPolicy: Always
+      schedulerName: default-scheduler
+      securityContext: {}
+      terminationGracePeriodSeconds: 30
+      volumes:
+      - name: uploads
+        persistentVolumeClaim:
+          claimName: uploads-claim
+status: {}
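
Moving from `extensions/v1beta1` to `apps/v1` makes `spec.selector` required and immutable, and the objects are renamed as well (`nitro-backend` becomes `backend`), so the new Deployments are created fresh rather than patched in place. A sketch of the rollout, assuming the manifests sit in a local directory (the path is illustrative):

```bash
# Sketch: apply the renamed apps/v1 deployments and watch the rollout.
kubectl -n human-connection apply -f deployments/    # illustrative path
kubectl -n human-connection rollout status deployment/backend
# The old extensions/v1beta1 object keeps running under its old name
# until it is removed explicitly:
kubectl -n human-connection delete deployment nitro-backend --ignore-not-found
```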

View File

@@ -1,47 +1,61 @@
 ---
-apiVersion: extensions/v1beta1
-kind: Deployment
-metadata:
-  name: nitro-neo4j
-  namespace: human-connection
-spec:
-  replicas: 1
-  strategy:
-    rollingUpdate:
-      maxSurge: 0
-      maxUnavailable: "100%"
-  selector:
-    matchLabels:
-      human-connection.org/selector: deployment-human-connection-neo4j
-  template:
-    metadata:
-      annotations:
-        backup.velero.io/backup-volumes: neo4j-data
-      labels:
-        human-connection.org/selector: deployment-human-connection-neo4j
-      name: nitro-neo4j
-    spec:
-      containers:
-        - name: nitro-neo4j
-          image: humanconnection/neo4j:latest
-          imagePullPolicy: Always
-          resources:
-            requests:
-              memory: "2G"
-            limits:
-              memory: "8G"
-          envFrom:
-            - configMapRef:
-                name: configmap
-          ports:
-            - containerPort: 7687
-            - containerPort: 7474
-          volumeMounts:
-            - mountPath: /data/
-              name: neo4j-data
-      volumes:
-        - name: neo4j-data
-          persistentVolumeClaim:
-            claimName: neo4j-data-claim
-      restartPolicy: Always
-      terminationGracePeriodSeconds: 30
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  creationTimestamp: null
+  labels:
+    human-connection.org/selector: deployment-human-connection-neo4j
+  name: neo4j
+  namespace: human-connection
+spec:
+  progressDeadlineSeconds: 2147483647
+  replicas: 1
+  revisionHistoryLimit: 2147483647
+  selector:
+    matchLabels:
+      human-connection.org/selector: deployment-human-connection-neo4j
+  strategy:
+    rollingUpdate:
+      maxSurge: 0
+      maxUnavailable: 100%
+    type: RollingUpdate
+  template:
+    metadata:
+      annotations:
+        backup.velero.io/backup-volumes: neo4j-data
+      creationTimestamp: null
+      labels:
+        human-connection.org/selector: deployment-human-connection-neo4j
+      name: neo4j
+    spec:
+      containers:
+      - envFrom:
+        - configMapRef:
+            name: configmap
+        image: humanconnection/neo4j:latest
+        imagePullPolicy: Always
+        name: neo4j
+        ports:
+        - containerPort: 7687
+          protocol: TCP
+        - containerPort: 7474
+          protocol: TCP
+        resources:
+          limits:
+            memory: 2G
+          requests:
+            memory: 1G
+        terminationMessagePath: /dev/termination-log
+        terminationMessagePolicy: File
+        volumeMounts:
+        - mountPath: /data/
+          name: neo4j-data
+      dnsPolicy: ClusterFirst
+      restartPolicy: Always
+      schedulerName: default-scheduler
+      securityContext: {}
+      terminationGracePeriodSeconds: 30
+      volumes:
+      - name: neo4j-data
+        persistentVolumeClaim:
+          claimName: neo4j-data-claim
+status: {}

View File

@@ -1,37 +1,54 @@
-apiVersion: extensions/v1beta1
+apiVersion: apps/v1
 kind: Deployment
 metadata:
-  name: nitro-web
+  creationTimestamp: null
+  labels:
+    human-connection.org/commit: COMMIT
+    human-connection.org/selector: deployment-human-connection-web
+  name: web
   namespace: human-connection
 spec:
-  replicas: 2
   minReadySeconds: 15
   progressDeadlineSeconds: 60
+  replicas: 2
+  revisionHistoryLimit: 2147483647
   selector:
     matchLabels:
       human-connection.org/selector: deployment-human-connection-web
   strategy:
     rollingUpdate:
       maxSurge: 1
       maxUnavailable: 1
+    type: RollingUpdate
   template:
     metadata:
+      creationTimestamp: null
       labels:
         human-connection.org/commit: COMMIT
         human-connection.org/selector: deployment-human-connection-web
-      name: nitro-web
+      name: web
     spec:
       containers:
-      - name: web
+      - env:
+        - name: HOST
+          value: 0.0.0.0
         envFrom:
         - configMapRef:
             name: configmap
         - secretRef:
             name: human-connection
-        env:
-        - name: HOST
-          value: 0.0.0.0
         image: humanconnection/nitro-web:latest
+        imagePullPolicy: Always
+        name: web
         ports:
         - containerPort: 3000
+          protocol: TCP
+        resources: {}
-        imagePullPolicy: Always
+        terminationMessagePath: /dev/termination-log
+        terminationMessagePolicy: File
+      dnsPolicy: ClusterFirst
       restartPolicy: Always
+      schedulerName: default-scheduler
+      securityContext: {}
       terminationGracePeriodSeconds: 30
+status: {}

View File

@@ -1,34 +1,51 @@
 ---
-apiVersion: extensions/v1beta1
-kind: Deployment
-metadata:
-  name: mailserver
-  namespace: human-connection
-spec:
-  replicas: 1
-  minReadySeconds: 15
-  progressDeadlineSeconds: 60
-  selector:
-    matchLabels:
-      human-connection.org/selector: deployment-human-connection-mailserver
-  template:
-    metadata:
-      labels:
-        human-connection.org/selector: deployment-human-connection-mailserver
-      name: "mailserver"
-    spec:
-      containers:
-        - name: mailserver
-          image: djfarrelly/maildev
-          imagePullPolicy: Always
-          ports:
-            - containerPort: 80
-            - containerPort: 25
-          envFrom:
-            - configMapRef:
-                name: configmap
-            - secretRef:
-                name: human-connection
-      restartPolicy: Always
-      terminationGracePeriodSeconds: 30
-status: {}
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  creationTimestamp: null
+  labels:
+    human-connection.org/selector: deployment-human-connection-mailserver
+  name: mailserver
+  namespace: human-connection
+spec:
+  minReadySeconds: 15
+  progressDeadlineSeconds: 60
+  replicas: 1
+  revisionHistoryLimit: 2147483647
+  selector:
+    matchLabels:
+      human-connection.org/selector: deployment-human-connection-mailserver
+  strategy:
+    rollingUpdate:
+      maxSurge: 1
+      maxUnavailable: 1
+    type: RollingUpdate
+  template:
+    metadata:
+      creationTimestamp: null
+      labels:
+        human-connection.org/selector: deployment-human-connection-mailserver
+      name: mailserver
+    spec:
+      containers:
+      - envFrom:
+        - configMapRef:
+            name: configmap
+        - secretRef:
+            name: human-connection
+        image: djfarrelly/maildev
+        imagePullPolicy: Always
+        name: mailserver
+        ports:
+        - containerPort: 80
+          protocol: TCP
+        - containerPort: 25
+          protocol: TCP
+        resources: {}
+        terminationMessagePath: /dev/termination-log
+        terminationMessagePolicy: File
+      dnsPolicy: ClusterFirst
+      restartPolicy: Always
+      schedulerName: default-scheduler
+      securityContext: {}
+      terminationGracePeriodSeconds: 30
+status: {}

View File

@@ -1,7 +1,7 @@
 apiVersion: v1
 kind: Service
 metadata:
-  name: nitro-backend
+  name: backend
   namespace: human-connection
   labels:
     human-connection.org/selector: deployment-human-connection-backend

View File

@@ -1,7 +1,7 @@
 apiVersion: v1
 kind: Service
 metadata:
-  name: nitro-neo4j
+  name: neo4j
   namespace: human-connection
   labels:
     human-connection.org/selector: deployment-human-connection-neo4j

View File

@@ -1,7 +1,7 @@
 apiVersion: v1
 kind: Service
 metadata:
-  name: nitro-web
+  name: web
  namespace: human-connection
  labels:
    human-connection.org/selector: deployment-human-connection-web

View File

@@ -13,6 +13,6 @@ tar xf doctl-1.14.0-linux-amd64.tar.gz
 chmod +x ./doctl
 sudo mv ./doctl /usr/local/bin/doctl
-doctl auth init --access-token $DOCTL_ACCESS_TOKEN
+doctl auth --context develop init --access-token $DIGITALOCEAN_ACCESS_TOKEN
 mkdir -p ~/.kube/
-doctl kubernetes cluster kubeconfig show nitro-staging > ~/.kube/config
+doctl k8s --context develop cluster kubeconfig show develop > ~/.kube/config
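
With the token variable renamed to `DIGITALOCEAN_ACCESS_TOKEN` and the cluster addressed through a named `develop` context, the written kubeconfig can be sanity-checked before the deploy scripts use it; a brief sketch:

```bash
# Sketch: verify the kubeconfig that setup_kubernetes.sh just wrote.
kubectl config current-context          # should name the develop cluster
kubectl get nodes                       # fails fast on a bad token or cluster
kubectl -n human-connection get pods    # namespace used by these manifests
```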