From cbdbe276cd73657a59a86de67ad9f1b8a133dce4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Wolfgang=20Hu=C3=9F?= Date: Tue, 8 Dec 2020 08:50:30 +0100 Subject: [PATCH] Change namespace - from '--namespace=human-connection' to '-n ocelot-social'. --- deployment/digital-ocean/https/README.md | 6 +++--- deployment/legacy-migration/README.md | 14 +++++++------- deployment/volumes/neo4j-offline-backup/README.md | 10 +++++----- deployment/volumes/neo4j-online-backup/README.md | 4 ++-- scripts/deploy.sh | 6 +++--- 5 files changed, 20 insertions(+), 20 deletions(-) diff --git a/deployment/digital-ocean/https/README.md b/deployment/digital-ocean/https/README.md index 347582e5b..cc98dfe89 100644 --- a/deployment/digital-ocean/https/README.md +++ b/deployment/digital-ocean/https/README.md @@ -49,8 +49,8 @@ If the response looks good, configure your domain registrar for the new IP addre Now let's get a valid HTTPS certificate. According to the tutorial above, check your tls certificate for staging: ```bash -$ kubectl describe --namespace=human-connection certificate tls -$ kubectl describe --namespace=human-connection secret tls +$ kubectl describe -n ocelot-social certificate tls +$ kubectl describe -n ocelot-social secret tls ``` If everything looks good, update the issuer of your ingress. Change the annotation `certmanager.k8s.io/issuer` from `letsencrypt-develop` to `letsencrypt-production` in your ingress configuration in `ingress.yaml`. @@ -63,7 +63,7 @@ $ kubectl apply -f ingress.yaml Delete the former secret to force a refresh: ```text -$ kubectl --namespace=human-connection delete secret tls +$ kubectl -n ocelot-social delete secret tls ``` Now, HTTPS should be configured on your domain. Congrats. diff --git a/deployment/legacy-migration/README.md b/deployment/legacy-migration/README.md index 8dd91287f..b692305db 100644 --- a/deployment/legacy-migration/README.md +++ b/deployment/legacy-migration/README.md @@ -11,7 +11,7 @@ Create a configmap with the specific connection data of your legacy server: ```bash $ kubectl create configmap maintenance-worker \ - --namespace=human-connection \ + -n ocelot-social \ --from-literal=SSH_USERNAME=someuser \ --from-literal=SSH_HOST=yourhost \ --from-literal=MONGODB_USERNAME=hc-api \ @@ -25,7 +25,7 @@ Create a secret with your public and private ssh keys. As the [kubernetes docume ```bash $ kubectl create secret generic ssh-keys \ - --namespace=human-connection \ + -n ocelot-social \ --from-file=id_rsa=/path/to/.ssh/id_rsa \ --from-file=id_rsa.pub=/path/to/.ssh/id_rsa.pub \ --from-file=known_hosts=/path/to/.ssh/known_hosts @@ -41,14 +41,14 @@ Bring the application into maintenance mode. 
Then temporarily delete backend and database deployments
```bash
-$ kubectl --namespace=human-connection get deployments
+$ kubectl -n ocelot-social get deployments
NAME              READY   UP-TO-DATE   AVAILABLE   AGE
develop-backend   1/1     1            1           3d11h
develop-neo4j     1/1     1            1           3d11h
develop-webapp    2/2     2            2           73d
-$ kubectl --namespace=human-connection delete deployment develop-neo4j
+$ kubectl -n ocelot-social delete deployment develop-neo4j
deployment.extensions "develop-neo4j" deleted
-$ kubectl --namespace=human-connection delete deployment develop-backend
+$ kubectl -n ocelot-social delete deployment develop-backend
deployment.extensions "develop-backend" deleted
```
@@ -63,7 +63,7 @@ pod/develop-maintenance-worker created
Import legacy database and uploads:
```bash
-$ kubectl --namespace=human-connection exec -it develop-maintenance-worker bash
+$ kubectl -n ocelot-social exec -it develop-maintenance-worker bash
$ import_legacy_db
$ import_legacy_uploads
$ exit
@@ -72,7 +72,7 @@ $ exit
Delete the pod when you're done:
```text
-$ kubectl --namespace=human-connection delete pod develop-maintenance-worker
+$ kubectl -n ocelot-social delete pod develop-maintenance-worker
```
Oh, and of course you have to get those deleted deployments back. One way of
diff --git a/deployment/volumes/neo4j-offline-backup/README.md b/deployment/volumes/neo4j-offline-backup/README.md
index e4269a1b6..7c34aa764 100644
--- a/deployment/volumes/neo4j-offline-backup/README.md
+++ b/deployment/volumes/neo4j-offline-backup/README.md
@@ -29,7 +29,7 @@ database connections left and nobody can access the application.
Run the following:
```sh
-$ kubectl --namespace=human-connection edit deployment develop-neo4j
+$ kubectl -n ocelot-social edit deployment develop-neo4j
```
Add the following to `spec.template.spec.containers`:
@@ -55,9 +55,9 @@ file and trigger an update of the deployment.
First stop your Neo4J database, see above. Then:
```sh
-$ kubectl --namespace=human-connection get pods
+$ kubectl -n ocelot-social get pods
# Copy the ID of the pod running Neo4J.
-$ kubectl --namespace=human-connection exec -it <pod-id> bash
+$ kubectl -n ocelot-social exec -it <pod-id> bash
# Once you're in the pod, dump the db to a file e.g. `/root/neo4j-backup`.
> neo4j-admin dump --to=/root/neo4j-backup
> exit
@@ -72,12 +72,12 @@ Revert your changes to deployment `develop-neo4j` which will restart the databas
First stop your Neo4J database. Then:
```sh
-$ kubectl --namespace=human-connection get pods
+$ kubectl -n ocelot-social get pods
# Copy the ID of the pod running Neo4J.
# Then upload your local backup to the pod. Note that once the pod gets deleted
# e.g. if you change the deployment, the backup file is gone with it.
$ kubectl cp ./neo4j-backup human-connection/<pod-id>:/root/
-$ kubectl --namespace=human-connection exec -it <pod-id> bash
+$ kubectl -n ocelot-social exec -it <pod-id> bash
# Once you're in the pod restore the backup and overwrite the default database
# called `graph.db` with `--force`.
# This will delete all existing data in database `graph.db`!
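# A minimal sketch of the restore step the comments above lead into, assuming
# a Neo4j 3.x `neo4j-admin`, that the dump was uploaded to /root/neo4j-backup
# as shown, and that the default database is `graph.db`:
> neo4j-admin load --from=/root/neo4j-backup --database=graph.db --force
> exit
```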
diff --git a/deployment/volumes/neo4j-online-backup/README.md b/deployment/volumes/neo4j-online-backup/README.md
index babb68d26..602bbd577 100644
--- a/deployment/volumes/neo4j-online-backup/README.md
+++ b/deployment/volumes/neo4j-online-backup/README.md
@@ -43,12 +43,12 @@ Restoration must be done while the database is not running, see [our docs](https
After you have stopped the database and have the pod running, you can restore the database by running these commands:
```sh
-$ kubectl --namespace=human-connection get pods
+$ kubectl -n ocelot-social get pods
# Copy the ID of the pod running Neo4J.
# Then upload your local backup to the pod. Note that once the pod gets deleted
# e.g. if you change the deployment, the backup file is gone with it.
$ kubectl cp ./neo4j-backup/ human-connection/<pod-id>:/root/
-$ kubectl --namespace=human-connection exec -it <pod-id> bash
+$ kubectl -n ocelot-social exec -it <pod-id> bash
# Once you're in the pod restore the backup and overwrite the default database
# called `graph.db` with `--force`.
# This will delete all existing data in database `graph.db`!
diff --git a/scripts/deploy.sh b/scripts/deploy.sh
index f52e053f1..77b5501ca 100755
--- a/scripts/deploy.sh
+++ b/scripts/deploy.sh
@@ -1,6 +1,6 @@
#!/usr/bin/env bash
sed -i "s//${TRAVIS_COMMIT}/g" $TRAVIS_BUILD_DIR/scripts/patches/patch-deployment.yaml
sed -i "s//${TRAVIS_COMMIT}/g" $TRAVIS_BUILD_DIR/scripts/patches/patch-configmap.yaml
-kubectl --namespace=human-connection patch configmap develop-configmap -p "$(cat $TRAVIS_BUILD_DIR/scripts/patches/patch-configmap.yaml)"
-kubectl --namespace=human-connection patch deployment develop-backend -p "$(cat $TRAVIS_BUILD_DIR/scripts/patches/patch-deployment.yaml)"
-kubectl --namespace=human-connection patch deployment develop-webapp -p "$(cat $TRAVIS_BUILD_DIR/scripts/patches/patch-deployment.yaml)"
+kubectl -n ocelot-social patch configmap develop-configmap -p "$(cat $TRAVIS_BUILD_DIR/scripts/patches/patch-configmap.yaml)"
+kubectl -n ocelot-social patch deployment develop-backend -p "$(cat $TRAVIS_BUILD_DIR/scripts/patches/patch-deployment.yaml)"
+kubectl -n ocelot-social patch deployment develop-webapp -p "$(cat $TRAVIS_BUILD_DIR/scripts/patches/patch-deployment.yaml)"
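Since the patch only swaps the namespace flag (`--namespace=human-connection` and `-n ocelot-social` differ only in the flag's short form and the namespace name), a quick sanity check after applying it is to confirm the new namespace exists and still holds the resources that `deploy.sh` patches. This is a minimal sketch, assuming `kubectl` already points at the target cluster and that the `ocelot-social` namespace has been created:

```bash
# Confirm the renamed namespace exists (assumes it was created beforehand).
kubectl get namespace ocelot-social

# List the deployments and configmap the patched commands expect to find there.
kubectl -n ocelot-social get deployments,configmaps
```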