# CircleCI 2.1 pipeline for the neo4j-helm chart: builds docs, lints the
# chart, provisions a throwaway GKE cluster, installs three deployment
# scenarios (causal cluster, standalone, restore-from-Azure), exercises
# GCP/Azure backup charts, verifies restore, then tears the cluster down.
version: 2.1
jobs:
  build:
    docker:
      # Custom image; see tools/build/Dockerfile
      - image: gcr.io/neo4j-helm/build:latest
    environment:
      PROJECT: neo4j-helm
      CLUSTER: ci-test
      ZONE: us-central1-a
      # Quoted: CircleCI environment values must be strings, and a bare 3
      # parses as a YAML integer.
      NODES: "3"
      NAME_CLUSTERED: testrunc
      NAME_STANDALONE: testrunsa
      NAME_RESTORE: testrevive
      BUILD_ARTIFACTS: build
      SERVICE_KEY_FILE: /tmp/service-key.json
      BUCKET: gs://circleci-build-system
      AZURE_CONTAINER_NAME: circleci-build-system
    steps:
      - checkout
      - setup_remote_docker
      - run:
          name: Tooling pre-requisites
          command: |
            # We will install local tools so add those to path.
            echo "export PATH=./tools:.:$PATH" >> $BASH_ENV
            mkdir -p $BUILD_ARTIFACTS
            mkdir -p tools
      - restore_cache:
          name: Restore NPM Package Cache
          keys:
            - npm-packages-{{ checksum "doc/package.json" }}
      - run:
          name: Setup GCP Tooling
          command: |
            echo $GCLOUD_SERVICE_KEY > $SERVICE_KEY_FILE
            gcloud auth activate-service-account \
              $GCLOUD_SERVICE_ACCOUNT \
              --key-file=$SERVICE_KEY_FILE
            gcloud auth configure-docker --quiet
      - run:
          name: Setup Azure Tooling
          command: |
            # az command is built into the testing container.
            az login --service-principal --username "$SP_ID" --password "$SP_PASSWORD" --tenant "$TENANT_ID"
      - run:
          name: Generate Docs
          command: |
            cd doc
            npm install
            echo "Generating docs"
            ./node_modules/.bin/antora --stacktrace docs.yml
      - save_cache:
          # NOTE(review): label says "Yarn" but the path cached is the npm
          # cache restored by "Restore NPM Package Cache" above — confirm
          # and rename if this is just a copy/paste leftover.
          name: Save Yarn Package Cache
          key: npm-packages-{{ checksum "doc/package.json" }}
          paths:
            - ~/.cache/npm
      - run:
          name: Lint
          command: helm lint .
      - run:
          name: GKE Setup / Auth
          command: |
            echo "GKE SETUP"
            export CLUSTER_NAME=$CLUSTER-$CIRCLE_BUILD_NUM
            ./tools/test/provision-k8s.sh $CLUSTER_NAME
      - run:
          name: Create test namespace
          command: |
            # "\<<" keeps CircleCI 2.1 from treating the heredoc marker as
            # pipeline-parameter syntax; the shell still sees "<<EOF".
            cat \<<EOF | kubectl apply -f -
            apiVersion: v1
            kind: Namespace
            metadata:
              name: ns-$CIRCLE_BUILD_NUM
            EOF
      - run:
          name: Install secrets for maintenance ops and testing
          command: |
            NAMESPACE=ns-$CIRCLE_BUILD_NUM
            kubectl create secret generic neo4j-service-key \
              --namespace $NAMESPACE \
              --from-file=credentials=$SERVICE_KEY_FILE

            # This secret is injected in the test process to demonstrate that
            # config works. This is just any valid config we can check inside of
            # a running system.
            kubectl create secret generic my-secret-config \
              --namespace $NAMESPACE \
              --from-literal=NEO4J_dbms_transaction_concurrent_maximum=100
            echo "export ACCOUNT_NAME=$ACCOUNT_NAME" >> azure-credentials.sh
            echo "export ACCOUNT_KEY=$ACCOUNT_KEY" >> azure-credentials.sh
            kubectl create secret generic azure-credentials \
              --namespace $NAMESPACE \
              --from-file=credentials=azure-credentials.sh
      - run:
          name: Package and Install Neo4j-Helm Chart
          command: |
            NAMESPACE=ns-$CIRCLE_BUILD_NUM
            helm package .
            chart_archive=$(ls neo4j*.tgz)
            cp *.tgz $BUILD_ARTIFACTS/
            echo "Installing $chart_archive (CAUSAL CLUSTER TEST SCENARIO)"
            helm install $NAME_CLUSTERED -f deployment-scenarios/ci/cluster.yaml $chart_archive \
              --namespace $NAMESPACE \
              -v 3 | tee -a $BUILD_ARTIFACTS/INSTALL-cluster.txt
            echo "Installing $chart_archive (STANDALONE SCENARIO)"
            helm install $NAME_STANDALONE -f deployment-scenarios/ci/standalone.yaml $chart_archive \
              --namespace $NAMESPACE \
              -v 3 | tee -a $BUILD_ARTIFACTS/INSTALL-standalone.txt
            echo "Installing $chart_archive (AZURE RESTORE SCENARIO)"
            helm install $NAME_RESTORE -f deployment-scenarios/ci/single-instance-restore.yaml $chart_archive \
              --namespace $NAMESPACE \
              -v 3 | tee -a $BUILD_ARTIFACTS/INSTALL-restore.txt
      # - run:
      #     name: Twiddling our Thumbs
      #     command: |
      #       sleep 60
      #       NAMESPACE=ns-$CIRCLE_BUILD_NUM
      #       kubectl logs --namespace $NAMESPACE \
      #         -l "app.kubernetes.io/name=neo4j,app.kubernetes.io/component=core" | tee -a $BUILD_ARTIFACTS/startlogs.txt
      - run:
          name: Wait for GKE STANDALONE deployment to succeed and become ready
          command: |
            NAMESPACE=ns-$CIRCLE_BUILD_NUM
            kubectl rollout status --namespace $NAMESPACE StatefulSet/$NAME_STANDALONE-neo4j-core --watch | tee -a $BUILD_ARTIFACTS/wait-standalone.txt
      # We're going to test standalone first because it forms faster than cluster. In the background,
      # cluster is still forming....
      - run:
          name: Test STANDALONE
          command: |
            NAMESPACE=ns-$CIRCLE_BUILD_NUM
            helm test $NAME_STANDALONE --namespace $NAMESPACE --logs | tee -a $BUILD_ARTIFACTS/TEST-STANDALONE.txt
      - run:
          name: Package backup chart
          command: |
            NAMESPACE=ns-$CIRCLE_BUILD_NUM

            helm package tools/backup
            chart_archive=$(ls neo4j*.tgz)
            cp *.tgz $BUILD_ARTIFACTS/
      - run:
          name: Kubectl update
          command: |
            kubectl version
      - run:
          name: Install GCP Backup chart on standalone
          command: |
            NAMESPACE=ns-$CIRCLE_BUILD_NUM

            # Fixed: the original left a trailing "\" after the last --set,
            # silently continuing the command onto the following blank line
            # (the parallel Azure step below has no such continuation).
            helm install standalone-backup-gcp tools/backup \
              --namespace $NAMESPACE \
              --set neo4jaddr=$NAME_STANDALONE-neo4j.$NAMESPACE.svc.cluster.local:6362 \
              --set bucket=$BUCKET/$CIRCLE_BUILD_NUM/ \
              --set database="neo4j\,system" \
              --set cloudProvider=gcp \
              --set jobSchedule="0 */12 * * *" \
              --set secretName=neo4j-service-key

            sleep 5
            kubectl get all -n $NAMESPACE
            echo "Taking a backup"
            kubectl create job --namespace $NAMESPACE --from=cronjob/standalone-backup-gcp-job gcp-hot-backup
      - run:
          name: Install Azure Backup chart on standalone
          command: |
            NAMESPACE=ns-$CIRCLE_BUILD_NUM

            helm install standalone-backup-azure tools/backup \
              --namespace $NAMESPACE \
              --set neo4jaddr=$NAME_STANDALONE-neo4j.$NAMESPACE.svc.cluster.local:6362 \
              --set bucket=$AZURE_CONTAINER_NAME/build-$CIRCLE_BUILD_NUM/ \
              --set database="neo4j\,system" \
              --set cloudProvider=azure \
              --set jobSchedule="0 */12 * * *" \
              --set secretName=azure-credentials

            sleep 5
            kubectl get all -n $NAMESPACE
            echo "Taking a backup"
            kubectl create job --namespace $NAMESPACE --from=cronjob/standalone-backup-azure-job azure-hot-backup
      - run:
          name: Wait for GKE CLUSTERED deployment to succeed and become ready
          command: |
            NAMESPACE=ns-$CIRCLE_BUILD_NUM
            kubectl rollout status --namespace $NAMESPACE StatefulSet/$NAME_CLUSTERED-neo4j-core --watch | tee -a $BUILD_ARTIFACTS/wait-cluster.txt
            kubectl rollout status --namespace $NAMESPACE StatefulSet/$NAME_CLUSTERED-neo4j-replica --watch | tee -a $BUILD_ARTIFACTS/wait-cluster-rr.txt
      - run:
          name: Test
          command: |
            NAMESPACE=ns-$CIRCLE_BUILD_NUM
            helm test $NAME_CLUSTERED --namespace $NAMESPACE --logs | tee -a $BUILD_ARTIFACTS/TEST-CLUSTER.txt
      - run:
          name: Harvest Logs from cluster
          when: always
          command: |
            # Prove log persistence
            NAMESPACE=ns-$CIRCLE_BUILD_NUM
            for idx in 0 1 2 ; do
              kubectl --namespace $NAMESPACE exec $NAME_CLUSTERED-neo4j-core-$idx -- /bin/cat /data/logs/debug.log > $BUILD_ARTIFACTS/core-$idx-debug.log ;
            done
            kubectl --namespace $NAMESPACE exec $NAME_CLUSTERED-neo4j-replica-0 -- /bin/cat /data/logs/debug.log > $BUILD_ARTIFACTS/replica-0-debug.log
      - run:
          name: Settle
          command: |
            sleep 120
      - run:
          name: Verify that GCP backup succeeded
          command: |
            # If "latest" backup pointer files exist in a dir that is specific to this
            # build number, we should be good.
            NAMESPACE=ns-$CIRCLE_BUILD_NUM
            kubectl get all -n $NAMESPACE
            export LOGFILE=$BUILD_ARTIFACTS/gcp-backup.log
            kubectl get job --namespace $NAMESPACE | tee -a $LOGFILE
            helm status standalone-backup-gcp --namespace $NAMESPACE | tee -a $LOGFILE
            backup_pods=$(kubectl get pods --namespace $NAMESPACE | grep gcp-hot-backup | sed 's/ .*$//' | head -n 1)
            echo "Backup pods $backup_pods" | tee -a $LOGFILE
            kubectl describe pod --namespace $NAMESPACE "$backup_pods" | tee -a $LOGFILE
            kubectl logs --namespace $NAMESPACE "$backup_pods" | tee -a $LOGFILE
            gsutil ls "$BUCKET/$CIRCLE_BUILD_NUM/neo4j/neo4j-latest.tar.gz" 2>&1 | tee -a $BUILD_ARTIFACTS/gcp-backup.log
            gsutil ls "$BUCKET/$CIRCLE_BUILD_NUM/system/system-latest.tar.gz" 2>&1 | tee -a $BUILD_ARTIFACTS/gcp-backup.log
      - run:
          name: Verify that Azure backup succeeded
          command: |
            # If "latest" backup pointer files exist in a dir that is specific to this
            # build number, we should be good.
            NAMESPACE=ns-$CIRCLE_BUILD_NUM
            kubectl get all -n $NAMESPACE
            export LOGFILE=$BUILD_ARTIFACTS/azure-backup.log
            kubectl get job --namespace $NAMESPACE | tee -a $LOGFILE
            helm status standalone-backup-azure --namespace $NAMESPACE | tee -a $LOGFILE
            backup_pods=$(kubectl get pods --namespace $NAMESPACE | grep azure-hot-backup | sed 's/ .*$//' | head -n 1)
            echo "Backup pods $backup_pods" | tee -a $LOGFILE
            kubectl describe pod --namespace $NAMESPACE "$backup_pods" | tee -a $LOGFILE
            kubectl logs --namespace $NAMESPACE "$backup_pods" | tee -a $LOGFILE
            az storage blob list -c $AZURE_CONTAINER_NAME --account-name helmbackups --prefix build-$CIRCLE_BUILD_NUM/ | tee -a files.txt
            cat files.txt >> $LOGFILE
            total_files=$(cat files.txt | grep name | wc -l)
            if [ $total_files = 6 ] ; then
              echo "Test pass" ;
            else
              echo "$total_files total files on Azure storage; failed"
              exit 1
            fi
      - run:
          name: Verify Azure RESTORE succeeded to single instance
          command: |
            NAMESPACE=ns-$CIRCLE_BUILD_NUM
            export NEO4J_PASSWORD=mySecretPassword
            kubectl logs --namespace $NAMESPACE $NAME_RESTORE-neo4j-core-0 \
              -c restore-from-backup | tee -a $BUILD_ARTIFACTS/restore.log
            # Wait for instance to come alive.
            kubectl rollout status --namespace $NAMESPACE StatefulSet/$NAME_RESTORE-neo4j-core --watch | tee -a $BUILD_ARTIFACTS/wait-standalone-restore.txt
            echo "MATCH (n) RETURN count(n) as n;" | kubectl run -i --rm cypher-shell \
              --namespace $NAMESPACE \
              --image=neo4j:4.4.10-enterprise --restart=Never \
              --command -- ./bin/cypher-shell -u neo4j -p "$NEO4J_PASSWORD" \
              -a bolt://$NAME_RESTORE-neo4j.$NAMESPACE.svc.cluster.local 2>&1 | tee restore-result.log
            # NOTE(review): this second tee (no -a) overwrites the bolt://
            # result above, so only the neo4j:// run is checked — confirm
            # that is intentional.
            echo "MATCH (n) RETURN count(n) as n;" | kubectl run -i --rm cypher-shell \
              --namespace $NAMESPACE \
              --image=neo4j:4.4.10-enterprise --restart=Never \
              --command -- ./bin/cypher-shell -u neo4j -p "$NEO4J_PASSWORD" \
              -a neo4j://$NAME_RESTORE-neo4j.$NAMESPACE.svc.cluster.local 2>&1 | tee restore-result.log
            cp restore-result.log $BUILD_ARTIFACTS/
            # Strip all cypher shell output down to a single integer result count for n
            export record_count=$(cat restore-result.log | egrep '^[0-9]+$')
            echo "record_count=$record_count"
            if [ "$record_count" -gt "1000" ] ; then
              echo "Test pass" ;
            else
              echo "Test FAIL with record count $record_count"
              exit 1
            fi
      - run:
          name: Uninstall / Cleanup
          # Make sure to always run this, particularly if the test fails,
          # to avoid clogging our cluster.
          when: always
          command: |
            echo "TEAR DOWN GKE INSTANCE"
            gcloud container clusters delete $CLUSTER-$CIRCLE_BUILD_NUM \
              --async \
              --zone "$ZONE" \
              --project $PROJECT \
              --quiet
      - store_artifacts:
          path: build
      - store_artifacts:
          path: doc/build/site