version: 2.1
jobs:
  build:
    docker:
      # Custom image; see tools/build/Dockerfile
      - image: gcr.io/neo4j-helm/build:latest
    environment:
      PROJECT: neo4j-helm
      CLUSTER: ci-test
      ZONE: us-central1-a
      NODES: 3
      NAME_CLUSTERED: testrunc
      NAME_STANDALONE: testrunsa
      NAME_RESTORE: testrevive
      BUILD_ARTIFACTS: build
      SERVICE_KEY_FILE: /tmp/service-key.json
      BUCKET: gs://circleci-build-system
      AZURE_CONTAINER_NAME: circleci-build-system
    steps:
      - checkout
      - setup_remote_docker
      - run:
          name: Tooling pre-requisites
          command: |
            # We will install local tools, so add those to the path.
            echo "export PATH=./tools:.:$PATH" >> $BASH_ENV
            mkdir -p $BUILD_ARTIFACTS
            mkdir -p tools
      - restore_cache:
          name: Restore NPM Package Cache
          keys:
            - npm-packages-{{ checksum "doc/package.json" }}
      - run:
          name: Setup GCP Tooling
          command: |
            echo $GCLOUD_SERVICE_KEY > $SERVICE_KEY_FILE
            gcloud auth activate-service-account \
              $GCLOUD_SERVICE_ACCOUNT \
              --key-file=$SERVICE_KEY_FILE
            gcloud auth configure-docker --quiet
      - run:
          name: Setup Azure Tooling
          command: |
            # The az command is built into the testing container.
            az login --service-principal --username "$SP_ID" --password "$SP_PASSWORD" --tenant "$TENANT_ID"
      - run:
          name: Generate Docs
          command: |
            cd doc
            npm install
            echo "Generating docs"
            ./node_modules/.bin/antora --stacktrace docs.yml
      - save_cache:
          name: Save NPM Package Cache
          key: npm-packages-{{ checksum "doc/package.json" }}
          paths:
            - ~/.cache/npm
      - run:
          name: Lint
          command: helm lint .
      - run:
          name: GKE Setup / Auth
          command: |
            echo "GKE SETUP"
            export CLUSTER_NAME=$CLUSTER-$CIRCLE_BUILD_NUM
            ./tools/test/provision-k8s.sh $CLUSTER_NAME
      - run:
          name: Create test namespace
          command: |
            NAMESPACE=ns-$CIRCLE_BUILD_NUM
            kubectl create namespace $NAMESPACE
            # GCP credentials secret; referenced below as secretName=neo4j-service-key.
            kubectl create secret generic neo4j-service-key \
              --namespace $NAMESPACE \
              --from-file=credentials.json=$SERVICE_KEY_FILE
            # Azure credentials are passed as a sourceable env-var file.
            echo "export ACCOUNT_NAME=$ACCOUNT_NAME" >> azure-credentials.sh
            echo "export ACCOUNT_KEY=$ACCOUNT_KEY" >> azure-credentials.sh
            kubectl create secret generic azure-credentials \
              --namespace $NAMESPACE \
              --from-file=credentials=azure-credentials.sh
      - run:
          name: Package and Install Neo4j-Helm Chart
          command: |
            NAMESPACE=ns-$CIRCLE_BUILD_NUM
            helm package .
            chart_archive=$(ls neo4j*.tgz)
            cp *.tgz $BUILD_ARTIFACTS/

            echo "Installing $chart_archive (CAUSAL CLUSTER TEST SCENARIO)"
            helm install $NAME_CLUSTERED -f deployment-scenarios/ci/cluster.yaml $chart_archive \
              --namespace $NAMESPACE \
              -v 3 | tee -a $BUILD_ARTIFACTS/INSTALL-cluster.txt

            echo "Installing $chart_archive (STANDALONE SCENARIO)"
            helm install $NAME_STANDALONE -f deployment-scenarios/ci/standalone.yaml $chart_archive \
              --namespace $NAMESPACE \
              -v 3 | tee -a $BUILD_ARTIFACTS/INSTALL-standalone.txt

            echo "Installing $chart_archive (AZURE RESTORE SCENARIO)"
            helm install $NAME_RESTORE -f deployment-scenarios/ci/single-instance-restore.yaml $chart_archive \
              --namespace $NAMESPACE \
              -v 3 | tee -a $BUILD_ARTIFACTS/INSTALL-restore.txt
      # - run:
      #     name: Twiddling our Thumbs
      #     command: |
      #       sleep 60
      #       NAMESPACE=ns-$CIRCLE_BUILD_NUM
      #       kubectl logs --namespace $NAMESPACE \
      #         -l "app.kubernetes.io/name=neo4j,app.kubernetes.io/component=core" | tee -a $BUILD_ARTIFACTS/startlogs.txt
      - run:
          name: Wait for GKE STANDALONE deployment to succeed and become ready
          command: |
            NAMESPACE=ns-$CIRCLE_BUILD_NUM
            kubectl rollout status --namespace $NAMESPACE StatefulSet/$NAME_STANDALONE-neo4j-core --watch | tee -a $BUILD_ARTIFACTS/wait-standalone.txt
      # We test standalone first because it forms faster than the cluster.
      # In the background, the cluster is still forming.
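      # For debugging outside of CI, roughly the same smoke test can be run by
      # hand (a sketch; assumes your kubeconfig points at the provisioned test
      # cluster and <build-num> is the CircleCI build number used above):
      #
      #   NAMESPACE=ns-<build-num>
      #   helm test testrunsa --namespace $NAMESPACE --logs
      #   kubectl get pods --namespace $NAMESPACE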
      - run:
          name: Test STANDALONE
          command: |
            NAMESPACE=ns-$CIRCLE_BUILD_NUM
            helm test $NAME_STANDALONE --namespace $NAMESPACE --logs | tee -a $BUILD_ARTIFACTS/TEST-STANDALONE.txt
      - run:
          name: Package backup chart
          command: |
            NAMESPACE=ns-$CIRCLE_BUILD_NUM
            helm package tools/backup
            chart_archive=$(ls neo4j*.tgz)
            cp *.tgz $BUILD_ARTIFACTS/
      - run:
          name: Kubectl version
          command: |
            kubectl version
      - run:
          name: Install GCP Backup chart on standalone
          command: |
            NAMESPACE=ns-$CIRCLE_BUILD_NUM
            helm install standalone-backup-gcp tools/backup \
              --namespace $NAMESPACE \
              --set neo4jaddr=$NAME_STANDALONE-neo4j.$NAMESPACE.svc.cluster.local:6362 \
              --set bucket=$BUCKET/$CIRCLE_BUILD_NUM/ \
              --set database="neo4j\,system" \
              --set cloudProvider=gcp \
              --set jobSchedule="0 */12 * * *" \
              --set secretName=neo4j-service-key
            sleep 5
            kubectl get all -n $NAMESPACE
            echo "Taking a backup"
            kubectl create job --namespace $NAMESPACE --from=cronjob/standalone-backup-gcp-job gcp-hot-backup
      - run:
          name: Install Azure Backup chart on standalone
          command: |
            NAMESPACE=ns-$CIRCLE_BUILD_NUM
            helm install standalone-backup-azure tools/backup \
              --namespace $NAMESPACE \
              --set neo4jaddr=$NAME_STANDALONE-neo4j.$NAMESPACE.svc.cluster.local:6362 \
              --set bucket=$AZURE_CONTAINER_NAME/build-$CIRCLE_BUILD_NUM/ \
              --set database="neo4j\,system" \
              --set cloudProvider=azure \
              --set jobSchedule="0 */12 * * *" \
              --set secretName=azure-credentials
            sleep 5
            kubectl get all -n $NAMESPACE
            echo "Taking a backup"
            kubectl create job --namespace $NAMESPACE --from=cronjob/standalone-backup-azure-job azure-hot-backup
      - run:
          name: Wait for GKE CLUSTERED deployment to succeed and become ready
          command: |
            NAMESPACE=ns-$CIRCLE_BUILD_NUM
            kubectl rollout status --namespace $NAMESPACE StatefulSet/$NAME_CLUSTERED-neo4j-core --watch | tee -a $BUILD_ARTIFACTS/wait-cluster.txt
            kubectl rollout status --namespace $NAMESPACE StatefulSet/$NAME_CLUSTERED-neo4j-replica --watch | tee -a $BUILD_ARTIFACTS/wait-cluster-rr.txt
      - run:
          name: Test CLUSTERED
          command: |
            NAMESPACE=ns-$CIRCLE_BUILD_NUM
            helm test $NAME_CLUSTERED --namespace $NAMESPACE --logs | tee -a $BUILD_ARTIFACTS/TEST-CLUSTER.txt
      - run:
          name: Harvest Logs from cluster
          when: always
          command: |
            # Prove log persistence
            NAMESPACE=ns-$CIRCLE_BUILD_NUM
            for idx in 0 1 2 ; do
              kubectl --namespace $NAMESPACE exec $NAME_CLUSTERED-neo4j-core-$idx -- \
                /bin/cat /data/logs/debug.log > $BUILD_ARTIFACTS/core-$idx-debug.log
            done
            kubectl --namespace $NAMESPACE exec $NAME_CLUSTERED-neo4j-replica-0 -- \
              /bin/cat /data/logs/debug.log > $BUILD_ARTIFACTS/replica-0-debug.log
      - run:
          name: Settle
          command: |
            sleep 120
      - run:
          name: Verify that GCP backup succeeded
          command: |
            # If "latest" backup pointer files exist in a directory specific to this
            # build number, we should be good.
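            # Expected bucket layout (a sketch inferred from the bucket/database
            # settings passed to tools/backup above; the exact paths are written
            # by the backup chart, not by this step):
            #   $BUCKET/<build-num>/neo4j/neo4j-latest.tar.gz
            #   $BUCKET/<build-num>/system/system-latest.tar.gz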
            NAMESPACE=ns-$CIRCLE_BUILD_NUM
            kubectl get all -n $NAMESPACE
            export LOGFILE=$BUILD_ARTIFACTS/gcp-backup.log
            kubectl get job --namespace $NAMESPACE | tee -a $LOGFILE
            helm status standalone-backup-gcp --namespace $NAMESPACE | tee -a $LOGFILE
            backup_pods=$(kubectl get pods --namespace $NAMESPACE | grep gcp-hot-backup | sed 's/ .*$//' | head -n 1)
            echo "Backup pods $backup_pods" | tee -a $LOGFILE
            kubectl describe pod --namespace $NAMESPACE "$backup_pods" | tee -a $LOGFILE
            kubectl logs --namespace $NAMESPACE "$backup_pods" | tee -a $LOGFILE
            gsutil ls "$BUCKET/$CIRCLE_BUILD_NUM/neo4j/neo4j-latest.tar.gz" 2>&1 | tee -a $LOGFILE
            gsutil ls "$BUCKET/$CIRCLE_BUILD_NUM/system/system-latest.tar.gz" 2>&1 | tee -a $LOGFILE
      - run:
          name: Verify that Azure backup succeeded
          command: |
            # If "latest" backup pointer files exist in a directory specific to this
            # build number, we should be good.
            NAMESPACE=ns-$CIRCLE_BUILD_NUM
            kubectl get all -n $NAMESPACE
            export LOGFILE=$BUILD_ARTIFACTS/azure-backup.log
            kubectl get job --namespace $NAMESPACE | tee -a $LOGFILE
            helm status standalone-backup-azure --namespace $NAMESPACE | tee -a $LOGFILE
            backup_pods=$(kubectl get pods --namespace $NAMESPACE | grep azure-hot-backup | sed 's/ .*$//' | head -n 1)
            echo "Backup pods $backup_pods" | tee -a $LOGFILE
            kubectl describe pod --namespace $NAMESPACE "$backup_pods" | tee -a $LOGFILE
            kubectl logs --namespace $NAMESPACE "$backup_pods" | tee -a $LOGFILE
            az storage blob list -c $AZURE_CONTAINER_NAME --account-name helmbackups --prefix build-$CIRCLE_BUILD_NUM/ | tee -a files.txt
            cat files.txt >> $LOGFILE
            total_files=$(cat files.txt | grep name | wc -l)
            if [ $total_files = 6 ] ; then
              echo "Test pass"
            else
              echo "$total_files total files on Azure storage; failed"
              exit 1
            fi
      - run:
          name: Verify Azure RESTORE succeeded to single instance
          command: |
            NAMESPACE=ns-$CIRCLE_BUILD_NUM
            export NEO4J_PASSWORD=mySecretPassword
            kubectl logs --namespace $NAMESPACE $NAME_RESTORE-neo4j-core-0 \
              -c restore-from-backup | tee -a $BUILD_ARTIFACTS/restore.log

            # Wait for the instance to come alive.
            kubectl rollout status --namespace $NAMESPACE StatefulSet/$NAME_RESTORE-neo4j-core --watch | tee -a $BUILD_ARTIFACTS/wait-standalone-restore.txt

            echo "MATCH (n) RETURN count(n) as n;" | kubectl run -i --rm cypher-shell \
              --namespace $NAMESPACE \
              --image=neo4j:4.4.10-enterprise --restart=Never \
              --command -- ./bin/cypher-shell -u neo4j -p "$NEO4J_PASSWORD" \
              -a bolt://$NAME_RESTORE-neo4j.$NAMESPACE.svc.cluster.local 2>&1 | tee restore-result.log

            echo "MATCH (n) RETURN count(n) as n;" | kubectl run -i --rm cypher-shell \
              --namespace $NAMESPACE \
              --image=neo4j:4.4.10-enterprise --restart=Never \
              --command -- ./bin/cypher-shell -u neo4j -p "$NEO4J_PASSWORD" \
              -a neo4j://$NAME_RESTORE-neo4j.$NAMESPACE.svc.cluster.local 2>&1 | tee restore-result.log

            cp restore-result.log $BUILD_ARTIFACTS/

            # Strip all cypher-shell output down to a single integer record count for n.
            export record_count=$(cat restore-result.log | egrep '^[0-9]+$')
            echo "record_count=$record_count"

            if [ "$record_count" -gt "1000" ] ; then
              echo "Test pass"
            else
              echo "Test FAIL with record count $record_count"
              exit 1
            fi
      - run:
          name: Uninstall / Cleanup
          # Make sure to always run this, particularly if the test fails,
          # to avoid clogging our cluster.
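          # --async below makes the delete return immediately; GKE finishes the
          # teardown server-side, so the CI job does not have to wait on it.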
          when: always
          command: |
            echo "TEAR DOWN GKE INSTANCE"
            gcloud container clusters delete $CLUSTER-$CIRCLE_BUILD_NUM \
              --async \
              --zone "$ZONE" \
              --project $PROJECT \
              --quiet
      - store_artifacts:
          path: build
      - store_artifacts:
          path: doc/build/site
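# To sanity-check edits to this file before pushing (a suggestion; assumes the
# CircleCI CLI is installed locally):
#   circleci config validate .circleci/config.yml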