From 9194d9274bd68460afc4fd22191c313c8ff1880e Mon Sep 17 00:00:00 2001 From: shivaji-dgraph Date: Mon, 24 Feb 2025 17:56:30 +0530 Subject: [PATCH 1/2] remove and relocate contrib folder content --- contrib/README.md | 3 - contrib/config/backups/README.md | 17 - contrib/config/backups/azure/.env | 5 - contrib/config/backups/azure/.gitignore | 2 - contrib/config/backups/azure/README.md | 246 -- .../config/backups/azure/azure_cli/.gitignore | 1 - .../config/backups/azure/azure_cli/README.md | 76 - .../backups/azure/azure_cli/create_blob.sh | 135 - .../backups/azure/azure_cli/create_secrets.sh | 104 - .../config/backups/azure/charts/.gitignore | 2 - .../backups/azure/charts/dgraph_config.yaml | 9 - .../config/backups/azure/charts/helmfile.yaml | 28 - .../backups/azure/charts/minio_config.yaml | 8 - .../config/backups/azure/docker-compose.yml | 37 - contrib/config/backups/azure/helmfile.yaml | 2 - .../config/backups/azure/terraform/.gitignore | 4 - .../config/backups/azure/terraform/README.md | 59 - .../config/backups/azure/terraform/main.tf | 55 - .../backups/azure/terraform/provider.tf | 4 - .../templates/dgraph_secrets.yaml.tmpl | 5 - .../azure/terraform/templates/minio.env.tmpl | 2 - .../templates/minio_secrets.yaml.tmpl | 2 - contrib/config/backups/client/.gitignore | 10 - contrib/config/backups/client/README.md | 274 -- .../config/backups/client/backup_helper.sh | 234 -- .../config/backups/client/compose-setup.sh | 276 -- .../backups/client/data/acl/hmac_secret_file | 1 - .../backups/client/data/backups/.gitkeep | 0 .../backups/client/data/enc/enc_key_file | 1 - .../config/backups/client/data/tls/.gitkeep | 0 .../backups/client/data/token/auth_token_file | 1 - .../config/backups/client/dgraph-backup.sh | 218 -- .../config/backups/client/docker-compose.yml | 44 - contrib/config/backups/gcp/.env | 5 - contrib/config/backups/gcp/.gitignore | 4 - contrib/config/backups/gcp/README.md | 292 --- contrib/config/backups/gcp/charts/.gitignore | 2 - .../backups/gcp/charts/dgraph_config.yaml | 9 - .../config/backups/gcp/charts/helmfile.yaml | 31 - .../backups/gcp/charts/minio_config.yaml | 8 - contrib/config/backups/gcp/docker-compose.yml | 44 - contrib/config/backups/gcp/helmfile.yaml | 2 - .../config/backups/gcp/terraform/.gitignore | 4 - .../config/backups/gcp/terraform/README.md | 74 - contrib/config/backups/gcp/terraform/main.tf | 116 - .../backups/gcp/terraform/modules/gsa/main.tf | 54 - .../config/backups/gcp/terraform/provider.tf | 9 - .../templates/dgraph_secrets.yaml.tmpl | 5 - .../gcp/terraform/templates/env.sh.tmpl | 3 - .../gcp/terraform/templates/minio.env.tmpl | 3 - .../templates/minio_secrets.yaml.tmpl | 6 - contrib/config/backups/nfs/.env | 4 - contrib/config/backups/nfs/.gitignore | 15 - contrib/config/backups/nfs/README.md | 333 --- contrib/config/backups/nfs/Vagrantfile | 57 - .../config/backups/nfs/charts/dgraph_nfs.yaml | 16 - .../backups/nfs/charts/dgraph_volume.yaml | 16 - .../config/backups/nfs/charts/helmfile.yaml | 28 - contrib/config/backups/nfs/charts/rook/env.sh | 14 - .../backups/nfs/charts/rook/fetch-operator.sh | 18 - .../backups/nfs/charts/rook/helmfile.yaml | 69 - .../config/backups/nfs/charts/rook/helmify.sh | 55 - .../base/.gitignore | 3 - .../base/kustomization.yaml | 7 - .../overlays/default/kustomization.yaml | 7 - .../base/kustomization.yaml | 6 - .../rook-nfs-server-kustomize/base/nfs.yaml | 30 - .../rook-nfs-server-kustomize/base/sa.yaml | 5 - .../overlays/default/kustomization.yaml | 7 - .../base/kustomization.yaml | 5 - .../base/sc.yaml | 14 - 
.../overlays/default/kustomization.yaml | 8 - .../overlays/default/pvc.yaml | 13 - contrib/config/backups/nfs/docker-compose.yml | 39 - .../backups/nfs/efs-terraform/README.md | 146 -- .../config/backups/nfs/efs-terraform/main.tf | 115 - .../backups/nfs/efs-terraform/output.tf | 55 - .../backups/nfs/efs-terraform/provider.tf | 6 - .../nfs/efs-terraform/templates/env.sh.tmpl | 3 - .../backups/nfs/efs-terraform/variables.tf | 73 - contrib/config/backups/nfs/gcfs-cli/README.md | 77 - .../backups/nfs/gcfs-cli/create_gcfs.sh | 88 - .../backups/nfs/gcfs-terraform/README.md | 73 - .../config/backups/nfs/gcfs-terraform/main.tf | 43 - .../modules/simple_gcfs/main.tf | 47 - .../backups/nfs/gcfs-terraform/provider.tf | 9 - .../nfs/gcfs-terraform/templates/env.sh.tmpl | 3 - contrib/config/backups/nfs/helmfile.yaml | 2 - contrib/config/backups/nfs/vagrant/helper.rb | 23 - contrib/config/backups/nfs/vagrant/hosts | 2 - .../config/backups/nfs/vagrant/provision.sh | 161 -- contrib/config/backups/s3/.env | 3 - contrib/config/backups/s3/.gitignore | 3 - contrib/config/backups/s3/README.md | 228 -- contrib/config/backups/s3/charts/.gitignore | 2 - .../backups/s3/charts/dgraph_config.yaml | 9 - .../config/backups/s3/charts/helmfile.yaml | 15 - contrib/config/backups/s3/docker-compose.yml | 28 - contrib/config/backups/s3/helmfile.yaml | 2 - .../config/backups/s3/terraform/.gitignore | 5 - contrib/config/backups/s3/terraform/README.md | 65 - contrib/config/backups/s3/terraform/main.tf | 62 - .../config/backups/s3/terraform/provider.tf | 6 - .../templates/dgraph_secrets.yaml.tmpl | 5 - .../s3/terraform/templates/env.sh.tmpl | 2 - .../s3/terraform/templates/s3.env.tmpl | 3 - .../monitoring/fluentd/fluent-docker.conf | 51 - .../monitoring/fluentd/fluentd-config.yaml | 62 - .../config/monitoring/fluentd/fluentd.yaml | 113 - .../dgraph-kubernetes-grafana-dashboard.json | 1194 --------- contrib/config/monitoring/jaeger/README.md | 10 - .../config/monitoring/jaeger/chart/README.md | 101 - .../jaeger/chart/dgraph_jaeger.yaml | 8 - .../monitoring/jaeger/chart/helmfile.yaml | 24 - .../jaeger/chart/jaeger_cassandra.yaml | 34 - .../jaeger/chart/jaeger_elasticsearch.yaml | 33 - .../monitoring/jaeger/operator/.gitignore | 2 - .../monitoring/jaeger/operator/README.md | 100 - .../jaeger/operator/dgraph_jaeger.yaml | 8 - .../monitoring/jaeger/operator/helmfile.yaml | 39 - .../monitoring/jaeger/operator/helmify.sh | 55 - .../jaeger-kustomize/base/jaeger.yaml | 6 - .../jaeger-kustomize/base/kustomization.yaml | 6 - .../overlays/badger/kustomization.yaml | 7 - .../overlays/badger/storage.yaml | 18 - .../config/monitoring/prometheus/README.md | 34 - .../monitoring/prometheus/alert-rules.yaml | 74 - .../prometheus/alertmanager-config.yaml | 25 - .../monitoring/prometheus/alertmanager.yaml | 31 - .../prometheus/chart-values/README.md | 148 -- .../alertmanager-pagerduty.yaml.gotmpl | 25 - .../dgraph-app-alert-rules.yaml.gotmpl | 59 - .../dgraph-backup-alert-rules.yaml | 42 - .../dgraph-prometheus-operator.yaml | 82 - .../prometheus/chart-values/helmfile.yaml | 28 - .../monitoring/prometheus/prometheus.yaml | 100 - contrib/config/terraform/.gitignore | 4 - contrib/config/terraform/aws/ha/README.md | 40 - .../aws/ha/aws/auto_scaling_group/main.tf | 26 - .../aws/ha/aws/auto_scaling_group/outputs.tf | 4 - .../ha/aws/auto_scaling_group/variables.tf | 24 - .../terraform/aws/ha/aws/instance/main.tf | 55 - .../terraform/aws/ha/aws/instance/outputs.tf | 4 - .../aws/ha/aws/instance/variables.tf | 58 - .../aws/ha/aws/launch_template/main.tf | 58 
- .../aws/ha/aws/launch_template/outputs.tf | 4 - .../aws/ha/aws/launch_template/variables.tf | 44 - .../ha/aws/load_balancer/lb_listner/main.tf | 10 - .../aws/load_balancer/lb_listner/variables.tf | 20 - .../aws/ha/aws/load_balancer/main.tf | 22 - .../aws/ha/aws/load_balancer/outputs.tf | 14 - .../aws/ha/aws/load_balancer/variables.tf | 19 - .../terraform/aws/ha/aws/target_group/main.tf | 17 - .../aws/ha/aws/target_group/outputs.tf | 9 - .../aws/ha/aws/target_group/variables.tf | 38 - .../config/terraform/aws/ha/aws/vpc/data.tf | 3 - .../config/terraform/aws/ha/aws/vpc/main.tf | 231 -- .../terraform/aws/ha/aws/vpc/outputs.tf | 34 - .../terraform/aws/ha/aws/vpc/variables.tf | 19 - .../terraform/aws/ha/dgraph/alpha/data.tf | 17 - .../terraform/aws/ha/dgraph/alpha/main.tf | 48 - .../terraform/aws/ha/dgraph/alpha/outputs.tf | 18 - .../aws/ha/dgraph/alpha/variables.tf | 64 - .../config/terraform/aws/ha/dgraph/main.tf | 105 - .../config/terraform/aws/ha/dgraph/outputs.tf | 54 - .../terraform/aws/ha/dgraph/ratel/data.tf | 22 - .../terraform/aws/ha/dgraph/ratel/main.tf | 49 - .../terraform/aws/ha/dgraph/ratel/outputs.tf | 17 - .../aws/ha/dgraph/ratel/variables.tf | 65 - .../terraform/aws/ha/dgraph/variables.tf | 84 - .../terraform/aws/ha/dgraph/zero/data.tf | 32 - .../terraform/aws/ha/dgraph/zero/main.tf | 23 - .../terraform/aws/ha/dgraph/zero/outputs.tf | 12 - .../terraform/aws/ha/dgraph/zero/variables.tf | 54 - contrib/config/terraform/aws/ha/main.tf | 60 - contrib/config/terraform/aws/ha/outputs.tf | 54 - .../ha/templates/dgraph-alpha.service.tmpl | 15 - .../ha/templates/dgraph-ratel.service.tmpl | 15 - .../templates/dgraph-zero-init.service.tmpl | 16 - .../aws/ha/templates/dgraph-zero.service.tmpl | 17 - .../templates/setup-systemd-service.sh.tmpl | 23 - .../terraform/aws/ha/terraform.tfvars.example | 5 - contrib/config/terraform/aws/ha/variables.tf | 126 - .../config/terraform/aws/standalone/README.md | 34 - .../config/terraform/aws/standalone/data.tf | 14 - .../config/terraform/aws/standalone/main.tf | 92 - .../config/terraform/aws/standalone/output.tf | 3 - .../standalone/templates/dgraph-ui.service | 15 - .../standalone/templates/dgraph-zero.service | 16 - .../aws/standalone/templates/dgraph.service | 16 - .../aws/standalone/templates/setup.tmpl | 24 - .../aws/standalone/terraform.tfvars.example | 5 - .../terraform/aws/standalone/variables.tf | 79 - .../config/terraform/gcp/standalone/README.md | 45 - .../config/terraform/gcp/standalone/data.tf | 14 - .../config/terraform/gcp/standalone/main.tf | 50 - .../terraform/gcp/standalone/outputs.tf | 7 - .../standalone/templates/dgraph-ui.service | 15 - .../standalone/templates/dgraph-zero.service | 16 - .../gcp/standalone/templates/dgraph.service | 16 - .../gcp/standalone/templates/setup.tmpl | 24 - .../terraform/gcp/standalone/variables.tf | 58 - .../config/terraform/kubernetes/.gitignore | 32 - contrib/config/terraform/kubernetes/README.md | 121 - contrib/config/terraform/kubernetes/main.tf | 47 - .../terraform/kubernetes/modules/aws/main.tf | 26 - .../modules/aws/modules/eks/data.tf | 3 - .../modules/aws/modules/eks/eks-cluster.tf | 75 - .../aws/modules/eks/eks-worker-nodes.tf | 63 - .../modules/aws/modules/eks/outputs.tf | 62 - .../modules/aws/modules/eks/provider.tf | 4 - .../modules/aws/modules/eks/variables.tf | 44 - .../modules/aws/modules/vpc/data.tf | 3 - .../modules/aws/modules/vpc/nacl-config.tf | 151 -- .../modules/aws/modules/vpc/outputs.tf | 11 - .../modules/aws/modules/vpc/provider.tf | 4 - 
.../modules/aws/modules/vpc/routes-config.tf | 45 - .../modules/aws/modules/vpc/subnets-config.tf | 44 - .../modules/aws/modules/vpc/variables.tf | 22 - .../modules/aws/modules/vpc/versions.tf | 3 - .../modules/aws/modules/vpc/vpc-config.tf | 39 - .../kubernetes/modules/aws/outputs.tf | 3 - .../kubernetes/modules/aws/variables.tf | 41 - .../kubernetes/modules/dgraph/main.tf | 59 - .../modules/dgraph/modules/alpha/.gitignore | 29 - .../modules/dgraph/modules/alpha/main.tf | 301 --- .../modules/dgraph/modules/alpha/outputs.tf | 7 - .../modules/dgraph/modules/alpha/provider.tf | 3 - .../dgraph/modules/alpha/templates/alpha.tpl | 6 - .../modules/alpha/templates/alpha_init.sh | 5 - .../modules/dgraph/modules/alpha/variables.tf | 183 -- .../modules/dgraph/modules/ratel/.gitignore | 29 - .../modules/dgraph/modules/ratel/main.tf | 72 - .../modules/dgraph/modules/ratel/outputs.tf | 3 - .../modules/dgraph/modules/ratel/provider.tf | 3 - .../modules/dgraph/modules/ratel/variables.tf | 52 - .../modules/dgraph/modules/zero/.gitignore | 29 - .../modules/dgraph/modules/zero/main.tf | 238 -- .../modules/dgraph/modules/zero/outputs.tf | 3 - .../modules/dgraph/modules/zero/provider.tf | 3 - .../dgraph/modules/zero/templates/zero-ha.tpl | 14 - .../dgraph/modules/zero/templates/zero.tpl | 3 - .../modules/dgraph/modules/zero/variables.tf | 148 -- .../kubernetes/modules/dgraph/outputs.tf | 19 - .../kubernetes/modules/dgraph/provider.tf | 3 - .../kubernetes/modules/dgraph/variables.tf | 81 - .../config/terraform/kubernetes/outputs.tf | 19 - .../kubernetes/terraform.tfvars.example | 31 - .../config/terraform/kubernetes/variables.tf | 111 - contrib/config/vault/README.md | 5 - contrib/config/vault/docker/.env | 2 - contrib/config/vault/docker/.gitignore | 5 - contrib/config/vault/docker/README.md | 355 --- .../vault/docker/dgraph_alpha_config.yaml | 11 - .../config/vault/docker/docker-compose.yaml | 36 - contrib/config/vault/docker/vault/.gitkeep | 0 contrib/config/vault/docker/vault/config.hcl | 14 - .../docker/vault/payload_alpha_secrets.json | 9 - .../vault/docker/vault/policy_admin.hcl | 22 - .../vault/docker/vault/policy_dgraph.hcl | 3 - contrib/docker-build/Makefile | 4 - contrib/docker-build/README.md | 12 - contrib/docker-build/build.sh | 7 - contrib/docker-build/docker-compose.yml | 14 - contrib/integration/acctupsert/.gitignore | 1 - contrib/integration/acctupsert/main.go | 238 -- contrib/integration/bank/.gitignore | 1 - contrib/integration/bank/Dockerfile | 3 - contrib/integration/bank/Makefile | 7 - contrib/integration/bank/main.go | 379 --- contrib/integration/bigdata/main.go | 243 -- contrib/integration/mutates/.gitignore | 1 - contrib/integration/mutates/main.go | 110 - contrib/integration/swap/.gitignore | 1 - contrib/integration/swap/main.go | 380 --- contrib/integration/swap/words.go | 2252 ----------------- contrib/integration/testtxn/.gitignore | 1 - contrib/local-test/Makefile | 76 - contrib/local-test/README.md | 347 --- contrib/local-test/docker-compose-lambda.yml | 25 - contrib/local-test/docker-compose.yml | 51 - contrib/local-test/scripts/script.js | 1 - contrib/manual_tests/.gitignore | 1 - contrib/manual_tests/README.md | 18 - contrib/manual_tests/log.sh | 21 - contrib/manual_tests/test.sh | 600 ----- contrib/scripts/README.txt | 2 - contrib/scripts/cover.sh | 34 - contrib/scripts/functions.sh | 59 - contrib/scripts/goldendata-queries.sh | 55 - contrib/scripts/install-dependencies.sh | 13 - contrib/scripts/load-test.sh | 16 - contrib/scripts/loader.sh | 42 - 
contrib/scripts/queries/allof_the.in | 12 - contrib/scripts/queries/allof_the_a.in | 12 - contrib/scripts/queries/allof_the_first.in | 12 - contrib/scripts/queries/basic.in | 12 - contrib/scripts/queries/gen_anyof_good_bad.in | 6 - contrib/scripts/queries/releasedate.in | 12 - contrib/scripts/queries/releasedate_geq.in | 12 - contrib/scripts/queries/releasedate_sort.in | 12 - .../queries/releasedate_sort_first_offset.in | 12 - contrib/scripts/transactions.sh | 31 - contrib/systemd/centos/README.md | 82 - contrib/systemd/centos/add_dgraph_account.sh | 16 - contrib/systemd/centos/dgraph-alpha.service | 18 - contrib/systemd/centos/dgraph-ui.service | 16 - contrib/systemd/centos/dgraph-zero.service | 18 - contrib/systemd/ha_cluster/README.md | 193 -- .../systemd/ha_cluster/dgraph-alpha.service | 17 - .../systemd/ha_cluster/dgraph-zero-0.service | 17 - .../systemd/ha_cluster/dgraph-zero-1.service | 17 - .../systemd/ha_cluster/dgraph-zero-2.service | 17 - contrib/systemd/ha_cluster/tests/.gitignore | 1 - contrib/systemd/ha_cluster/tests/README.md | 147 -- .../ha_cluster/tests/centos8/Vagrantfile | 36 - .../systemd/ha_cluster/tests/centos8/hosts | 6 - .../ha_cluster/tests/centos8/provision.sh | 186 -- .../tests/centos8/vagrant_helper.rb | 22 - .../ha_cluster/tests/ubuntu1804/Vagrantfile | 36 - .../systemd/ha_cluster/tests/ubuntu1804/hosts | 6 - .../ha_cluster/tests/ubuntu1804/provision.sh | 186 -- .../tests/ubuntu1804/vagrant_helper.rb | 22 - contrib/tlstest/Makefile | 48 - contrib/tlstest/README.md | 29 - contrib/tlstest/alpha_notls.sh | 3 - contrib/tlstest/alpha_tls.sh | 4 - contrib/tlstest/alpha_tls_auth.sh | 3 - contrib/tlstest/data.rdf.gz | Bin 1543 -> 0 bytes contrib/tlstest/live_notls.sh | 3 - contrib/tlstest/live_tls.sh | 3 - contrib/tlstest/live_tls_auth.sh | 3 - contrib/tlstest/openssl.cnf | 350 --- contrib/tlstest/run.sh | 7 - contrib/tlstest/server_11.sh | 3 - contrib/tlstest/server_nopass.sh | 4 - contrib/tlstest/server_nopass_client_auth.sh | 3 - contrib/tlstest/server_pass.sh | 3 - contrib/tlstest/test.sh | 33 - contrib/tlstest/test_reload.sh | 35 - contrib/wait-for-it.sh | 193 -- .../cmd/alpha/txn_test.go | 248 +- t/t.go | 1 - test.sh | 298 --- 344 files changed, 108 insertions(+), 19615 deletions(-) delete mode 100644 contrib/README.md delete mode 100644 contrib/config/backups/README.md delete mode 100644 contrib/config/backups/azure/.env delete mode 100644 contrib/config/backups/azure/.gitignore delete mode 100644 contrib/config/backups/azure/README.md delete mode 100644 contrib/config/backups/azure/azure_cli/.gitignore delete mode 100644 contrib/config/backups/azure/azure_cli/README.md delete mode 100755 contrib/config/backups/azure/azure_cli/create_blob.sh delete mode 100755 contrib/config/backups/azure/azure_cli/create_secrets.sh delete mode 100644 contrib/config/backups/azure/charts/.gitignore delete mode 100644 contrib/config/backups/azure/charts/dgraph_config.yaml delete mode 100644 contrib/config/backups/azure/charts/helmfile.yaml delete mode 100644 contrib/config/backups/azure/charts/minio_config.yaml delete mode 100644 contrib/config/backups/azure/docker-compose.yml delete mode 100644 contrib/config/backups/azure/helmfile.yaml delete mode 100644 contrib/config/backups/azure/terraform/.gitignore delete mode 100644 contrib/config/backups/azure/terraform/README.md delete mode 100644 contrib/config/backups/azure/terraform/main.tf delete mode 100644 contrib/config/backups/azure/terraform/provider.tf delete mode 100644 
contrib/config/backups/azure/terraform/templates/dgraph_secrets.yaml.tmpl delete mode 100644 contrib/config/backups/azure/terraform/templates/minio.env.tmpl delete mode 100644 contrib/config/backups/azure/terraform/templates/minio_secrets.yaml.tmpl delete mode 100644 contrib/config/backups/client/.gitignore delete mode 100644 contrib/config/backups/client/README.md delete mode 100644 contrib/config/backups/client/backup_helper.sh delete mode 100755 contrib/config/backups/client/compose-setup.sh delete mode 100644 contrib/config/backups/client/data/acl/hmac_secret_file delete mode 100644 contrib/config/backups/client/data/backups/.gitkeep delete mode 100644 contrib/config/backups/client/data/enc/enc_key_file delete mode 100644 contrib/config/backups/client/data/tls/.gitkeep delete mode 100644 contrib/config/backups/client/data/token/auth_token_file delete mode 100755 contrib/config/backups/client/dgraph-backup.sh delete mode 100644 contrib/config/backups/client/docker-compose.yml delete mode 100644 contrib/config/backups/gcp/.env delete mode 100644 contrib/config/backups/gcp/.gitignore delete mode 100644 contrib/config/backups/gcp/README.md delete mode 100644 contrib/config/backups/gcp/charts/.gitignore delete mode 100644 contrib/config/backups/gcp/charts/dgraph_config.yaml delete mode 100644 contrib/config/backups/gcp/charts/helmfile.yaml delete mode 100644 contrib/config/backups/gcp/charts/minio_config.yaml delete mode 100644 contrib/config/backups/gcp/docker-compose.yml delete mode 100644 contrib/config/backups/gcp/helmfile.yaml delete mode 100644 contrib/config/backups/gcp/terraform/.gitignore delete mode 100644 contrib/config/backups/gcp/terraform/README.md delete mode 100644 contrib/config/backups/gcp/terraform/main.tf delete mode 100644 contrib/config/backups/gcp/terraform/modules/gsa/main.tf delete mode 100644 contrib/config/backups/gcp/terraform/provider.tf delete mode 100644 contrib/config/backups/gcp/terraform/templates/dgraph_secrets.yaml.tmpl delete mode 100644 contrib/config/backups/gcp/terraform/templates/env.sh.tmpl delete mode 100644 contrib/config/backups/gcp/terraform/templates/minio.env.tmpl delete mode 100644 contrib/config/backups/gcp/terraform/templates/minio_secrets.yaml.tmpl delete mode 100644 contrib/config/backups/nfs/.env delete mode 100644 contrib/config/backups/nfs/.gitignore delete mode 100644 contrib/config/backups/nfs/README.md delete mode 100644 contrib/config/backups/nfs/Vagrantfile delete mode 100644 contrib/config/backups/nfs/charts/dgraph_nfs.yaml delete mode 100644 contrib/config/backups/nfs/charts/dgraph_volume.yaml delete mode 100644 contrib/config/backups/nfs/charts/helmfile.yaml delete mode 100644 contrib/config/backups/nfs/charts/rook/env.sh delete mode 100755 contrib/config/backups/nfs/charts/rook/fetch-operator.sh delete mode 100644 contrib/config/backups/nfs/charts/rook/helmfile.yaml delete mode 100755 contrib/config/backups/nfs/charts/rook/helmify.sh delete mode 100644 contrib/config/backups/nfs/charts/rook/rook-nfs-operator-kustomize/base/.gitignore delete mode 100644 contrib/config/backups/nfs/charts/rook/rook-nfs-operator-kustomize/base/kustomization.yaml delete mode 100644 contrib/config/backups/nfs/charts/rook/rook-nfs-operator-kustomize/overlays/default/kustomization.yaml delete mode 100644 contrib/config/backups/nfs/charts/rook/rook-nfs-server-kustomize/base/kustomization.yaml delete mode 100644 contrib/config/backups/nfs/charts/rook/rook-nfs-server-kustomize/base/nfs.yaml delete mode 100644 
contrib/config/backups/nfs/charts/rook/rook-nfs-server-kustomize/base/sa.yaml delete mode 100644 contrib/config/backups/nfs/charts/rook/rook-nfs-server-kustomize/overlays/default/kustomization.yaml delete mode 100644 contrib/config/backups/nfs/charts/rook/rook-nfs-storageclass-kustomize/base/kustomization.yaml delete mode 100644 contrib/config/backups/nfs/charts/rook/rook-nfs-storageclass-kustomize/base/sc.yaml delete mode 100644 contrib/config/backups/nfs/charts/rook/rook-nfs-storageclass-kustomize/overlays/default/kustomization.yaml delete mode 100644 contrib/config/backups/nfs/charts/rook/rook-nfs-storageclass-kustomize/overlays/default/pvc.yaml delete mode 100644 contrib/config/backups/nfs/docker-compose.yml delete mode 100644 contrib/config/backups/nfs/efs-terraform/README.md delete mode 100644 contrib/config/backups/nfs/efs-terraform/main.tf delete mode 100644 contrib/config/backups/nfs/efs-terraform/output.tf delete mode 100644 contrib/config/backups/nfs/efs-terraform/provider.tf delete mode 100644 contrib/config/backups/nfs/efs-terraform/templates/env.sh.tmpl delete mode 100644 contrib/config/backups/nfs/efs-terraform/variables.tf delete mode 100644 contrib/config/backups/nfs/gcfs-cli/README.md delete mode 100755 contrib/config/backups/nfs/gcfs-cli/create_gcfs.sh delete mode 100644 contrib/config/backups/nfs/gcfs-terraform/README.md delete mode 100644 contrib/config/backups/nfs/gcfs-terraform/main.tf delete mode 100644 contrib/config/backups/nfs/gcfs-terraform/modules/simple_gcfs/main.tf delete mode 100644 contrib/config/backups/nfs/gcfs-terraform/provider.tf delete mode 100644 contrib/config/backups/nfs/gcfs-terraform/templates/env.sh.tmpl delete mode 100644 contrib/config/backups/nfs/helmfile.yaml delete mode 100644 contrib/config/backups/nfs/vagrant/helper.rb delete mode 100644 contrib/config/backups/nfs/vagrant/hosts delete mode 100644 contrib/config/backups/nfs/vagrant/provision.sh delete mode 100644 contrib/config/backups/s3/.env delete mode 100644 contrib/config/backups/s3/.gitignore delete mode 100644 contrib/config/backups/s3/README.md delete mode 100644 contrib/config/backups/s3/charts/.gitignore delete mode 100644 contrib/config/backups/s3/charts/dgraph_config.yaml delete mode 100644 contrib/config/backups/s3/charts/helmfile.yaml delete mode 100644 contrib/config/backups/s3/docker-compose.yml delete mode 100644 contrib/config/backups/s3/helmfile.yaml delete mode 100644 contrib/config/backups/s3/terraform/.gitignore delete mode 100644 contrib/config/backups/s3/terraform/README.md delete mode 100644 contrib/config/backups/s3/terraform/main.tf delete mode 100644 contrib/config/backups/s3/terraform/provider.tf delete mode 100644 contrib/config/backups/s3/terraform/templates/dgraph_secrets.yaml.tmpl delete mode 100644 contrib/config/backups/s3/terraform/templates/env.sh.tmpl delete mode 100644 contrib/config/backups/s3/terraform/templates/s3.env.tmpl delete mode 100644 contrib/config/monitoring/fluentd/fluent-docker.conf delete mode 100644 contrib/config/monitoring/fluentd/fluentd-config.yaml delete mode 100644 contrib/config/monitoring/fluentd/fluentd.yaml delete mode 100644 contrib/config/monitoring/grafana/dgraph-kubernetes-grafana-dashboard.json delete mode 100644 contrib/config/monitoring/jaeger/README.md delete mode 100644 contrib/config/monitoring/jaeger/chart/README.md delete mode 100644 contrib/config/monitoring/jaeger/chart/dgraph_jaeger.yaml delete mode 100644 contrib/config/monitoring/jaeger/chart/helmfile.yaml delete mode 100644 
contrib/config/monitoring/jaeger/chart/jaeger_cassandra.yaml delete mode 100644 contrib/config/monitoring/jaeger/chart/jaeger_elasticsearch.yaml delete mode 100644 contrib/config/monitoring/jaeger/operator/.gitignore delete mode 100644 contrib/config/monitoring/jaeger/operator/README.md delete mode 100644 contrib/config/monitoring/jaeger/operator/dgraph_jaeger.yaml delete mode 100644 contrib/config/monitoring/jaeger/operator/helmfile.yaml delete mode 100755 contrib/config/monitoring/jaeger/operator/helmify.sh delete mode 100644 contrib/config/monitoring/jaeger/operator/jaeger-kustomize/base/jaeger.yaml delete mode 100644 contrib/config/monitoring/jaeger/operator/jaeger-kustomize/base/kustomization.yaml delete mode 100644 contrib/config/monitoring/jaeger/operator/jaeger-kustomize/overlays/badger/kustomization.yaml delete mode 100644 contrib/config/monitoring/jaeger/operator/jaeger-kustomize/overlays/badger/storage.yaml delete mode 100644 contrib/config/monitoring/prometheus/README.md delete mode 100644 contrib/config/monitoring/prometheus/alert-rules.yaml delete mode 100644 contrib/config/monitoring/prometheus/alertmanager-config.yaml delete mode 100644 contrib/config/monitoring/prometheus/alertmanager.yaml delete mode 100644 contrib/config/monitoring/prometheus/chart-values/README.md delete mode 100644 contrib/config/monitoring/prometheus/chart-values/alertmanager-pagerduty.yaml.gotmpl delete mode 100644 contrib/config/monitoring/prometheus/chart-values/dgraph-app-alert-rules.yaml.gotmpl delete mode 100644 contrib/config/monitoring/prometheus/chart-values/dgraph-backup-alert-rules.yaml delete mode 100644 contrib/config/monitoring/prometheus/chart-values/dgraph-prometheus-operator.yaml delete mode 100644 contrib/config/monitoring/prometheus/chart-values/helmfile.yaml delete mode 100644 contrib/config/monitoring/prometheus/prometheus.yaml delete mode 100644 contrib/config/terraform/.gitignore delete mode 100644 contrib/config/terraform/aws/ha/README.md delete mode 100644 contrib/config/terraform/aws/ha/aws/auto_scaling_group/main.tf delete mode 100644 contrib/config/terraform/aws/ha/aws/auto_scaling_group/outputs.tf delete mode 100644 contrib/config/terraform/aws/ha/aws/auto_scaling_group/variables.tf delete mode 100644 contrib/config/terraform/aws/ha/aws/instance/main.tf delete mode 100644 contrib/config/terraform/aws/ha/aws/instance/outputs.tf delete mode 100644 contrib/config/terraform/aws/ha/aws/instance/variables.tf delete mode 100644 contrib/config/terraform/aws/ha/aws/launch_template/main.tf delete mode 100644 contrib/config/terraform/aws/ha/aws/launch_template/outputs.tf delete mode 100644 contrib/config/terraform/aws/ha/aws/launch_template/variables.tf delete mode 100644 contrib/config/terraform/aws/ha/aws/load_balancer/lb_listner/main.tf delete mode 100644 contrib/config/terraform/aws/ha/aws/load_balancer/lb_listner/variables.tf delete mode 100644 contrib/config/terraform/aws/ha/aws/load_balancer/main.tf delete mode 100644 contrib/config/terraform/aws/ha/aws/load_balancer/outputs.tf delete mode 100644 contrib/config/terraform/aws/ha/aws/load_balancer/variables.tf delete mode 100644 contrib/config/terraform/aws/ha/aws/target_group/main.tf delete mode 100644 contrib/config/terraform/aws/ha/aws/target_group/outputs.tf delete mode 100644 contrib/config/terraform/aws/ha/aws/target_group/variables.tf delete mode 100644 contrib/config/terraform/aws/ha/aws/vpc/data.tf delete mode 100644 contrib/config/terraform/aws/ha/aws/vpc/main.tf delete mode 100644 
contrib/config/terraform/aws/ha/aws/vpc/outputs.tf delete mode 100644 contrib/config/terraform/aws/ha/aws/vpc/variables.tf delete mode 100644 contrib/config/terraform/aws/ha/dgraph/alpha/data.tf delete mode 100644 contrib/config/terraform/aws/ha/dgraph/alpha/main.tf delete mode 100644 contrib/config/terraform/aws/ha/dgraph/alpha/outputs.tf delete mode 100644 contrib/config/terraform/aws/ha/dgraph/alpha/variables.tf delete mode 100644 contrib/config/terraform/aws/ha/dgraph/main.tf delete mode 100644 contrib/config/terraform/aws/ha/dgraph/outputs.tf delete mode 100644 contrib/config/terraform/aws/ha/dgraph/ratel/data.tf delete mode 100644 contrib/config/terraform/aws/ha/dgraph/ratel/main.tf delete mode 100644 contrib/config/terraform/aws/ha/dgraph/ratel/outputs.tf delete mode 100644 contrib/config/terraform/aws/ha/dgraph/ratel/variables.tf delete mode 100644 contrib/config/terraform/aws/ha/dgraph/variables.tf delete mode 100644 contrib/config/terraform/aws/ha/dgraph/zero/data.tf delete mode 100644 contrib/config/terraform/aws/ha/dgraph/zero/main.tf delete mode 100644 contrib/config/terraform/aws/ha/dgraph/zero/outputs.tf delete mode 100644 contrib/config/terraform/aws/ha/dgraph/zero/variables.tf delete mode 100644 contrib/config/terraform/aws/ha/main.tf delete mode 100644 contrib/config/terraform/aws/ha/outputs.tf delete mode 100644 contrib/config/terraform/aws/ha/templates/dgraph-alpha.service.tmpl delete mode 100644 contrib/config/terraform/aws/ha/templates/dgraph-ratel.service.tmpl delete mode 100644 contrib/config/terraform/aws/ha/templates/dgraph-zero-init.service.tmpl delete mode 100644 contrib/config/terraform/aws/ha/templates/dgraph-zero.service.tmpl delete mode 100644 contrib/config/terraform/aws/ha/templates/setup-systemd-service.sh.tmpl delete mode 100644 contrib/config/terraform/aws/ha/terraform.tfvars.example delete mode 100644 contrib/config/terraform/aws/ha/variables.tf delete mode 100644 contrib/config/terraform/aws/standalone/README.md delete mode 100644 contrib/config/terraform/aws/standalone/data.tf delete mode 100644 contrib/config/terraform/aws/standalone/main.tf delete mode 100644 contrib/config/terraform/aws/standalone/output.tf delete mode 100644 contrib/config/terraform/aws/standalone/templates/dgraph-ui.service delete mode 100644 contrib/config/terraform/aws/standalone/templates/dgraph-zero.service delete mode 100644 contrib/config/terraform/aws/standalone/templates/dgraph.service delete mode 100644 contrib/config/terraform/aws/standalone/templates/setup.tmpl delete mode 100644 contrib/config/terraform/aws/standalone/terraform.tfvars.example delete mode 100644 contrib/config/terraform/aws/standalone/variables.tf delete mode 100644 contrib/config/terraform/gcp/standalone/README.md delete mode 100644 contrib/config/terraform/gcp/standalone/data.tf delete mode 100644 contrib/config/terraform/gcp/standalone/main.tf delete mode 100644 contrib/config/terraform/gcp/standalone/outputs.tf delete mode 100644 contrib/config/terraform/gcp/standalone/templates/dgraph-ui.service delete mode 100644 contrib/config/terraform/gcp/standalone/templates/dgraph-zero.service delete mode 100644 contrib/config/terraform/gcp/standalone/templates/dgraph.service delete mode 100644 contrib/config/terraform/gcp/standalone/templates/setup.tmpl delete mode 100644 contrib/config/terraform/gcp/standalone/variables.tf delete mode 100644 contrib/config/terraform/kubernetes/.gitignore delete mode 100644 contrib/config/terraform/kubernetes/README.md delete mode 100644 
contrib/config/terraform/kubernetes/main.tf delete mode 100644 contrib/config/terraform/kubernetes/modules/aws/main.tf delete mode 100644 contrib/config/terraform/kubernetes/modules/aws/modules/eks/data.tf delete mode 100644 contrib/config/terraform/kubernetes/modules/aws/modules/eks/eks-cluster.tf delete mode 100644 contrib/config/terraform/kubernetes/modules/aws/modules/eks/eks-worker-nodes.tf delete mode 100644 contrib/config/terraform/kubernetes/modules/aws/modules/eks/outputs.tf delete mode 100644 contrib/config/terraform/kubernetes/modules/aws/modules/eks/provider.tf delete mode 100644 contrib/config/terraform/kubernetes/modules/aws/modules/eks/variables.tf delete mode 100644 contrib/config/terraform/kubernetes/modules/aws/modules/vpc/data.tf delete mode 100644 contrib/config/terraform/kubernetes/modules/aws/modules/vpc/nacl-config.tf delete mode 100644 contrib/config/terraform/kubernetes/modules/aws/modules/vpc/outputs.tf delete mode 100644 contrib/config/terraform/kubernetes/modules/aws/modules/vpc/provider.tf delete mode 100644 contrib/config/terraform/kubernetes/modules/aws/modules/vpc/routes-config.tf delete mode 100644 contrib/config/terraform/kubernetes/modules/aws/modules/vpc/subnets-config.tf delete mode 100644 contrib/config/terraform/kubernetes/modules/aws/modules/vpc/variables.tf delete mode 100644 contrib/config/terraform/kubernetes/modules/aws/modules/vpc/versions.tf delete mode 100644 contrib/config/terraform/kubernetes/modules/aws/modules/vpc/vpc-config.tf delete mode 100644 contrib/config/terraform/kubernetes/modules/aws/outputs.tf delete mode 100644 contrib/config/terraform/kubernetes/modules/aws/variables.tf delete mode 100644 contrib/config/terraform/kubernetes/modules/dgraph/main.tf delete mode 100644 contrib/config/terraform/kubernetes/modules/dgraph/modules/alpha/.gitignore delete mode 100644 contrib/config/terraform/kubernetes/modules/dgraph/modules/alpha/main.tf delete mode 100644 contrib/config/terraform/kubernetes/modules/dgraph/modules/alpha/outputs.tf delete mode 100644 contrib/config/terraform/kubernetes/modules/dgraph/modules/alpha/provider.tf delete mode 100644 contrib/config/terraform/kubernetes/modules/dgraph/modules/alpha/templates/alpha.tpl delete mode 100644 contrib/config/terraform/kubernetes/modules/dgraph/modules/alpha/templates/alpha_init.sh delete mode 100644 contrib/config/terraform/kubernetes/modules/dgraph/modules/alpha/variables.tf delete mode 100644 contrib/config/terraform/kubernetes/modules/dgraph/modules/ratel/.gitignore delete mode 100644 contrib/config/terraform/kubernetes/modules/dgraph/modules/ratel/main.tf delete mode 100644 contrib/config/terraform/kubernetes/modules/dgraph/modules/ratel/outputs.tf delete mode 100644 contrib/config/terraform/kubernetes/modules/dgraph/modules/ratel/provider.tf delete mode 100644 contrib/config/terraform/kubernetes/modules/dgraph/modules/ratel/variables.tf delete mode 100644 contrib/config/terraform/kubernetes/modules/dgraph/modules/zero/.gitignore delete mode 100644 contrib/config/terraform/kubernetes/modules/dgraph/modules/zero/main.tf delete mode 100644 contrib/config/terraform/kubernetes/modules/dgraph/modules/zero/outputs.tf delete mode 100644 contrib/config/terraform/kubernetes/modules/dgraph/modules/zero/provider.tf delete mode 100644 contrib/config/terraform/kubernetes/modules/dgraph/modules/zero/templates/zero-ha.tpl delete mode 100644 contrib/config/terraform/kubernetes/modules/dgraph/modules/zero/templates/zero.tpl delete mode 100644 
contrib/config/terraform/kubernetes/modules/dgraph/modules/zero/variables.tf delete mode 100644 contrib/config/terraform/kubernetes/modules/dgraph/outputs.tf delete mode 100644 contrib/config/terraform/kubernetes/modules/dgraph/provider.tf delete mode 100644 contrib/config/terraform/kubernetes/modules/dgraph/variables.tf delete mode 100644 contrib/config/terraform/kubernetes/outputs.tf delete mode 100644 contrib/config/terraform/kubernetes/terraform.tfvars.example delete mode 100644 contrib/config/terraform/kubernetes/variables.tf delete mode 100644 contrib/config/vault/README.md delete mode 100644 contrib/config/vault/docker/.env delete mode 100644 contrib/config/vault/docker/.gitignore delete mode 100644 contrib/config/vault/docker/README.md delete mode 100644 contrib/config/vault/docker/dgraph_alpha_config.yaml delete mode 100644 contrib/config/vault/docker/docker-compose.yaml delete mode 100644 contrib/config/vault/docker/vault/.gitkeep delete mode 100644 contrib/config/vault/docker/vault/config.hcl delete mode 100644 contrib/config/vault/docker/vault/payload_alpha_secrets.json delete mode 100644 contrib/config/vault/docker/vault/policy_admin.hcl delete mode 100644 contrib/config/vault/docker/vault/policy_dgraph.hcl delete mode 100644 contrib/docker-build/Makefile delete mode 100644 contrib/docker-build/README.md delete mode 100755 contrib/docker-build/build.sh delete mode 100644 contrib/docker-build/docker-compose.yml delete mode 100644 contrib/integration/acctupsert/.gitignore delete mode 100644 contrib/integration/acctupsert/main.go delete mode 100644 contrib/integration/bank/.gitignore delete mode 100644 contrib/integration/bank/Dockerfile delete mode 100644 contrib/integration/bank/Makefile delete mode 100644 contrib/integration/bank/main.go delete mode 100644 contrib/integration/bigdata/main.go delete mode 100644 contrib/integration/mutates/.gitignore delete mode 100644 contrib/integration/mutates/main.go delete mode 100644 contrib/integration/swap/.gitignore delete mode 100644 contrib/integration/swap/main.go delete mode 100644 contrib/integration/swap/words.go delete mode 100644 contrib/integration/testtxn/.gitignore delete mode 100644 contrib/local-test/Makefile delete mode 100644 contrib/local-test/README.md delete mode 100644 contrib/local-test/docker-compose-lambda.yml delete mode 100644 contrib/local-test/docker-compose.yml delete mode 100644 contrib/local-test/scripts/script.js delete mode 100644 contrib/manual_tests/.gitignore delete mode 100644 contrib/manual_tests/README.md delete mode 100755 contrib/manual_tests/log.sh delete mode 100755 contrib/manual_tests/test.sh delete mode 100644 contrib/scripts/README.txt delete mode 100755 contrib/scripts/cover.sh delete mode 100755 contrib/scripts/functions.sh delete mode 100755 contrib/scripts/goldendata-queries.sh delete mode 100755 contrib/scripts/install-dependencies.sh delete mode 100755 contrib/scripts/load-test.sh delete mode 100755 contrib/scripts/loader.sh delete mode 100644 contrib/scripts/queries/allof_the.in delete mode 100644 contrib/scripts/queries/allof_the_a.in delete mode 100644 contrib/scripts/queries/allof_the_first.in delete mode 100644 contrib/scripts/queries/basic.in delete mode 100644 contrib/scripts/queries/gen_anyof_good_bad.in delete mode 100644 contrib/scripts/queries/releasedate.in delete mode 100644 contrib/scripts/queries/releasedate_geq.in delete mode 100644 contrib/scripts/queries/releasedate_sort.in delete mode 100644 contrib/scripts/queries/releasedate_sort_first_offset.in delete mode 100755 
contrib/scripts/transactions.sh delete mode 100644 contrib/systemd/centos/README.md delete mode 100755 contrib/systemd/centos/add_dgraph_account.sh delete mode 100644 contrib/systemd/centos/dgraph-alpha.service delete mode 100644 contrib/systemd/centos/dgraph-ui.service delete mode 100644 contrib/systemd/centos/dgraph-zero.service delete mode 100644 contrib/systemd/ha_cluster/README.md delete mode 100644 contrib/systemd/ha_cluster/dgraph-alpha.service delete mode 100644 contrib/systemd/ha_cluster/dgraph-zero-0.service delete mode 100644 contrib/systemd/ha_cluster/dgraph-zero-1.service delete mode 100644 contrib/systemd/ha_cluster/dgraph-zero-2.service delete mode 100644 contrib/systemd/ha_cluster/tests/.gitignore delete mode 100644 contrib/systemd/ha_cluster/tests/README.md delete mode 100644 contrib/systemd/ha_cluster/tests/centos8/Vagrantfile delete mode 100644 contrib/systemd/ha_cluster/tests/centos8/hosts delete mode 100755 contrib/systemd/ha_cluster/tests/centos8/provision.sh delete mode 100644 contrib/systemd/ha_cluster/tests/centos8/vagrant_helper.rb delete mode 100644 contrib/systemd/ha_cluster/tests/ubuntu1804/Vagrantfile delete mode 100644 contrib/systemd/ha_cluster/tests/ubuntu1804/hosts delete mode 100755 contrib/systemd/ha_cluster/tests/ubuntu1804/provision.sh delete mode 100644 contrib/systemd/ha_cluster/tests/ubuntu1804/vagrant_helper.rb delete mode 100644 contrib/tlstest/Makefile delete mode 100644 contrib/tlstest/README.md delete mode 100755 contrib/tlstest/alpha_notls.sh delete mode 100755 contrib/tlstest/alpha_tls.sh delete mode 100755 contrib/tlstest/alpha_tls_auth.sh delete mode 100644 contrib/tlstest/data.rdf.gz delete mode 100755 contrib/tlstest/live_notls.sh delete mode 100755 contrib/tlstest/live_tls.sh delete mode 100755 contrib/tlstest/live_tls_auth.sh delete mode 100644 contrib/tlstest/openssl.cnf delete mode 100755 contrib/tlstest/run.sh delete mode 100755 contrib/tlstest/server_11.sh delete mode 100755 contrib/tlstest/server_nopass.sh delete mode 100755 contrib/tlstest/server_nopass_client_auth.sh delete mode 100755 contrib/tlstest/server_pass.sh delete mode 100755 contrib/tlstest/test.sh delete mode 100755 contrib/tlstest/test_reload.sh delete mode 100755 contrib/wait-for-it.sh rename contrib/integration/testtxn/main_test.go => dgraph/cmd/alpha/txn_test.go (84%) delete mode 100755 test.sh diff --git a/contrib/README.md b/contrib/README.md deleted file mode 100644 index 4c7967e6d5f..00000000000 --- a/contrib/README.md +++ /dev/null @@ -1,3 +0,0 @@ -The `contrib` directory contains scripts, images, and other helpful things which are not part of the -core dgraph distribution. Please note that they could be out of date, since they do not receive the -same attention as the rest of the repository. diff --git a/contrib/config/backups/README.md b/contrib/config/backups/README.md deleted file mode 100644 index 14917d629d2..00000000000 --- a/contrib/config/backups/README.md +++ /dev/null @@ -1,17 +0,0 @@ -# Binary Backups - -This is a collection of scripts that assist with the backup process for Binary Backups (an Enterprise -feature). - -- Client - - [Client](client/README.md) - a client `dgraph-backup.sh` that can be used to automate backups. -- Cloud Object Storage - - [Azure Blob Storage](azure/README.md) - use the `minio` destination scheme with the MinIO Azure Gateway - to back up to Azure Blob Storage. - - [GCS (Google Cloud Storage)](gcp/README.md) - use the `minio` destination scheme with the MinIO GCS - Gateway to back up to a GCS bucket. 
- - [AWS S3 (Simple Storage Service)](s3/README.md) - use the `s3` destination scheme to back up to an S3 - bucket. -- File Storage - - [NFS (Network File System)](nfs/README.md) - use a file destination to back up to remote file - storage diff --git a/contrib/config/backups/azure/.env b/contrib/config/backups/azure/.env deleted file mode 100644 index 7626f055b55..00000000000 --- a/contrib/config/backups/azure/.env +++ /dev/null @@ -1,5 +0,0 @@ -## IMPORTANT: Though `latest` should be alright for local dev environments, -## never use `latest` for production environments as this can lead to -## inconsistent versions -DGRAPH_VERSION=latest -MINIO_VERSION=latest diff --git a/contrib/config/backups/azure/.gitignore b/contrib/config/backups/azure/.gitignore deleted file mode 100644 index 254931d4e3a..00000000000 --- a/contrib/config/backups/azure/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -# Artifacts Are Automatically Generated -minio.env diff --git a/contrib/config/backups/azure/README.md b/contrib/config/backups/azure/README.md deleted file mode 100644 index eca58310113..00000000000 --- a/contrib/config/backups/azure/README.md +++ /dev/null @@ -1,246 +0,0 @@ -# Binary Backups to Azure Blob - -Binary backups can use Azure Blob Storage for object storage using -[MinIO Azure Gateway](https://docs.min.io/docs/minio-gateway-for-azure.html). - -## Provisioning Azure Blob - -Some example scripts have been provided to illustrate how to create Azure Blob storage. - -- [azure_cli](azure_cli/README.md) - shell scripts to provision Azure Blob -- [terraform](terraform/README.md) - terraform scripts to provision Azure Blob - -## Setting up the Environment - -### Prerequisites - -You will need these tools: - -- Docker Environment - - [Docker](https://docs.docker.com/get-docker/) - container engine platform - - [Docker Compose](https://docs.docker.com/compose/install/) - orchestrates running docker - containers -- Kubernetes Environment - - [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) - required for interacting - with the Kubernetes platform - - [helm](https://helm.sh/docs/intro/install/) - deploys Kubernetes packages called helm charts - - [helm-diff](https://github.com/databus23/helm-diff) [optional] - displays differences that - will be applied to the Kubernetes cluster - - [helmfile](https://github.com/roboll/helmfile#installation) [optional] - orchestrates helm chart - deployments - -### Using Docker Compose - -A `docker-compose.yml` configuration is provided that will run the -[MinIO Azure Gateway](https://docs.min.io/docs/minio-gateway-for-azure.html) and a Dgraph cluster. - -#### Configuring Docker Compose - -You will need to create a `minio.env` first: - -```bash -MINIO_ACCESS_KEY=<azure storage account name> -MINIO_SECRET_KEY=<azure storage account key> -``` - -These values are used to access the -[MinIO Azure Gateway](https://docs.min.io/docs/minio-gateway-for-azure.html) and must be the same -credentials used to access the Azure Storage Account. As a convenience, both the example -[Terraform](terraform/README.md) and [azure_cli](azure_cli/README.md) scripts will auto-generate the -`minio.env`. 
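For illustration only, a populated `minio.env` might look like the sketch below. Per the `azure_cli/create_secrets.sh` script later in this patch, the MinIO access key is the Azure storage account name and the secret key is the storage account key; both values shown here are hypothetical placeholders, not real credentials:

```bash
# minio.env -- hypothetical example values
MINIO_ACCESS_KEY=myorguniquestorage12345
MINIO_SECRET_KEY=aGVsbG8...fake-base64-account-key...cGxlcw==
```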
- -```bash -## Run Minio Azure Gateway and Dgraph Cluster -docker-compose up --detach -``` - -#### Access MinIO and Ratel UI - -- MinIO UI: http://localhost:9000 -- Ratel UI: http://localhost:8000 - -#### Clean Up Docker Environment - -```bash -docker-compose stop -docker-compose rm -``` - -### Using Kubernetes with Helm Charts - -For Kubernetes, you can deploy the -[MinIO Azure Gateway](https://docs.min.io/docs/minio-gateway-for-azure.html), a Dgraph cluster, and a -Kubernetes CronJob that triggers backups, using [helm](https://helm.sh/docs/intro/install/). - -#### Configuring Secrets Values - -These values are auto-generated if you used either the [terraform](terraform/README.md) or -[azure_cli](azure_cli/README.md) scripts. If you already have an existing Azure Blob container you would like to -use, you will need to create `charts/dgraph_secrets.yaml` and `charts/minio_secrets.yaml` files. - -For the `charts/dgraph_secrets.yaml`, you would create a file like this: - -```yaml -backups: - keys: - minio: - access: <azure storage account name> - secret: <azure storage account key> -``` - -For the `charts/minio_secrets.yaml`, you would create a file like this: - -```yaml -accessKey: <azure storage account name> -secretKey: <azure storage account key> -``` - -#### Deploy Using Helmfile - -If you have [helmfile](https://github.com/roboll/helmfile#installation) and -[helm-diff](https://github.com/databus23/helm-diff) installed, you can deploy the -[MinIO Azure Gateway](https://docs.min.io/docs/minio-gateway-for-azure.html) and a Dgraph cluster with -the following: - -```bash -export BACKUP_BUCKET_NAME=<azure container name> # corresponds to Azure Container Name -helmfile apply -``` - -#### Deploy Using Helm - -```bash -export BACKUP_BUCKET_NAME=<azure container name> # corresponds to Azure Container Name -kubectl create namespace "minio" -helm repo add "minio" https://helm.min.io/ -helm install "azuregw" \ - --namespace minio \ - --values ./charts/minio_config.yaml \ - --values ./charts/minio_secrets.yaml \ - minio/minio - -helm repo add "dgraph" https://charts.dgraph.io -helm install "my-release" \ - --namespace default \ - --values ./charts/dgraph_config.yaml \ - --values ./charts/dgraph_secrets.yaml \ - --set backups.destination="minio://azuregw-minio.minio.svc:9000/${BACKUP_BUCKET_NAME}" \ - dgraph/dgraph -``` - -#### Access Resources - -For the MinIO UI, you can use this to access it at http://localhost:9000: - -```bash -export MINIO_POD_NAME=$( - kubectl get pods \ - --namespace minio \ - --selector "release=azuregw" \ - --output jsonpath="{.items[0].metadata.name}" -) -kubectl --namespace minio port-forward $MINIO_POD_NAME 9000:9000 -``` - -For Dgraph Alpha, you can use this to access it at http://localhost:8080: - -```bash -export ALPHA_POD_NAME=$( - kubectl get pods \ - --namespace default \ - --selector "statefulset.kubernetes.io/pod-name=my-release-dgraph-alpha-0,release=my-release" \ - --output jsonpath="{.items[0].metadata.name}" -) -kubectl --namespace default port-forward $ALPHA_POD_NAME 8080:8080 -``` - -For the Dgraph Ratel UI, you can use this to access it at http://localhost:8000: - -```bash -export RATEL_POD_NAME=$( - kubectl get pods \ - --namespace default \ - --selector "component=ratel,release=my-release" \ - --output jsonpath="{.items[0].metadata.name}" -) -kubectl --namespace default port-forward $RATEL_POD_NAME 8000:8000 -``` - -#### Cleanup Kubernetes Environment - -If you are using helmfile, you can delete the resources with: - -```bash -export BACKUP_BUCKET_NAME=<azure container name> # corresponds to Azure Container Name -helmfile delete -kubectl delete pvc --selector release=my-release # dgraph release name specified in charts/helmfile.yaml -``` - -If you are just using helm, you can 
delete the resources with: - -```bash -helm delete my-release --namespace default # dgraph release name used earlier -kubectl delete pvc --selector release=my-release # dgraph release name used earlier -helm delete azuregw --namespace minio -``` - -## Triggering a Backup - -This is run from a host where the alpha node is accessible on localhost at port `8080`. This can be done by -running the docker-compose environment, or by running -`kubectl port-forward pod/dgraph-dgraph-alpha-0 8080:8080`. In the docker-compose environment, the -host for `MINIO_HOST` is `gateway`. In the Kubernetes environment, using the scripts above, the -`MINIO_HOST` is `azuregw-minio.minio.svc`. - -### Using GraphQL - -For versions of Dgraph that support GraphQL, you can use this: - -```bash -ALPHA_HOST="localhost" # hostname to connect to alpha1 container -MINIO_HOST="gateway" # hostname from alpha1 container -BACKUP_BUCKET_NAME="" # azure storage container name, e.g. dgraph-backups -BACKUP_PATH=minio://${MINIO_HOST}:9000/${BACKUP_BUCKET_NAME}?secure=false - -GRAPHQL="{\"query\": \"mutation { backup(input: {destination: \\\"$BACKUP_PATH\\\" forceFull: true}) { response { message code } } }\"}" -HEADER="Content-Type: application/json" - -curl --silent --header "$HEADER" --request POST $ALPHA_HOST:8080/admin --data "$GRAPHQL" -``` - -This should return a response in JSON that will look like this if successful: - -```JSON -{ - "data": { - "backup": { - "response": { - "message": "Backup completed.", - "code": "Success" - } - } - } -} -``` - -### Using REST API - -For earlier Dgraph versions that support the REST admin port, you can do this: - -```bash -ALPHA_HOST="localhost" # hostname to connect to alpha1 container -MINIO_HOST="gateway" # hostname from alpha1 container -BACKUP_BUCKET_NAME="" # azure storage container name, e.g. dgraph-backups -BACKUP_PATH=minio://${MINIO_HOST}:9000/${BACKUP_BUCKET_NAME}?secure=false - -curl --silent --request POST $ALPHA_HOST:8080/admin/backup?force_full=true --data "destination=$BACKUP_PATH" -``` - -This should return a response in JSON that will look like this if successful: - -```JSON -{ - "code": "Success", - "message": "Backup completed." -} -``` diff --git a/contrib/config/backups/azure/azure_cli/.gitignore b/contrib/config/backups/azure/azure_cli/.gitignore deleted file mode 100644 index 137e6783309..00000000000 --- a/contrib/config/backups/azure/azure_cli/.gitignore +++ /dev/null @@ -1 +0,0 @@ -env.sh diff --git a/contrib/config/backups/azure/azure_cli/README.md b/contrib/config/backups/azure/azure_cli/README.md deleted file mode 100644 index 0ce1d2fe974..00000000000 --- a/contrib/config/backups/azure/azure_cli/README.md +++ /dev/null @@ -1,76 +0,0 @@ -# Provisioning Azure Blob with Azure CLI - -## About - -This script will create the resources needed to create Azure Blob Storage using the -[`simple-azure-blob`](https://github.com/darkn3rd/simple-azure-blob) module. - -## Prerequisites - -You need the following installed to use this automation: - -- [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest) - with an active Azure subscription configured. -- [jq](https://stedolan.github.io/jq/) - command-line JSON processor that makes it easy to parse JSON - output from Azure CLI. -- [bash](https://www.gnu.org/software/bash/) - shell environment - -## Configuration - -You will need to define these environment variables: - -- Required Variables: - - `MY_RESOURCE_GROUP` (required) - Azure resource group that contains the resources. 
If the - resource group does not exist, this script will create it. - - `MY_STORAGE_ACCT` (required) - Azure storage account (unique global name) to contain storage. If - the storage account does not exist, this script will create it. - - `MY_CONTAINER_NAME` (required) - Azure container to host the blob storage. -- Optional Variables: - - `MY_LOCATION` (default = `eastus2`) - the location in which to create the resource group if it - doesn't exist - -## Steps - -### Define Variables - -You can create an `env.sh` with the desired values, for example: - -```bash -cat <<-EOF > env.sh -export MY_RESOURCE_GROUP="my-organization-resources" -export MY_STORAGE_ACCT="myorguniquestorage12345" -export MY_CONTAINER_NAME="my-backups" -EOF -``` - -### Run the Script - -```bash -## source env vars setup earlier -. env.sh -./create_blob.sh -``` - -## Cleanup - -You can run these commands to delete the resources (with prompts) on Azure. - -```bash -## source env vars setup earlier -. env.sh - -if az storage account list | jq '.[].name' -r | grep -q ${MY_STORAGE_ACCT}; then - az storage container delete \ - --account-name ${MY_STORAGE_ACCT} \ - --name ${MY_CONTAINER_NAME} \ - --auth-mode login - - az storage account delete \ - --name ${MY_STORAGE_ACCT} \ - --resource-group ${MY_RESOURCE_GROUP} -fi - -if az group list | jq '.[].name' -r | grep -q ${MY_RESOURCE_GROUP}; then - az group delete --name=${MY_RESOURCE_GROUP} -fi -``` diff --git a/contrib/config/backups/azure/azure_cli/create_blob.sh b/contrib/config/backups/azure/azure_cli/create_blob.sh deleted file mode 100755 index 4f7909d9cc0..00000000000 --- a/contrib/config/backups/azure/azure_cli/create_blob.sh +++ /dev/null @@ -1,135 +0,0 @@ -#!/usr/bin/env bash - -##### -# main -################## -main() { - check_environment $@ - create_resource_group - create_storage_acct - authorize_ad_user - create_storage_container - create_config_files -} - -##### -# check_environment -################## -check_environment() { - ## Check for Azure CLI command - command -v az >/dev/null || - { - echo "[ERROR]: 'az' command not found" 1>&2 - exit 1 - } - command -v jq >/dev/null || - { - echo "[ERROR]: 'jq' command not found" 1>&2 - exit 1 - } - - ## Defaults - MY_CONTAINER_NAME=${MY_CONTAINER_NAME:-$1} - MY_STORAGE_ACCT=${MY_STORAGE_ACCT:-""} - MY_RESOURCE_GROUP=${MY_RESOURCE_GROUP:=""} - MY_LOCATION=${MY_LOCATION:-"eastus2"} - MY_ACCOUNT_ID="$(az account show | jq '.id' -r)" - CREATE_MINIO_ENV=${CREATE_MINIO_ENV:-"true"} - CREATE_MINIO_CHART_SECRETS=${CREATE_MINIO_CHART_SECRETS:-"true"} - CREATE_DGRAPH_CHART_SECRETS=${CREATE_DGRAPH_CHART_SECRETS:-"true"} - - if [[ -z ${MY_CONTAINER_NAME} ]]; then - if (($# < 1)); then - printf "[ERROR]: Need at least one parameter or define 'MY_CONTAINER_NAME'\n\n" 1>&2 - printf "Usage:\n\t$0 <container_name>\n\tMY_CONTAINER_NAME=<container_name> $0\n" 1>&2 - exit 1 - fi - fi - - if [[ -z ${MY_STORAGE_ACCT} ]]; then - printf "[ERROR]: The env var of 'MY_STORAGE_ACCT' was not defined. Exiting\n" 1>&2 - exit 1 - fi - - if [[ -z ${MY_RESOURCE_GROUP} ]]; then - printf "[ERROR]: The env var of 'MY_RESOURCE_GROUP' was not defined. Exiting\n" 1>&2 - exit 1 - fi -} - -##### -# create_resource_group -################## -create_resource_group() { - ## create resource (idempotently) - if ! 
az group list | jq '.[].name' -r | grep -q "${MY_RESOURCE_GROUP}"; then - echo "[INFO]: Creating Resource Group '${MY_RESOURCE_GROUP}' at Location '${MY_LOCATION}'" - az group create --name="${MY_RESOURCE_GROUP}" --location="${MY_LOCATION}" >/dev/null - fi -} - -##### -# create_storage_acct -################## -create_storage_acct() { - ## create globally unique storage account (idempotently) - if ! az storage account list | jq '.[].name' -r | grep -q "${MY_STORAGE_ACCT}"; then - echo "[INFO]: Creating Storage Account '${MY_STORAGE_ACCT}'" - az storage account create \ - --name "${MY_STORAGE_ACCT}" \ - --resource-group "${MY_RESOURCE_GROUP}" \ - --location "${MY_LOCATION}" \ - --sku Standard_ZRS \ - --encryption-services blob >/dev/null - fi -} - -##### -# authorize_ad_user -################## -authorize_ad_user() { - ## Use Azure AD Account to Authorize Operation - az ad signed-in-user show --query objectId -o tsv | az role assignment create \ - --role "Storage Blob Data Contributor" \ - --assignee @- \ - --scope "/subscriptions/${MY_ACCOUNT_ID}/resourceGroups/${MY_RESOURCE_GROUP}/providers/Microsoft.Storage/storageAccounts/${MY_STORAGE_ACCT}" >/dev/null -} - -##### -# create_storage_container -################## -create_storage_container() { - ## Create Container Using Credentials - if ! az storage container list \ - --account-name "${MY_STORAGE_ACCT}" \ - --auth-mode login | jq '.[].name' -r | grep -q "${MY_CONTAINER_NAME}"; then - echo "[INFO]: Creating Storage Container '${MY_CONTAINER_NAME}'" - az storage container create \ - --account-name "${MY_STORAGE_ACCT}" \ - --name "${MY_CONTAINER_NAME}" \ - --auth-mode login >/dev/null - fi -} - -##### -# create_config_files -################## -create_config_files() { - ## Create Minio env file and Helm Chart secret files - if [[ ${CREATE_MINIO_ENV} =~ true|(y)es ]]; then - echo "[INFO]: Creating Docker Compose 'minio.env' file" - ./create_secrets.sh minio_env - fi - - if [[ ${CREATE_MINIO_CHART_SECRETS} =~ true|(y)es ]]; then - echo "[INFO]: Creating Helm Chart 'minio_secrets.yaml' file" - ./create_secrets.sh minio_chart - fi - - if [[ ${CREATE_DGRAPH_CHART_SECRETS} =~ true|(y)es ]]; then - echo "[INFO]: Creating Helm Chart 'dgraph_secrets.yaml' file" - ./create_secrets.sh dgraph_chart - fi -} - -main $@ diff --git a/contrib/config/backups/azure/azure_cli/create_secrets.sh b/contrib/config/backups/azure/azure_cli/create_secrets.sh deleted file mode 100755 index ba126e6dafe..00000000000 --- a/contrib/config/backups/azure/azure_cli/create_secrets.sh +++ /dev/null @@ -1,104 +0,0 @@ -#!/usr/bin/env bash - -##### -# main -################## -main() { - check_environment $@ - - ## Fetch Secrets from Azure - get_secrets - - ## Create Configuration with Secrets - case $1 in - minio_env) - create_minio_env - ;; - minio_chart) - create_minio_secrets - ;; - dgraph_chart) - create_dgraph_secrets - ;; - esac -} - -##### -# check_environment -################## -check_environment() { - ## Check for Azure CLI command - command -v az >/dev/null || - { - echo "[ERROR]: 'az' command not found" 1>&2 - exit 1 - } - command -v jq >/dev/null || - { - echo "[ERROR]: 'jq' command not found" 1>&2 - exit 1 - } - - MY_STORAGE_ACCT=${MY_STORAGE_ACCT:-""} - MY_RESOURCE_GROUP=${MY_RESOURCE_GROUP:=""} - - if [[ -z ${MY_STORAGE_ACCT} ]]; then - printf "[ERROR]: The env var of 'MY_STORAGE_ACCT' was not defined. Exiting\n" 1>&2 - exit 1 - fi - - if [[ -z ${MY_RESOURCE_GROUP} ]]; then - printf "[ERROR]: The env var of 'MY_RESOURCE_GROUP' was not defined. 
Exiting\n" 1>&2 - exit 1 - fi -} - -##### -# get_secrets -################## -get_secrets() { - CONN_STR=$( - az storage account show-connection-string \ - --name "${MY_STORAGE_ACCT}" \ - --resource-group "${MY_RESOURCE_GROUP}" | - jq .connectionString -r - ) - - export MINIO_SECRET_KEY=$(grep -oP '(?<=AccountKey=).*' <<<"${CONN_STR}") - export MINIO_ACCESS_KEY=$(grep -oP '(?<=AccountName=)[^;]*' <<<"${CONN_STR}") -} - -##### -# create_minio_env -################## -create_minio_env() { - cat <<-EOF >../minio.env - MINIO_SECRET_KEY=$(grep -oP '(?<=AccountKey=).*' <<<"${CONN_STR}") - MINIO_ACCESS_KEY=$(grep -oP '(?<=AccountName=)[^;]*' <<<"${CONN_STR}") - EOF -} - -##### -# create_minio_secrets -################## -create_minio_secrets() { - cat <<-EOF >../charts/minio_secrets.yaml - accessKey: ${MINIO_ACCESS_KEY} - secretKey: ${MINIO_SECRET_KEY} - EOF -} - -##### -# create_dgraph_secrets -################## -create_dgraph_secrets() { - cat <<-EOF >../charts/dgraph_secrets.yaml - backups: - keys: - minio: - access: ${MINIO_ACCESS_KEY} - secret: ${MINIO_SECRET_KEY} - EOF -} - -main $@ diff --git a/contrib/config/backups/azure/charts/.gitignore b/contrib/config/backups/azure/charts/.gitignore deleted file mode 100644 index f4b6b916ec4..00000000000 --- a/contrib/config/backups/azure/charts/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -minio_secrets.yaml -dgraph_secrets.yaml diff --git a/contrib/config/backups/azure/charts/dgraph_config.yaml b/contrib/config/backups/azure/charts/dgraph_config.yaml deleted file mode 100644 index 83fe869f53d..00000000000 --- a/contrib/config/backups/azure/charts/dgraph_config.yaml +++ /dev/null @@ -1,9 +0,0 @@ -backups: - full: - enabled: true - debug: true - schedule: "*/15 * * * *" -alpha: - configFile: - config.hcl: | - whitelist = "10.0.0.0/8,172.0.0.0/8,192.168.0.0/16,127.0.0.1" diff --git a/contrib/config/backups/azure/charts/helmfile.yaml b/contrib/config/backups/azure/charts/helmfile.yaml deleted file mode 100644 index 7a2d7176c71..00000000000 --- a/contrib/config/backups/azure/charts/helmfile.yaml +++ /dev/null @@ -1,28 +0,0 @@ -repositories: - - name: minio - url: https://helm.min.io/ - - name: dgraph - url: https://charts.dgraph.io - -releases: - - name: azuregw - namespace: minio - chart: minio/minio - version: 6.3.1 - values: - - minio_config.yaml - ## generated by terraform or azure cli shell scripts - - minio_secrets.yaml - - - name: my-release - namespace: default - chart: dgraph/dgraph - version: 0.0.11 - values: - - ./dgraph_config.yaml - ## generated by terraform or azure cli shell scripts - - ./dgraph_secrets.yaml - ## minio server configured - - backups: - ## Format -minio.namespace.svc:9000/ - destination: minio://azuregw-minio.minio.svc:9000/{{ requiredEnv "BACKUP_BUCKET_NAME" }} diff --git a/contrib/config/backups/azure/charts/minio_config.yaml b/contrib/config/backups/azure/charts/minio_config.yaml deleted file mode 100644 index a99a078d4ee..00000000000 --- a/contrib/config/backups/azure/charts/minio_config.yaml +++ /dev/null @@ -1,8 +0,0 @@ -image: - repository: minio/minio - tag: RELEASE.2020-09-17T04-49-20Z -persistence: - enabled: false -azuregateway: - enabled: true - replicas: 1 diff --git a/contrib/config/backups/azure/docker-compose.yml b/contrib/config/backups/azure/docker-compose.yml deleted file mode 100644 index f4aefa542d4..00000000000 --- a/contrib/config/backups/azure/docker-compose.yml +++ /dev/null @@ -1,37 +0,0 @@ -version: "3.5" -services: - zero1: - image: dgraph/dgraph:${DGRAPH_VERSION} - container_name: zero1 - 
working_dir: /data/zero1 - ports: - - 5080:5080 - - 6080:6080 - command: dgraph zero --my=zero1:5080 --replicas 1 --raft="idx=1;" - - alpha1: - image: dgraph/dgraph:${DGRAPH_VERSION} - container_name: alpha1 - working_dir: /data/alpha1 - env_file: - - minio.env - ports: - - 8080:8080 - - 9080:9080 - command: dgraph alpha --my=alpha1:7080 --zero=zero1:5080 - --security "whitelist=10.0.0.0/8,172.0.0.0/8,192.168.0.0/16,127.0.0.1;" - - ratel: - image: dgraph/ratel:${DGRAPH_VERSION} - ports: - - 8000:8000 - container_name: ratel - - minio: - image: minio/minio:${MINIO_VERSION} - command: gateway azure - container_name: gateway - env_file: - - minio.env - ports: - - 9000:9000 diff --git a/contrib/config/backups/azure/helmfile.yaml b/contrib/config/backups/azure/helmfile.yaml deleted file mode 100644 index 78b0eeffd54..00000000000 --- a/contrib/config/backups/azure/helmfile.yaml +++ /dev/null @@ -1,2 +0,0 @@ -helmfiles: - - ./charts/helmfile.yaml diff --git a/contrib/config/backups/azure/terraform/.gitignore b/contrib/config/backups/azure/terraform/.gitignore deleted file mode 100644 index 578082f808e..00000000000 --- a/contrib/config/backups/azure/terraform/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -# terraform files -terraform.tfvars -.terraform -*.tfstate* diff --git a/contrib/config/backups/azure/terraform/README.md b/contrib/config/backups/azure/terraform/README.md deleted file mode 100644 index c3384ec224d..00000000000 --- a/contrib/config/backups/azure/terraform/README.md +++ /dev/null @@ -1,59 +0,0 @@ -# Azure Blob with Terraform - -## About - -This script will create the required resources needed to create Azure Blob Storage using the -[`simple-azure-blob`](https://github.com/darkn3rd/simple-azure-blob) module. - -## Prerequisites - -You need the following installed to use this automation: - -- [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest) - with an active Azure subscription configured. -- [Terraform](https://www.terraform.io/downloads.html) - tool that is used to provision resources and - create configuration files from templates - -## Configuration - -You will need to define the following variables: - -- Required Variables: - - `resource_group_name` (required) - Azure resource group that contains the resources - - `storage_account_name` (required) - Azure storage account (unique global name) to contain - storage - - `storage_container_name` (default = `dgraph-backups`) - Azure container to host the blob storage - -## Steps - -### Define Variables - -You can define these when prompted, or in the `terraform.tfvars` file, or through command line -variables, e.g. `TF_VAR_resource_group_name`, `TF_VAR_storage_account_name`.
- -```terraform -# terraform.tfvars -resource_group_name = "my-organization-resources" -storage_account_name = "myorguniquestorage12345" -``` - -### Download Plugins and Modules - -```bash -terraform init -``` - -### Prepare and Provision Resources - -```bash -## get a list of changes that will be made -terraform plan -## apply the changes -terraform apply -``` - -## Cleanup - -```bash -terraform destroy -``` diff --git a/contrib/config/backups/azure/terraform/main.tf b/contrib/config/backups/azure/terraform/main.tf deleted file mode 100644 index 3d276eead56..00000000000 --- a/contrib/config/backups/azure/terraform/main.tf +++ /dev/null @@ -1,55 +0,0 @@ -variable "resource_group_name" {} -variable "storage_account_name" {} -variable "storage_container_name" { default = "dgraph-backups" } -variable "create_minio_env" { default = true } -variable "create_minio_secrets" { default = true } -variable "create_dgraph_secrets" { default = true } - -## Create a Resource Group, a Storage Account, and a Storage Container -module "dgraph_backups" { - source = "git::https://github.com/darkn3rd/simple-azure-blob.git?ref=v0.1" - resource_group_name = var.resource_group_name - create_resource_group = true - storage_account_name = var.storage_account_name - create_storage_account = true - storage_container_name = var.storage_container_name -} - -##################################################################### -# Locals -##################################################################### - -locals { - minio_vars = { - accessKey = module.dgraph_backups.AccountName - secretKey = module.dgraph_backups.AccountKey - } - - dgraph_secrets = templatefile("${path.module}/templates/dgraph_secrets.yaml.tmpl", local.minio_vars) - minio_secrets = templatefile("${path.module}/templates/minio_secrets.yaml.tmpl", local.minio_vars) - minio_env = templatefile("${path.module}/templates/minio.env.tmpl", local.minio_vars) -} - -##################################################################### -# File Resources -##################################################################### -resource "local_file" "minio_env" { - count = var.create_minio_env != "" ? 1 : 0 - content = local.minio_env - filename = "${path.module}/../minio.env" - file_permission = "0644" -} - -resource "local_file" "minio_secrets" { - count = var.create_minio_secrets != "" ? 1 : 0 - content = local.minio_secrets - filename = "${path.module}/../charts/minio_secrets.yaml" - file_permission = "0644" -} - -resource "local_file" "dgraph_secrets" { - count = var.create_dgraph_secrets != "" ? 
1 : 0 - content = local.dgraph_secrets - filename = "${path.module}/../charts/dgraph_secrets.yaml" - file_permission = "0644" -} diff --git a/contrib/config/backups/azure/terraform/provider.tf b/contrib/config/backups/azure/terraform/provider.tf deleted file mode 100644 index 7fbc7d19fa7..00000000000 --- a/contrib/config/backups/azure/terraform/provider.tf +++ /dev/null @@ -1,4 +0,0 @@ -provider "azurerm" { - version = "=2.20.0" - features {} -} diff --git a/contrib/config/backups/azure/terraform/templates/dgraph_secrets.yaml.tmpl b/contrib/config/backups/azure/terraform/templates/dgraph_secrets.yaml.tmpl deleted file mode 100644 index f4f39d7b732..00000000000 --- a/contrib/config/backups/azure/terraform/templates/dgraph_secrets.yaml.tmpl +++ /dev/null @@ -1,5 +0,0 @@ -backups: - keys: - minio: - access: ${accessKey} - secret: ${secretKey} diff --git a/contrib/config/backups/azure/terraform/templates/minio.env.tmpl b/contrib/config/backups/azure/terraform/templates/minio.env.tmpl deleted file mode 100644 index 3f3bec38553..00000000000 --- a/contrib/config/backups/azure/terraform/templates/minio.env.tmpl +++ /dev/null @@ -1,2 +0,0 @@ -MINIO_ACCESS_KEY=${accessKey} -MINIO_SECRET_KEY=${secretKey} diff --git a/contrib/config/backups/azure/terraform/templates/minio_secrets.yaml.tmpl b/contrib/config/backups/azure/terraform/templates/minio_secrets.yaml.tmpl deleted file mode 100644 index 1d159bf0cad..00000000000 --- a/contrib/config/backups/azure/terraform/templates/minio_secrets.yaml.tmpl +++ /dev/null @@ -1,2 +0,0 @@ -accessKey: ${accessKey} -secretKey: ${secretKey} diff --git a/contrib/config/backups/client/.gitignore b/contrib/config/backups/client/.gitignore deleted file mode 100644 index 2396f857981..00000000000 --- a/contrib/config/backups/client/.gitignore +++ /dev/null @@ -1,10 +0,0 @@ -backups/* -logs/* -!data/acl/ -!data/enc/ -!data/token/ -!**/.gitkeep -.env -!/data/backup.sh -!/data/backup_helper.sh -data/* diff --git a/contrib/config/backups/client/README.md b/contrib/config/backups/client/README.md deleted file mode 100644 index 1582d628a26..00000000000 --- a/contrib/config/backups/client/README.md +++ /dev/null @@ -1,274 +0,0 @@ -# Backup Script - -This backup script supports many of the features in Dgraph, such as ACLs, Mutual TLS, and the REST or -GraphQL API. See `./dgraph-backup.sh --help` for all of the options. - -## Requirements - -- The scripts (`dgraph-backup.sh` and `compose-setup.sh`) require the following tools to run - properly: - - GNU `bash` - - GNU `getopt` - - GNU `grep` -- These scripts were tested on the following environments: - - macOS with Homebrew [gnu-getopt](https://formulae.brew.sh/formula/gnu-getopt) bottle and - [grep](https://formulae.brew.sh/formula/grep) bottle, - - [Ubuntu 20.04.1 (Focal Fossa)](https://releases.ubuntu.com/20.04/) (any modern Linux distro - should work, such as the [dgraph/dgraph](https://hub.docker.com/r/dgraph/dgraph/) docker - container), and - - Windows with [MSYS2](https://www.msys2.org/). -- For the test demo environment, both [docker](https://docs.docker.com/engine/) and - [docker-compose](https://docs.docker.com/compose/) are required. - -† Some versions of macOS 10.x do not include a compatible version of `grep`. You need to have -GNU grep in the path for this script to work.
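
As a quick sanity check on the _backup workstation_, you can verify that the `grep` in your `PATH` is GNU grep with PCRE support, which these scripts rely on for look-behind matches such as `grep -oP '(?<=accessJWT":")[^"]*'`. A minimal check, assuming GNU grep is installed simply as `grep` in the `PATH` (this snippet is illustrative and not part of the scripts themselves):

```bash
## verify that the grep found in PATH is GNU grep with PCRE (-P) support;
## the backup scripts depend on look-behind matching like
## grep -oP '(?<=accessJWT":")[^"]*'
if grep --version 2>/dev/null | head -1 | grep -q GNU &&
	echo 'accessJWT":"abc123"' | grep -qoP '(?<=accessJWT":")[^"]*'; then
	echo "GNU grep with -P support found"
else
	echo "GNU grep not found; on macOS try 'brew install grep'" 1>&2
fi
```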
- -## Important Notes - -If you are using this script on a system other than the alpha server (we'll call this the _backup workstation_), you -should be aware of the following: - -- **General** - - the _backup workstation_ will need to have access to the alpha server, e.g. `localhost:8080` -- **TLS** - - when accessing an alpha server secured by TLS, the _backup workstation_ will need access to - `ca.crt` created with `dgraph cert` in the path. - - if Mutual TLS is used, the _backup workstation_ will also need access to the client cert and key - in the path. -- **`subpath` option** - - when specifying a sub-path that uses a datestamp, the _backup workstation_ needs to have the same - timestamp as the alpha server. - - when backing up to a file path, such as NFS, the _backup workstation_ will need access to the - same file path at the same mount point, e.g. if `/dgraph/backups` is used on alpha, the same - path `/dgraph/backups` has to be accessible on the _backup workstation_ - -## Demo (Test) with local file path - -You can try out these features using [Docker Compose](https://docs.docker.com/compose/). There's a -`./compose-setup.sh` script that can configure the environment with the desired features. As you -need to have a common shared directory for file paths, you can use the `ratel` container as the _backup -workstation_ to run the backup script. - -As an example of performing backups with a local mounted file path using ACLs, Encryption, and TLS, -you can follow these steps: - -1. Set up the environment and log into the _backup workstation_ (ratel container): - - ```bash - ## configure docker-compose environment - ./compose-setup.sh --acl --enc --tls --make_tls_cert - ## run demo - docker-compose up --detach - ## log into Ratel to use for backups - docker exec --tty --interactive ratel bash - ``` - -2. Trigger a full backup: - - ```bash - ## trigger a backup on alpha1:8080 - ./dgraph-backup.sh \ - --alpha alpha1:8080 \ - --tls_cacert /dgraph/tls/ca.crt \ - --force_full \ - --location /dgraph/backups \ - --user groot \ - --password password - ``` - -3. Verify Results - - ```bash - ## check for backup files - ls /dgraph/backups - ``` - -4. Log out of the Ratel container - - ```bash - exit - ``` - -5. Clean up when finished - - ```bash - docker-compose stop && docker-compose rm - ``` - -### Demo (Test) with S3 Buckets - -This requires [Terraform](https://www.terraform.io/) and -[AWS CLI](https://aws.amazon.com/cli/). See [s3/README.md](../s3/README.md) for further information. -Because we do not need to share the same file path, we can use the host as the _backup workstation_: - -1. Set up the S3 Bucket environment. Make sure to replace `<name>` with an - appropriate name. - - ```bash - ## create the S3 Bucket + Credentials - pushd ../s3/terraform - cat <<-TFVARS > terraform.tfvars - name = "<name>" - region = "us-west-2" - TFVARS - terraform init && terraform apply - cd .. - ## start Dgraph cluster with S3 bucket support - docker-compose up --detach - ## set $BACKUP_PATH env var for triggering backups - source env.sh - popd - ``` - -2. Trigger a backup - - ```bash - ./dgraph-backup.sh \ - --alpha localhost:8080 \ - --force_full \ - --location $BACKUP_PATH - ``` - -3. Verify backups finished - - ```bash - aws s3 ls s3://${BACKUP_PATH##*/} - ``` - -4.
Clean up when completed: - - ```bash - ## remove the local Dgraph cluster - pushd ../s3 - docker-compose stop && docker-compose rm - - ## empty the bucket of contents - aws s3 rm s3://${BACKUP_PATH##*/}/ --recursive - - ## destroy the s3 bucket and IAM user - cd terraform - terraform destroy - - popd - ``` - -### Demo (Test) with GCP via Minio Gateway - -This requires [Terraform](https://www.terraform.io/) and -[Google Cloud SDK](https://cloud.google.com/sdk). See [gcp/README.md](../gcp/README.md) for further -information. Because we do not need to share the same file path, we can use the host as the _backup -workstation_: - -1. Set up the GCS Bucket environment. Make sure to replace `<project-id>` and - `<name>` with something appropriate. - - ```bash - ## create the GCS Bucket + credentials - pushd ../gcp/terraform - cat <<-TFVARS > terraform.tfvars - region = "us-central1" - project_id = "<project-id>" - name = "<name>" - TFVARS - terraform init && terraform apply - cd .. - ## set $PROJECT_ID and $BACKUP_BUCKET_NAME env vars - source env.sh - ## start the Dgraph cluster with MinIO Gateway support - docker-compose up --detach - - popd - ``` - -2. Trigger a full backup - - ```bash - ./dgraph-backup.sh \ - --alpha localhost:8080 \ - --force_full \ - --location minio://gateway:9000/${BACKUP_BUCKET_NAME} - ``` - -3. Verify backups were created - - ```bash - gsutil ls gs://${BACKUP_BUCKET_NAME}/ - ``` - -4. Clean up when finished: - - ```bash - ## remove the local Dgraph cluster - pushd ../gcp - docker-compose stop && docker-compose rm - - ## empty the bucket contents - gsutil rm -r gs://${BACKUP_BUCKET_NAME}/* - - ## destroy the gcs bucket and google service account - cd terraform - terraform destroy - - popd - ``` - -### Demo (Test) with Azure Blob via Minio Gateway - -This requires [Terraform](https://www.terraform.io/) and -[Azure CLI](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli). See -[azure/README.md](../azure/README.md) for further information. Because we do not need to share the -same file path, we can use the host as the _backup workstation_: - -1. Set up the Azure Storage Blob environment. Replace `<resource-group-name>`, - `<storage-account-name>`, and `<container-name>` with something - appropriate. - - ```bash - ## create Resource Group, Storage Account, authorize Storage Account, Create Storage Container - pushd ../azure/terraform - export STORAGE_ACCOUNT_NAME="<storage-account-name>" - export CONTAINER_NAME="<container-name>" - cat <<-TFVARS > terraform.tfvars - resource_group_name = "<resource-group-name>" - storage_account_name = "$STORAGE_ACCOUNT_NAME" - storage_container_name = "$CONTAINER_NAME" - TFVARS - terraform init && terraform apply - cd .. - ## start the Dgraph cluster with MinIO Gateway support - docker-compose up --detach - - popd - ``` - -2. Trigger a backup - - ```bash - ./dgraph-backup.sh \ - --alpha localhost:8080 \ - --force_full \ - --location minio://gateway:9000/${CONTAINER_NAME} - ``` - -3. Verify backups were created - - ```bash - az storage blob list \ - --account-name ${STORAGE_ACCOUNT_NAME} \ - --container-name ${CONTAINER_NAME} \ - --output table - ``` - -4.
Clean up when finished: - - ```bash - ## remove the local Dgraph cluster - pushd ../azure - docker-compose stop && docker-compose rm - - ## destroy the storage account, the storage container, and the resource group - cd terraform - terraform destroy - - popd - ``` diff --git a/contrib/config/backups/client/backup_helper.sh b/contrib/config/backups/client/backup_helper.sh deleted file mode 100644 index 1cb1d422c38..00000000000 --- a/contrib/config/backups/client/backup_helper.sh +++ /dev/null @@ -1,234 +0,0 @@ -###### -## backup_helper.sh - general purpose shell script library used to support -## Dgraph binary backups enterprise feature. -########################## - -###### -# get_token_rest - get accessJWT token with REST command for Dgraph 1.x -########################## -get_token_rest() { - JSON="{\"userid\": \"${USER}\", \"password\": \"${PASSWORD}\" }" - RESULT=$( - /usr/bin/curl --silent \ - "${HEADERS[@]}" \ - "${CERTOPTS[@]}" \ - --request POST \ - "${ALPHA_HOST}"/login \ - --data "${JSON}" - ) - - if grep -q errors <<<"${RESULT}"; then - ERROR=$(grep -oP '(?<=message":")[^"]*' <<<"${RESULT}") - echo "ERROR: ${ERROR}" - return 1 - fi - - grep -oP '(?<=accessJWT":")[^"]*' <<<"${RESULT}" - -} - -###### -# get_token_graphql - get accessJWT token using GraphQL for Dgraph 20.03.1+ -########################## -get_token_graphql() { - GQL="{\"query\": \"mutation { login(userId: \\\"${USER}\\\" password: \\\"${PASSWORD}\\\") { response { accessJWT } } }\"}" - RESULT=$( - /usr/bin/curl --silent \ - "${HEADERS[@]}" \ - "${CERTOPTS[@]}" \ - --request POST \ - "${ALPHA_HOST}"/admin \ - --data "${GQL}" - ) - - if grep -q errors <<<"${RESULT}"; then - ERROR=$(grep -oP '(?<=message":")[^"]*' <<<"${RESULT}") - echo "ERROR: ${ERROR}" - return 1 - fi - - grep -oP '(?<=accessJWT":")[^"]*' <<<"${RESULT}" - -} - -###### -# get_token - get accessJWT using GraphQL /admin or REST /login -# params: -# 1: user (required) -# 2: password (required) -# envvars: -# ALPHA_HOST (default: localhost:8080) - dns name of dgraph alpha node -# CACERT_PATH - path to dgraph root ca (e.g. ca.crt) if TLS is enabled -# CLIENT_CERT_PATH - path to client cert (e.g. client.dgraphuser.crt) for client TLS -# CLIENT_KEY_PATH - path to client cert (e.g. client.dgraphuser.key) for client TLS -########################## -get_token() { - USER=${1} - PASSWORD=${2} - AUTH_TOKEN=${3:-""} - CACERT_PATH=${CACERT_PATH:-""} - CLIENT_CERT_PATH=${CLIENT_CERT_PATH:-""} - CLIENT_KEY_PATH=${CLIENT_KEY_PATH:-""} - - ## user/password required for login - if [[ -z ${USER} || -z ${PASSWORD} ]]; then - return 1 - fi - - if [[ -n ${AUTH_TOKEN} ]]; then - HEADERS+=('--header' "X-Dgraph-AuthToken: ${AUTH_TOKEN}") - fi - - if [[ -n ${CACERT_PATH} ]]; then - CERTOPTS+=('--cacert' "${CACERT_PATH}") - if [[ ! 
-z ${CLIENT_CERT_PATH} || -n ${CLIENT_KEY_PATH} ]]; then - CERTOPTS+=( - '--cert' "${CLIENT_CERT_PATH}" - '--key' "${CLIENT_KEY_PATH}" - ) - fi - ALPHA_HOST=https://${ALPHA_HOST:-"localhost:8080"} - else - ALPHA_HOST=${ALPHA_HOST:-"localhost:8080"} - fi - - API_TYPE=${API_TYPE:-"graphql"} - if [[ ${API_TYPE} == "graphql" ]]; then - HEADERS+=('--header' "Content-Type: application/json") - get_token_graphql - else - get_token_rest - fi -} - -###### -# backup - trigger binary backup GraphQL /admin or REST /login -# params: -# 1: token (optional) - if ACL enabled pass token from get_token() -# envvars: -# BACKUP_DESTINATION (required) - filepath ("/path/to/backup"), s3://, or minio:// -# ALPHA_HOST (default: localhost:8080) - dns name of dgraph alpha node -# MINIO_SECURE (default: false) - set to true if minio service supports https -# FORCE_FULL (default: false) - set to true if forcing a full backup -# CACERT_PATH - path to dgraph root ca (e.g. ca.crt) if TLS is enabled -# CLIENT_CERT_PATH - path to client cert (e.g. client.dgraphuser.crt) for client TLS -# CLIENT_KEY_PATH - path to client cert (e.g. client.dgraphuser.key) for client TLS -########################## -backup() { - ACCESS_TOKEN=${1:-""} - AUTH_TOKEN=${2:-""} - CACERT_PATH=${CACERT_PATH:-""} - CLIENT_CERT_PATH=${CLIENT_CERT_PATH:-""} - CLIENT_KEY_PATH=${CLIENT_KEY_PATH:-""} - - API_TYPE=${API_TYPE:-"graphql"} - - MINIO_SECURE=${MINIO_SECURE:-"false"} - FORCE_FULL=${FORCE_FULL:-"false"} - - [[ -z ${BACKUP_DESTINATION} ]] && - { - echo "'BACKUP_DESTINATION' is not set. Exiting" >&2 - return 1 - } - - if [[ -n ${ACCESS_TOKEN} ]]; then - HEADERS+=('--header' "X-Dgraph-AccessToken: ${ACCESS_TOKEN}") - fi - - if [[ -n ${AUTH_TOKEN} ]]; then - HEADERS+=('--header' "X-Dgraph-AuthToken: ${AUTH_TOKEN}") - fi - - if [[ -n ${CACERT_PATH} ]]; then - CERTOPTS+=('--cacert' "${CACERT_PATH}") - if [[ ! -z ${CLIENT_CERT_PATH} || -n ${CLIENT_KEY_PATH} ]]; then - CERTOPTS+=( - '--cert' "${CLIENT_CERT_PATH}" - '--key' "${CLIENT_KEY_PATH}" - ) - fi - ALPHA_HOST=https://${ALPHA_HOST:-"localhost:8080"} - else - ALPHA_HOST=${ALPHA_HOST:-"localhost:8080"} - fi - - ## Configure destination with date stamp folder - BACKUP_DESTINATION="${BACKUP_DESTINATION}/${SUBPATH}" - ## Configure Minio Configuration - if [[ ${MINIO_SECURE} == "false" && ${BACKUP_DESTINATION} =~ ^minio ]]; then - BACKUP_DESTINATION="${BACKUP_DESTINATION}?secure=false" - fi - - ## Create date-stamped directory for file system - if [[ ! ${BACKUP_DESTINATION} =~ ^minio|^s3 ]]; then - ## Check destination directory exist - if [[ -d ${BACKUP_DESTINATION%/*} ]]; then - mkdir -p "${BACKUP_DESTINATION}" - else - echo "Designated Backup Destination '${BACKUP_DESTINATION%/*}' does not exist. Aborting." 
- return 1 - fi - fi - - if [[ ${API_TYPE} == "graphql" ]]; then - HEADERS+=('--header' "Content-Type: application/json") - backup_graphql - else - backup_rest - fi - -} - -###### -# backup_rest - trigger backup using REST command for Dgraph 1.x -########################## -backup_rest() { - URL_PATH="admin/backup?force_full=${FORCE_FULL}" - - RESULT=$( - /usr/bin/curl --silent \ - "${HEADERS[@]}" \ - "${CERTOPTS[@]}" \ - --request POST \ - "${ALPHA_HOST}"/"${URL_PATH}" \ - --data "destination=${BACKUP_DESTINATION}" - ) - - if grep -q errors <<<"${RESULT}"; then - ERROR=$(grep -oP '(?<=message":")[^"]*' <<<"${RESULT}") - MESSAGE="ERROR: ${ERROR}" - if grep -q code <<<"${RESULT}"; then - CODE=$(grep -oP '(?<=code":")[^"]*' <<<"${RESULT}") - echo "${MESSAGE} REASON='${CODE}'" - fi - return 1 - fi - - echo "${RESULT}" - -} - -###### -# backup_graphql - trigger backup using GraphQL for Dgraph 20.03.1+ -########################## -backup_graphql() { - GQL="{\"query\": \"mutation { backup(input: {destination: \\\"${BACKUP_DESTINATION}\\\" forceFull: ${FORCE_FULL} }) { response { message code } } }\"}" - - RESULT=$( - /usr/bin/curl --silent \ - "${HEADERS[@]}" \ - "${CERTOPTS[@]}" \ - --request POST \ - "${ALPHA_HOST}"/admin \ - --data "${GQL}" - ) - - if grep -q errors <<<"${RESULT}"; then - ERROR=$(grep -oP '(?<=message":")[^"]*' <<<"${RESULT}") - echo "ERROR: ${ERROR}" - return 1 - fi - - echo "${RESULT}" -} diff --git a/contrib/config/backups/client/compose-setup.sh b/contrib/config/backups/client/compose-setup.sh deleted file mode 100755 index ce94bc84b62..00000000000 --- a/contrib/config/backups/client/compose-setup.sh +++ /dev/null @@ -1,276 +0,0 @@ -#!/usr/bin/env bash -###### -## compose-setup.sh - configure a docker compose configuration and generate -## private certs/keys using `dgraph cert` command. -## -## This will also fetch an explicit Dgraph version that is tagged as `latest` -## online if DGRAPH_VERSION environment variable is not specified. -## -## This can be used to set up an environment that can be used to explore Dgraph -## backup functionality for operators -########################## - -###### -# main - runs the script -########################## -main() { - parse_command $@ - config_compose - create_certs -} - -###### -# usage - print friendly usage statement -########################## -usage() { - cat <<-USAGE 1>&2 - Setup Docker Compose Environment - - Usage: - $0 [FLAGS] --location [LOCATION] - - Flags: - -j, --acl Enable Access Control List - -t, --auth_token Enable auth token - -e, --enc Enable Encryption - -k, --tls Enable TLS - -c, --tls_client_auth string Set TLS Auth String (default VERIFYIFGIVEN) - -m, --make_tls_cert Create TLS Certificates and Key - -v, --dgraph_version Set Dgraph Version - -d, --debug Enable debug in output - -h, --help Help for $0 - - USAGE -} - -###### -# get_grep - find grep that supports look-ahead/behind regex -########################## -get_grep() { - unset GREP_CMD - - ## Check for GNU grep compatibility - if !
grep --version | head -1 | fgrep -q GNU; then - local SYSTEM="$(uname -s)" - if [[ ${SYSTEM,,} == "freebsd" ]]; then - ## Check FreeBSD install location - if [[ -f "/usr/local/bin/grep" ]]; then - GREP_CMD="/usr/local/bin/grep" - else - ## Save FreeBSD Instructions - local MESSAGE="On FreeBSD, compatible grep can be installed with 'sudo pkg install gnugrep'" - fi - elif [[ ${SYSTEM,,} == "darwin" ]]; then - ## Check HomeBrew install location - if [[ -f "/usr/local/opt/grep/libexec/gnubin/grep" ]]; then - GREP_CMD="/usr/local/opt/grep/libexec/gnubin/grep" - ## Check MacPorts install location - elif [[ -f "/opt/local/bin/grep" ]]; then - GREP_CMD="/opt/local/bin/grep" - else - ## Save MacPorts or HomeBrew Instructions - if command -v brew >/dev/null; then - local MESSAGE="On macOS, gnu-grep can be installed with 'brew install grep'\n" - elif command -v port >/dev/null; then - local MESSAGE="On macOS, grep can be installed with 'sudo port install grep'\n" - fi - fi - fi - else - GREP_CMD="$(command -v grep)" - fi - - ## Error if no suitable grep command found - if [[ -z ${GREP_CMD} ]]; then - printf "ERROR: GNU grep not found. Please install GNU compatible 'grep'\n\n%s" "${MESSAGE}" 1>&2 - exit 1 - fi -} - -###### -# get_getopt - find GNU getopt or print error message -########################## -get_getopt() { - unset GETOPT_CMD - - ## Check for GNU getopt compatibility - if [[ "$(getopt --version)" =~ "--" ]]; then - local SYSTEM="$(uname -s)" - if [[ ${SYSTEM,,} == "freebsd" ]]; then - ## Check FreeBSD install location - if [[ -f "/usr/local/bin/getopt" ]]; then - GETOPT_CMD="/usr/local/bin/getopt" - else - ## Save FreeBSD Instructions - local MESSAGE="On FreeBSD, compatible getopt can be installed with 'sudo pkg install getopt'" - fi - elif [[ ${SYSTEM,,} == "darwin" ]]; then - ## Check HomeBrew install location - if [[ -f "/usr/local/opt/gnu-getopt/bin/getopt" ]]; then - GETOPT_CMD="/usr/local/opt/gnu-getopt/bin/getopt" - ## Check MacPorts install location - elif [[ -f "/opt/local/bin/getopt" ]]; then - GETOPT_CMD="/opt/local/bin/getopt" - else - ## Save MacPorts or HomeBrew Instructions - if command -v brew >/dev/null; then - local MESSAGE="On macOS, gnu-getopt can be installed with 'brew install gnu-getopt'\n" - elif command -v port >/dev/null; then - local MESSAGE="On macOS, getopt can be installed with 'sudo port install getopt'\n" - fi - fi - fi - else - GETOPT_CMD="$(command -v getopt)" - fi - - ## Error if no suitable getopt command found - if [[ -z ${GETOPT_CMD} ]]; then - printf "ERROR: GNU getopt not found. Please install GNU compatible 'getopt'\n\n%s" "${MESSAGE}" 1>&2 - exit 1 - fi -} - -###### -# parse_command - parse command line options using GNU getopt -########################## -parse_command() { - get_getopt - - ## Parse Arguments with GNU getopt - PARSED_ARGUMENTS=$( - ${GETOPT_CMD} -o jtdhekmc:v: \ - --long acl,auth_token,enc,tls,make_tls_cert,tls_client_auth:,dgraph_version:,debug,help \ - -n 'compose-setup.sh' -- "$@" - ) - if [[ $? 
!= 0 ]]; then - usage - exit 1 - fi - eval set -- "${PARSED_ARGUMENTS}" - - ## Defaults - DEBUG="false" - ACL_ENABLED="false" - TOKEN_ENABLED="false" - ENC_ENABLED="false" - TLS_ENABLED="false" - TLS_CLIENT_AUTH="VERIFYIFGIVEN" - TLS_MAKE_CERTS="false" - - ## Process Agurments - while true; do - case "$1" in - -j | --acl) - ACL_ENABLED="true" - shift - ;; - -t | --auth_token) - TOKEN_ENABLED=true - shift - ;; - -d | --debug) - DEBUG="true" - shift - ;; - -h | --help) - usage - exit - ;; - -e | --enc) - ENC_ENABLED="true" - shift - ;; - -k | --tls) - TLS_ENABLED="true" - shift - ;; - -m | --make_tls_cert) - TLS_MAKE_CERTS="true" - shift - ;; - -c | --tls_client_auth) - TLS_CLIENT_AUTH="$2" - shift 2 - ;; - -v | --dgraph_version) - DGRAPH_VERSION="$2" - shift 2 - ;; - --) - shift - break - ;; - *) break ;; - esac - done - - ## Set DGRAPH_VERSION to latest if it is not set yet - [[ -z ${DGRAPH_VERSION} ]] && get_grep && DGRAPH_VERSION=$(curl -s https://get.dgraph.io/latest | ${GREP_CMD} -oP '(?<=tag_name":")[^"]*') -} - -###### -# create_certs - creates cert and keys -########################## -create_certs() { - command -v docker >/dev/null || - { - echo "[ERROR]: 'docker' command not not found" 1>&2 - exit 1 - } - docker version >/dev/null || - { - echo "[ERROR]: docker not accessible for '${USER}'" 1>&2 - exit 1 - } - - if [[ ${TLS_MAKE_CERTS} == "true" ]]; then - [[ -z ${DGRAPH_VERSION} ]] && { - echo "[ERROR]: 'DGRAPH_VERSION' not set. Aborting." 1>&2 - exit 1 - } - rm --force "${PWD}"/data/tls/*.{crt,key} - docker run \ - --tty \ - --volume "${PWD}"/data/tls:/tls dgraph/dgraph:"${DGRAPH_VERSION}" \ - dgraph cert --dir /tls --client backupuser --nodes "localhost,alpha1,zero1,ratel" --duration 365 - fi -} - -###### -# config_compose - configures .env and data/config/config.tml -########################## -config_compose() { - if [[ ${DEBUG} == "true" ]]; then - set -ex - else - set -e - fi - - CFGPATH="./data/config" - mkdir -p ./data/config - [[ -f ${CFGPATH}/config.toml ]] && rm "${CFGPATH}"/config.toml - touch "${CFGPATH}"/config.toml - - ## configure defaults - echo "whitelist = '10.0.0.0/8,172.16.0.0/12,192.168.0.0/16'" >>"${CFGPATH}/config.toml" - echo "lru_mb = 1024" >>"${CFGPATH}/config.toml" - - ## configure if user specifies - [[ ${ACL_ENABLED} == "true" ]] && - echo '--acl "secret-file=/dgraph/acl/hmac_secret_file;"' >>"${CFGPATH}/config.toml" - [[ ${TOKEN_ENABLED} == "true" ]] && - echo "auth_token = '$(cat ./data/token/auth_token_file)'" >>"${CFGPATH}/config.toml" - [[ ${ENC_ENABLED} == "true" ]] && - echo '--encryption "key-file=/dgraph/enc/enc_key_file;"' >>"${CFGPATH}/config.toml" - [[ ${TLS_ENABLED} == "true" ]] && - cat <<-TLS_CONFIG >>"${CFGPATH}"/config.toml - tls_client_auth = '${TLS_CLIENT_AUTH}' - TLS_CONFIG - - ## configure dgraph version - echo "DGRAPH_VERSION=${DGRAPH_VERSION}" >.env - cp *backup*.sh data -} - -main $@ diff --git a/contrib/config/backups/client/data/acl/hmac_secret_file b/contrib/config/backups/client/data/acl/hmac_secret_file deleted file mode 100644 index 2add0c574b7..00000000000 --- a/contrib/config/backups/client/data/acl/hmac_secret_file +++ /dev/null @@ -1 +0,0 @@ -1234567890123456789012345678901 diff --git a/contrib/config/backups/client/data/backups/.gitkeep b/contrib/config/backups/client/data/backups/.gitkeep deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/contrib/config/backups/client/data/enc/enc_key_file b/contrib/config/backups/client/data/enc/enc_key_file deleted file mode 100644 index dc91b5673bb..00000000000 
--- a/contrib/config/backups/client/data/enc/enc_key_file +++ /dev/null @@ -1 +0,0 @@ -123456789012345 diff --git a/contrib/config/backups/client/data/tls/.gitkeep b/contrib/config/backups/client/data/tls/.gitkeep deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/contrib/config/backups/client/data/token/auth_token_file b/contrib/config/backups/client/data/token/auth_token_file deleted file mode 100644 index 2add0c574b7..00000000000 --- a/contrib/config/backups/client/data/token/auth_token_file +++ /dev/null @@ -1 +0,0 @@ -1234567890123456789012345678901 diff --git a/contrib/config/backups/client/dgraph-backup.sh b/contrib/config/backups/client/dgraph-backup.sh deleted file mode 100755 index 5d1a4e95e02..00000000000 --- a/contrib/config/backups/client/dgraph-backup.sh +++ /dev/null @@ -1,218 +0,0 @@ -#!/usr/bin/env bash -###### -## dgraph-backup.sh - general purpose shell script that can be used to -## facilitate binary backups (an enterprise feature) with Dgraph. This script -## demonstrates how to use backups options available in either REST or -## GraphQL API using the curl command. -########################## - -###### -# main - runs the script -########################## -main() { - parse_command $@ - run_backup -} - -###### -# usage - print friendly usage statement -########################## -usage() { - cat <<-USAGE 1>&2 - Run Binary Backup - - Usage: - $0 [FLAGS] --location [LOCATION] - - Flags: - -a, --alpha string Dgraph alpha HTTP/S server (default "localhost:8080") - -i, --api_type API Type of REST or GraphQL (default "GraphQL") - -t, --auth_token string The auth token passed to the server - -d, --debug Enable debug in output - -f, --force_full Force a full backup instead of an incremental backup. - -h, --help Help for $0 - -l, --location Sets the source location URI (required). - --minio_secure Backups to MinIO will use https instead of http - -p, --password Password of the user if login is required. - --subpath Directory Path To Use to store backups, (default "dgraph_\$(date +%Y%m%d)") - --tls_cacert filepath The CA Cert file used to verify server certificates. Required for enabling TLS. - --tls_cert string (optional) The Cert file provided by the client to the server. - --tls_key string (optional) The private key file provided by the client to the server. - -u, --user Username if login is required. 
- - USAGE -} - -###### -# get_getopt - find GNU getopt or print error message -########################## -get_getopt() { - unset GETOPT_CMD - - ## Check for GNU getopt compatibility - if [[ "$(getopt --version)" =~ "--" ]]; then - local SYSTEM="$(uname -s)" - if [[ ${SYSTEM,,} == "freebsd" ]]; then - ## Check FreeBSD install location - if [[ -f "/usr/local/bin/getopt" ]]; then - GETOPT_CMD="/usr/local/bin/getopt" - else - ## Save FreeBSD Instructions - local MESSAGE="On FreeBSD, compatible getopt can be installed with 'sudo pkg install getopt'" - fi - elif [[ ${SYSTEM,,} == "darwin" ]]; then - ## Check HomeBrew install location - if [[ -f "/usr/local/opt/gnu-getopt/bin/getopt" ]]; then - GETOPT_CMD="/usr/local/opt/gnu-getopt/bin/getopt" - ## Check MacPorts install location - elif [[ -f "/opt/local/bin/getopt" ]]; then - GETOPT_CMD="/opt/local/bin/getopt" - else - ## Save MacPorts or HomeBrew Instructions - if command -v brew >/dev/null; then - local MESSAGE="On macOS, gnu-getopt can be installed with 'brew install gnu-getopt'\n" - elif command -v port >/dev/null; then - local MESSAGE="On macOS, getopt can be installed with 'sudo port install getopt'\n" - fi - fi - fi - else - GETOPT_CMD="$(command -v getopt)" - fi - - ## Error if no suitable getopt command found - if [[ -z ${GETOPT_CMD} ]]; then - printf "ERROR: GNU getopt not found. Please install GNU compatible 'getopt'\n\n%s" "${MESSAGE}" 1>&2 - exit 1 - fi -} - -###### -# parse_command - parse command line options using GNU getopt -########################## -parse_command() { - get_getopt - - ## Parse Arguments with GNU getopt - PARSED_ARGUMENTS=$( - ${GETOPT_CMD} -o a:i:t:dfhl:p:u: \ - --long alpha:,api_type:,auth_token:,debug,force_full,help,location:,minio_secure,password:,subpath:,tls_cacert:,tls_cert:,tls_key:,user: \ - -n 'dgraph-backup.sh' -- "$@" - ) - if [[ $? != 0 ]]; then - usage - exit 1 - fi - eval set -- "${PARSED_ARGUMENTS}" - - ## Defaults - DEBUG="false" - ALPHA_HOST="localhost:8080" - BACKUP_DESTINATION="" - SUBPATH=dgraph_$(date +%Y%m%d) - API_TYPE="graphql" - MINIO_SECURE=false - AUTH_TOKEN="" - FORCE_FULL="false" - - ## Process Agurments - while true; do - case "$1" in - -a | --alpha) - ALPHA_HOST="$2" - shift 2 - ;; - -i | --api_type) - API_TYPE=${2,,} - shift 2 - ;; - -t | --auth_token) - AUTH_TOKEN="$2" - shift 2 - ;; - -d | --debug) - DEBUG=true - shift - ;; - -f | --force_full) - FORCE_FULL=true - shift - ;; - -h | --help) - usage - exit - ;; - -m | --minio_secure) - MINIO_SECURE=true - shift - ;; - -l | --location) - BACKUP_DESTINATION="$2" - shift 2 - ;; - -p | --password) - ACL_PASSWORD="$2" - shift 2 - ;; - --subpath) - SUBPATH="$2" - shift 2 - ;; - --tls_cacert) - CACERT_PATH="$2" - shift 2 - ;; - --tls_cert) - CLIENT_CERT_PATH="$2" - shift 2 - ;; - --tls_key) - CLIENT_KEY_PATH="$2" - shift 2 - ;; - -u | --user) - ACL_USER="$2" - shift 2 - ;; - --) - shift - break - ;; - *) break ;; - esac - done - - ## Check required variable was set - if [[ -z ${BACKUP_DESTINATION} ]]; then - printf "ERROR: location was not specified!!\n\n" - usage - exit 1 - fi -} - -###### -# run_backup - using user specified options, execute backup -########################## -run_backup() { - if [[ ${DEBUG} == "true" ]]; then - set -ex - else - set -e - fi - - [[ -f ./backup_helper.sh ]] || { - echo "ERROR: Backup Script library (./backup_helper.sh) missing" 1>&2 - exit 1 - } - source ./backup_helper.sh - - ## login if user was specified - if ! 
[[ -z ${ACL_USER} ]]; then - ACCESS_TOKEN=$(get_token "${ACL_USER}" "${ACL_PASSWORD}" "${AUTH_TOKEN}") - fi - - ## perform backup with valid options set - backup "${ACCESS_TOKEN}" "${AUTH_TOKEN}" -} - -main $@ diff --git a/contrib/config/backups/client/docker-compose.yml b/contrib/config/backups/client/docker-compose.yml deleted file mode 100644 index a6ac9abc837..00000000000 --- a/contrib/config/backups/client/docker-compose.yml +++ /dev/null @@ -1,44 +0,0 @@ -version: "3.5" -services: - zero1: - ## DGRAPH_VERSION set by ./compose-setup.sh - image: dgraph/dgraph:$DGRAPH_VERSION - container_name: zero1 - working_dir: /dgraph/data/zero1 - volumes: - - type: bind - source: ./data - target: /dgraph - read_only: false - ports: - - 5080:5080 - - 6080:6080 - command: dgraph zero --my=zero1:5080 --replicas 1 --raft="idx=1" - - alpha1: - ## DGRAPH_VERSION set by ./compose-setup.sh - image: dgraph/dgraph:$DGRAPH_VERSION - container_name: alpha1 - working_dir: /dgraph/data/alpha1 - volumes: - - type: bind - source: ./data - target: /dgraph - read_only: false - ports: - - 8080:8080 - - 9080:9080 - ## configuration setup by ./compose-setup.sh - command: dgraph alpha --config /dgraph/config/config.toml --my=alpha1:7080 --zero=zero1:5080 - - ratel: - ## DGRAPH_VERSION set by ./compose-setup.sh - image: dgraph/ratel:$DGRAPH_VERSION - volumes: - - type: bind - source: ./data - target: /dgraph - read_only: false - ports: - - 8000:8000 - container_name: ratel diff --git a/contrib/config/backups/gcp/.env b/contrib/config/backups/gcp/.env deleted file mode 100644 index 6df294f2998..00000000000 --- a/contrib/config/backups/gcp/.env +++ /dev/null @@ -1,5 +0,0 @@ -## IMPORTANT: Though `latest` should be alright for local dev environments, -## never use `latest` for production environments as this can lead to -## inconsistent versions on production -DGRAPH_VERSION=latest -MINIO_VERSION=latest diff --git a/contrib/config/backups/gcp/.gitignore b/contrib/config/backups/gcp/.gitignore deleted file mode 100644 index d213625aa5c..00000000000 --- a/contrib/config/backups/gcp/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -# Artifacts Are Automatically Generated -minio.env -credentials.json -env.sh diff --git a/contrib/config/backups/gcp/README.md b/contrib/config/backups/gcp/README.md deleted file mode 100644 index c521f8ba261..00000000000 --- a/contrib/config/backups/gcp/README.md +++ /dev/null @@ -1,292 +0,0 @@ -# Binary Backups to Google Cloud Storage - -Binary backups can use [Google Cloud Storage](https://cloud.google.com/storage) for object storage -using [MinIO GCS Gateway](https://docs.min.io/docs/minio-gateway-for-gcs.html). - -## Provisioning GCS - -Some example scripts have been provided to illustrate how to create a bucket in GCS. 
- -- [terraform](terraform/README.md) - terraform scripts to provision GCS bucket - -## Setting up the Environment - -### Prerequisites - -You will need these tools: - -- Docker Environment - - [Docker](https://docs.docker.com/get-docker/) - container engine platform - - [Docker Compose](https://docs.docker.com/compose/install/) - orchestrates running docker - containers -- Kubernetes Environment - - [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) - required for interacting - with the Kubernetes platform - - [helm](https://helm.sh/docs/intro/install/) - deploys Kubernetes packages called helm charts - - [helm-diff](https://github.com/databus23/helm-diff) [optional] - displays differences that - will be applied to Kubernetes cluster - - [helmfile](https://github.com/roboll/helmfile#installation) [optional] - orchestrates helm chart - deployments - -### Using Docker Compose - -A `docker-compose.yml` configuration is provided that will run the MinIO GCS gateway and Dgraph -cluster. - -#### Configuring Docker Compose - -The Docker Compose configuration `docker-compose.yml` will require the following files: - -- `credentials.json` - credentials that grant access to the GCS bucket -- `minio.env` - that holds `MINIO_ACCESS_KEY` and `MINIO_SECRET_KEY` values. -- `env.sh` - that stores `PROJECT_ID` and `BACKUP_BUCKET_NAME`. - -For convenience, the [terraform](terraform/README.md) scripts can generate these files, including a random password. - -The `minio.env` will be used by both Dgraph alpha node(s) and the -[MinIO GCS Gateway](https://docs.min.io/docs/minio-gateway-for-gcs.html) server. You will need to -create a file like this: - -```bash -# minio.env -MINIO_ACCESS_KEY=<access-key> -MINIO_SECRET_KEY=<secret-key> -``` - -The `env.sh` will be sourced before using Docker Compose or before triggering backups: - -```bash -# env.sh -export PROJECT_ID=<project-id> -export BACKUP_BUCKET_NAME=<bucket-name> -``` - -#### Running with Docker Compose - -```bash -## source script for envvars: PROJECT_ID and BACKUP_BUCKET_NAME -. env.sh -## Run Minio GCS Gateway and Dgraph Cluster -docker-compose up --detach -``` - -#### Access Minio and Ratel UI - -- MinIO UI: http://localhost:9000 -- Ratel UI: http://localhost:8000 - -#### Clean Up Docker Environment - -```bash -docker-compose stop -docker-compose rm -``` - -### Using Kubernetes with Helm Charts - -For Kubernetes, you can deploy -[MinIO GCS Gateway](https://docs.min.io/docs/minio-gateway-for-gcs.html), Dgraph cluster, and a -Kubernetes Cronjob that triggers backups using [helm](https://helm.sh/docs/intro/install/). - -#### Configuring Secrets Values - -These values are generated if you used the [terraform](terraform/README.md) scripts. If you -already have an existing GCS bucket that you would like to use, you will need to create -`charts/dgraph_secrets.yaml` and `charts/minio_secrets.yaml` files. - -For the `charts/dgraph_secrets.yaml`, you would create a file like this: - -```yaml -backups: - keys: - minio: - access: <access-key> - secret: <secret-key> -``` - -For the `charts/minio_secrets.yaml`, you would create a file like this: - -```yaml -accessKey: <access-key> -secretKey: <secret-key> -gcsgateway: - gcsKeyJson: | - <contents of credentials.json> -``` - -#### Configuring Environments - -Create an `env.sh` file to store `BACKUP_BUCKET_NAME` and `PROJECT_ID`. If the -[terraform](terraform/README.md) scripts were used to create the GCS bucket, then these scripts will -have already generated this file.
- -This is the same file used for the Docker Compose environment and will look like this: - -```bash -# env.sh -export PROJECT_ID=<project-id> -export BACKUP_BUCKET_NAME=<bucket-name> -``` - -#### Deploy Using Helmfile - -If you have [helmfile](https://github.com/roboll/helmfile#installation) and -[helm-diff](https://github.com/databus23/helm-diff) installed, you can deploy -[MinIO GCS Gateway](https://docs.min.io/docs/minio-gateway-for-gcs.html) and Dgraph cluster with the -following: - -```bash -## source script for envvars: PROJECT_ID and BACKUP_BUCKET_NAME -. env.sh -## deploy Dgraph cluster and MinIO GCS Gateway using helm charts -helmfile apply -``` - -#### Deploy Using Helm - -```bash -## source script for envvars: PROJECT_ID and BACKUP_BUCKET_NAME -. env.sh -## deploy MinIO GCS Gateway in minio namespace -kubectl create namespace "minio" -helm repo add "minio" https://helm.min.io/ -helm install "gcsgw" \ - --namespace minio \ - --values ./charts/minio_config.yaml \ - --values ./charts/minio_secrets.yaml \ - --set gcsgateway.projectId=${PROJECT_ID} \ - minio/minio - -## deploy Dgraph in default namespace -helm repo add "dgraph" https://charts.dgraph.io -helm install "my-release" \ - --namespace "default" \ - --values ./charts/dgraph_config.yaml \ - --values ./charts/dgraph_secrets.yaml \ - --set backups.destination="minio://gcsgw-minio.minio.svc:9000/${BACKUP_BUCKET_NAME}" \ - dgraph/dgraph -``` - -#### Access Resources - -For MinIO UI, you can use this to access it at http://localhost:9000: - -```bash -export MINIO_POD_NAME=$( - kubectl get pods \ - --namespace minio \ - --selector "release=gcsgw" \ - --output jsonpath="{.items[0].metadata.name}" -) -kubectl --namespace minio port-forward $MINIO_POD_NAME 9000:9000 -``` - -For Dgraph Alpha, you can use this to access it at http://localhost:8080: - -```bash -export ALPHA_POD_NAME=$( - kubectl get pods \ - --namespace default \ - --selector "statefulset.kubernetes.io/pod-name=my-release-dgraph-alpha-0,release=my-release" \ - --output jsonpath="{.items[0].metadata.name}" -) -kubectl --namespace default port-forward $ALPHA_POD_NAME 8080:8080 -``` - -For Dgraph Ratel UI, you can use this to access it at http://localhost:8000: - -```bash -export RATEL_POD_NAME=$( - kubectl get pods \ - --namespace default \ - --selector "component=ratel,release=my-release" \ - --output jsonpath="{.items[0].metadata.name}" -) -kubectl --namespace default port-forward $RATEL_POD_NAME 8000:8000 -``` - -#### Cleanup Kubernetes Environment - -If you are using helmfile, you can delete the resources with: - -```bash -## source script for envvars: PROJECT_ID and BACKUP_BUCKET_NAME -. env.sh -## remove Dgraph cluster and MinIO GCS Gateway -helmfile delete -## remove storage used by Dgraph cluster -kubectl delete pvc --selector release=my-release # release dgraph name specified in charts/helmfile.yaml -``` - -If you are just using helm, you can delete the resources with: - -```bash -helm delete --namespace default "my-release" # dgraph release name used earlier -kubectl delete pvc --selector release=my-release # dgraph release name used earlier -helm delete gcsgw --namespace minio -``` - -## Triggering a Backup - -This is run from the host with the alpha node accessible on localhost at port `8080`. This can be done by -running the docker-compose environment, or by running -`kubectl port-forward pod/dgraph-dgraph-alpha-0 8080:8080`. In the docker-compose environment, the -host for `MINIO_HOST` is `gateway`.
In the Kubernetes environment, using the scripts above, the -`MINIO_HOST` is `gcsgw-minio.minio.svc`. - -### Using GraphQL - -For versions of Dgraph that support GraphQL, you can use this: - -```bash -## source script for envvars BACKUP_BUCKET_NAME -. env.sh -## variables that depend on the docker or kubernetes env -ALPHA_HOST="localhost" # hostname to connect to alpha1 container -MINIO_HOST="gateway" # hostname from alpha1 container -BACKUP_PATH=minio://${MINIO_HOST}:9000/${BACKUP_BUCKET_NAME}?secure=false - -GRAPHQL="{\"query\": \"mutation { backup(input: {destination: \\\"$BACKUP_PATH\\\" forceFull: true}) { response { message code } } }\"}" -HEADER="Content-Type: application/json" - -curl --silent --header "$HEADER" --request POST $ALPHA_HOST:8080/admin --data "$GRAPHQL" -``` - -This should return a response in JSON that will look like this if successful: - -```JSON -{ - "data": { - "backup": { - "response": { - "message": "Backup completed.", - "code": "Success" - } - } - } -} -``` - -### Using REST API - -For earlier Dgraph versions that support the REST admin port, you can do this: - -```bash -## source script for envvars BACKUP_BUCKET_NAME -. env.sh -## variables that depend on the docker or kubernetes env -ALPHA_HOST="localhost" # hostname to connect to alpha1 container -MINIO_HOST="gateway" # hostname from alpha1 container -BACKUP_PATH=minio://${MINIO_HOST}:9000/${BACKUP_BUCKET_NAME}?secure=false - -curl --silent --request POST $ALPHA_HOST:8080/admin/backup?force_full=true --data "destination=$BACKUP_PATH" -``` - -This should return a response in JSON that will look like this if successful: - -```JSON -{ - "code": "Success", - "message": "Backup completed." -} -``` diff --git a/contrib/config/backups/gcp/charts/.gitignore b/contrib/config/backups/gcp/charts/.gitignore deleted file mode 100644 index f4b6b916ec4..00000000000 --- a/contrib/config/backups/gcp/charts/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -minio_secrets.yaml -dgraph_secrets.yaml diff --git a/contrib/config/backups/gcp/charts/dgraph_config.yaml b/contrib/config/backups/gcp/charts/dgraph_config.yaml deleted file mode 100644 index 83fe869f53d..00000000000 --- a/contrib/config/backups/gcp/charts/dgraph_config.yaml +++ /dev/null @@ -1,9 +0,0 @@ -backups: - full: - enabled: true - debug: true - schedule: "*/15 * * * *" -alpha: - configFile: - config.hcl: | - whitelist = "10.0.0.0/8,172.0.0.0/8,192.168.0.0/16,127.0.0.1" diff --git a/contrib/config/backups/gcp/charts/helmfile.yaml b/contrib/config/backups/gcp/charts/helmfile.yaml deleted file mode 100644 index 18c9a0f03be..00000000000 --- a/contrib/config/backups/gcp/charts/helmfile.yaml +++ /dev/null @@ -1,31 +0,0 @@ -repositories: - - name: minio - url: https://helm.min.io/ - - name: dgraph - url: https://charts.dgraph.io - -releases: - - name: gcsgw - namespace: minio - chart: minio/minio - version: 6.3.1 - values: - - minio_config.yaml - ## generated by terraform scripts - - minio_secrets.yaml - - gcsgateway: - projectId: {{ requiredEnv "PROJECT_ID" }} - - - - name: my-release - namespace: default - chart: dgraph/dgraph - version: 0.0.11 - values: - - ./dgraph_config.yaml - ## generated by terraform scripts - - ./dgraph_secrets.yaml - ## minio server configured - - backups: - ## Format <release>-minio.<namespace>.svc:9000/<bucket> - destination: minio://gcsgw-minio.minio.svc:9000/{{ requiredEnv "BACKUP_BUCKET_NAME" }} diff --git a/contrib/config/backups/gcp/charts/minio_config.yaml b/contrib/config/backups/gcp/charts/minio_config.yaml deleted file mode 100644 index
1f4e8d784bf..00000000000 --- a/contrib/config/backups/gcp/charts/minio_config.yaml +++ /dev/null @@ -1,8 +0,0 @@ -image: - repository: minio/minio - tag: RELEASE.2020-09-17T04-49-20Z -persistence: - enabled: false -gcsgateway: - enabled: true - replicas: 1 diff --git a/contrib/config/backups/gcp/docker-compose.yml b/contrib/config/backups/gcp/docker-compose.yml deleted file mode 100644 index 2e2333cca64..00000000000 --- a/contrib/config/backups/gcp/docker-compose.yml +++ /dev/null @@ -1,44 +0,0 @@ -version: "3.5" -services: - zero1: - image: dgraph/dgraph:${DGRAPH_VERSION} - container_name: zero1 - working_dir: /data/zero1 - ports: - - 5080:5080 - - 6080:6080 - command: dgraph zero --my=zero1:5080 --replicas 1 --raft="idx=1;" - - alpha1: - image: dgraph/dgraph:${DGRAPH_VERSION} - container_name: alpha1 - working_dir: /data/alpha1 - env_file: - - minio.env - ports: - - 8080:8080 - - 9080:9080 - command: dgraph alpha --my=alpha1:7080 --zero=zero1:5080 - --security "whitelist=10.0.0.0/8,172.0.0.0/8,192.168.0.0/16,127.0.0.1;" - - ratel: - image: dgraph/ratel:${DGRAPH_VERSION} - ports: - - 8000:8000 - container_name: ratel - - minio: - image: minio/minio:${MINIO_VERSION} - command: gateway gcs ${PROJECT_ID} - container_name: gateway - volumes: - - type: bind - source: ./credentials.json - target: /credentials.json - read_only: true - env_file: - - minio.env - environment: - GOOGLE_APPLICATION_CREDENTIALS: /credentials.json - ports: - - 9000:9000 diff --git a/contrib/config/backups/gcp/helmfile.yaml b/contrib/config/backups/gcp/helmfile.yaml deleted file mode 100644 index 78b0eeffd54..00000000000 --- a/contrib/config/backups/gcp/helmfile.yaml +++ /dev/null @@ -1,2 +0,0 @@ -helmfiles: - - ./charts/helmfile.yaml diff --git a/contrib/config/backups/gcp/terraform/.gitignore b/contrib/config/backups/gcp/terraform/.gitignore deleted file mode 100644 index 578082f808e..00000000000 --- a/contrib/config/backups/gcp/terraform/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -# terraform files -terraform.tfvars -.terraform -*.tfstate* diff --git a/contrib/config/backups/gcp/terraform/README.md b/contrib/config/backups/gcp/terraform/README.md deleted file mode 100644 index 677eb35c67b..00000000000 --- a/contrib/config/backups/gcp/terraform/README.md +++ /dev/null @@ -1,74 +0,0 @@ -# Google Cloud Storage with Terraform - -## About - -This script will create the required resources needed to create a bucket in Google Storage Bucket -using the -[`simple-bucket`](https://github.com/terraform-google-modules/terraform-google-cloud-storage/tree/master/modules/simple_bucket) -Terraform module. These scripts will also create a `credentials.json` that will have access to the -storage bucket, which is needed for the -[MinIO GCS Gateway](https://docs.min.io/docs/minio-gateway-for-gcs.html) and optionally generate -random MinIO access key and secret key. - -## Prerequisites - -You need the following installed to use this automation: - -- [Google Cloud SDK](https://cloud.google.com/sdk/docs/install) - for the `gcloud` command and - required to access Google Cloud. 
- - Google Project with billing enabled - - `gcloud` logged into IAM account with roles added: - - `serviceusage.apiKeys.create` - - `clientauthconfig.clients.create` - - `iam.serviceAccountKeys.create` -- [Terraform](https://www.terraform.io/downloads.html) - tool used to provision resources and create - templates - -## Configuration - -You will need to define the following variables: - -- Required Variables: - - `region` (required) - the region where the GCS bucket will be created - - `project_id` (required) - a globally unique name for the Google project that will contain the - GCS bucket - - `name` (default = `my-dgraph-backups`) - globally unique name of the GCS bucket -- Optional Variables: - - `minio_access_key` - specify an access key or have terraform generate a random access key - - `minio_secret_key` - specify a secret key or have terraform generate a random secret key - -## Steps - -### Define Variables - -You can define these when prompted, or in the `terraform.tfvars` file, or through command line -variables, e.g. `TF_VAR_region`, `TF_VAR_project_id`, and `TF_VAR_name`. Below is an example -`terraform.tfvars` file: - -```terraform -# terraform.tfvars -region = "us-central1" -project_id = "my-company-test" -name = "my-backups-31393832" -``` - -### Download Plugins and Modules - -```bash -terraform init -``` - -### Prepare and Provision Resources - -```bash -## get a list of changes that will be made -terraform plan -## apply the changes -terraform apply -``` - -## Cleanup - -```bash -terraform destroy -``` diff --git a/contrib/config/backups/gcp/terraform/main.tf b/contrib/config/backups/gcp/terraform/main.tf deleted file mode 100644 index c14d29c322f..00000000000 --- a/contrib/config/backups/gcp/terraform/main.tf +++ /dev/null @@ -1,116 +0,0 @@ -##################################################################### -# Variables -##################################################################### -variable "region" {} -variable "project_id" {} -variable "name" {} -variable "create_minio_env" { default = true } -variable "create_minio_secrets" { default = true } -variable "create_dgraph_secrets" { default = true } -variable "create_credentials_json" { default = true } -variable "create_env_sh" { default = true } -variable "minio_access_key" { default = "" } -variable "minio_secret_key" { default = "" } - -##################################################################### -# Modules -##################################################################### -module "dgraph_backups" { - source = "git::https://github.com/terraform-google-modules/terraform-google-cloud-storage.git//modules/simple_bucket?ref=v1.7.0" - name = var.name - project_id = var.project_id - location = var.region - - lifecycle_rules = [{ - action = { - type = "Delete" - } - - condition = { - age = 365 - with_state = "ANY" - } - }] -} - -module "service_account" { - source = "./modules/gsa" - service_account_id = var.name - display_name = var.name - project_id = var.project_id - roles = ["roles/storage.admin"] -} - -##################################################################### -# Resources - Random Vars -##################################################################### -resource "random_string" "key" { - length = 20 - special = false -} - -resource "random_password" "secret" { - length = 40 -} - -##################################################################### -# Locals -##################################################################### - -locals { - minio_access_key = var.minio_access_key !=
"" ? var.minio_access_key : random_string.key.result - minio_secret_key = var.minio_secret_key != "" ? var.minio_secret_key : random_password.secret.result - - minio_vars = { - gcsKeyJson = indent(2, module.service_account.key) - accessKey = local.minio_access_key - secretKey = local.minio_secret_key - } - - env_vars = { - project_id = var.project_id - bucket = var.name - } - - dgraph_secrets = templatefile("${path.module}/templates/dgraph_secrets.yaml.tmpl", local.minio_vars) - minio_secrets = templatefile("${path.module}/templates/minio_secrets.yaml.tmpl", local.minio_vars) - minio_env = templatefile("${path.module}/templates/minio.env.tmpl", local.minio_vars) - env_sh = templatefile("${path.module}/templates/env.sh.tmpl", local.env_vars) -} - -##################################################################### -# Resources - Files -##################################################################### -resource "local_file" "credentials" { - count = var.create_credentials_json ? 1 : 0 - content = module.service_account.key - filename = "${path.module}/../credentials.json" - file_permission = "0644" -} - -resource "local_file" "minio_env" { - count = var.create_minio_env != "" ? 1 : 0 - content = local.minio_env - filename = "${path.module}/../minio.env" - file_permission = "0644" -} - -resource "local_file" "env_sh" { - count = var.create_env_sh != "" ? 1 : 0 - content = local.env_sh - filename = "${path.module}/../env.sh" - file_permission = "0644" -} - -resource "local_file" "minio_secrets" { - count = var.create_minio_secrets != "" ? 1 : 0 - content = local.minio_secrets - filename = "${path.module}/../charts/minio_secrets.yaml" - file_permission = "0644" -} - -resource "local_file" "dgraph_secrets" { - count = var.create_dgraph_secrets != "" ? 1 : 0 - content = local.dgraph_secrets - filename = "${path.module}/../charts/dgraph_secrets.yaml" - file_permission = "0644" -} diff --git a/contrib/config/backups/gcp/terraform/modules/gsa/main.tf b/contrib/config/backups/gcp/terraform/modules/gsa/main.tf deleted file mode 100644 index 356575eea87..00000000000 --- a/contrib/config/backups/gcp/terraform/modules/gsa/main.tf +++ /dev/null @@ -1,54 +0,0 @@ -##################################################################### -# Variables -##################################################################### -variable "service_account_id" {} -variable "display_name" {} -variable "project_id" {} -variable "roles" { - description = "IAM roles to be added to the service account. See https://cloud.google.com/iam/docs/understanding-roles." 
- type = list(string) - default = [] -} - -##################################################################### -# Locals -##################################################################### -locals { - roles = toset(var.roles) - sensitive_roles = ["roles/owner"] - filtered_roles = setsubtract(local.roles, local.sensitive_roles) -} - -##################################################################### -# Resources -##################################################################### -resource "google_service_account" "service_account" { - account_id = var.service_account_id - display_name = var.display_name - project = var.project_id -} - -resource "google_service_account_key" "key" { - service_account_id = google_service_account.service_account.name -} - -resource "google_project_iam_member" "project_roles" { - for_each = local.filtered_roles - - project = var.project_id - role = each.value - member = "serviceAccount:${google_service_account.service_account.email}" -} - -##################################################################### -# Output -##################################################################### -output "key" { - description = "Service account key (for single use)." - value = base64decode(google_service_account_key.key.private_key) -} - -output "email" { - description = "The fully qualified email address of the created service account." - value = google_service_account.service_account.email -} diff --git a/contrib/config/backups/gcp/terraform/provider.tf b/contrib/config/backups/gcp/terraform/provider.tf deleted file mode 100644 index a88ab79e52f..00000000000 --- a/contrib/config/backups/gcp/terraform/provider.tf +++ /dev/null @@ -1,9 +0,0 @@ -provider "google" { - version = "~> 3.38.0" - region = var.region - project = var.project_id -} - -provider "random" { - version = "2.3.0" -} diff --git a/contrib/config/backups/gcp/terraform/templates/dgraph_secrets.yaml.tmpl b/contrib/config/backups/gcp/terraform/templates/dgraph_secrets.yaml.tmpl deleted file mode 100644 index f4f39d7b732..00000000000 --- a/contrib/config/backups/gcp/terraform/templates/dgraph_secrets.yaml.tmpl +++ /dev/null @@ -1,5 +0,0 @@ -backups: - keys: - minio: - access: ${accessKey} - secret: ${secretKey} diff --git a/contrib/config/backups/gcp/terraform/templates/env.sh.tmpl b/contrib/config/backups/gcp/terraform/templates/env.sh.tmpl deleted file mode 100644 index 26369c7f6bc..00000000000 --- a/contrib/config/backups/gcp/terraform/templates/env.sh.tmpl +++ /dev/null @@ -1,3 +0,0 @@ -## env.sh -export PROJECT_ID=${project_id} -export BACKUP_BUCKET_NAME=${bucket} diff --git a/contrib/config/backups/gcp/terraform/templates/minio.env.tmpl b/contrib/config/backups/gcp/terraform/templates/minio.env.tmpl deleted file mode 100644 index 22844a84411..00000000000 --- a/contrib/config/backups/gcp/terraform/templates/minio.env.tmpl +++ /dev/null @@ -1,3 +0,0 @@ -## minio.env -MINIO_ACCESS_KEY=${accessKey} -MINIO_SECRET_KEY=${secretKey} diff --git a/contrib/config/backups/gcp/terraform/templates/minio_secrets.yaml.tmpl b/contrib/config/backups/gcp/terraform/templates/minio_secrets.yaml.tmpl deleted file mode 100644 index 8ffb9f040c4..00000000000 --- a/contrib/config/backups/gcp/terraform/templates/minio_secrets.yaml.tmpl +++ /dev/null @@ -1,6 +0,0 @@ -accessKey: ${accessKey} -secretKey: ${secretKey} - -gcsgateway: - gcsKeyJson: | - ${indent(2,gcsKeyJson)} diff --git a/contrib/config/backups/nfs/.env b/contrib/config/backups/nfs/.env deleted file mode 100644 index 94ebaa52948..00000000000 --- 
a/contrib/config/backups/nfs/.env +++ /dev/null @@ -1,4 +0,0 @@ -## IMPORTANT: Though `latest` should be alright for local dev environments, -## never use `latest` for production environments as this can lead to -## inconsistent versions -DGRAPH_VERSION=latest diff --git a/contrib/config/backups/nfs/.gitignore b/contrib/config/backups/nfs/.gitignore deleted file mode 100644 index b24ef693040..00000000000 --- a/contrib/config/backups/nfs/.gitignore +++ /dev/null @@ -1,15 +0,0 @@ -## Vagrant files -.vagrant - -## Terraform files -.terraform -*terraform.tfstate* -terraform.tfvars - -## Configurations auto-generated -/env.sh - -## Rook temporary helm charts -rook-nfs-operator -rook-nfs-server -rook-nfs-storageclass diff --git a/contrib/config/backups/nfs/README.md b/contrib/config/backups/nfs/README.md deleted file mode 100644 index cebeb0931a5..00000000000 --- a/contrib/config/backups/nfs/README.md +++ /dev/null @@ -1,333 +0,0 @@ -# Binary Backups to Network File System - -When using a file system for binary backups, NFS is recommended, so that backups _"work -seamlessly across multiple machines and/or containers"_. - -- [Overview of NFS Servers](#overview-of-nfs-servers) -- [Provision NFS Server Instructions](#provision-nfs-server-instructions) - - [Using Remote Cloud Solutions](#using-remote-cloud-solutions) - - [Using the Rook Solution](#using-the-rook-solution) - - [Using a Local Vagrant Solution](#using-a-local-vagrant-solution) - - [Vagrant Server](#vagrant-server) - - [Vagrant Client (Optional)](#vagrant-client-optional) - - [Vagrant Cleanup](#vagrant-cleanup) -- [Testing NFS with Docker Compose](#testing-nfs-with-docker-compose) - - [Setup Env Vars for Docker Compose](#setup-env-vars-for-docker-compose) - - [Start Docker Compose with NFS Volume](#start-docker-compose-with-nfs-volume) - - [Docker Cleanup](#docker-cleanup) - - [Testing NFS with Kubernetes](#testing-nfs-with-kubernetes) - - [Setup Env Vars for Kubernetes](#setup-env-vars-for-kubernetes) - - [Deploy Using Helmfile](#deploy-using-helmfile) - - [Cleanup Using Helmfile](#cleanup-using-helmfile) - - [Minikube Notes](#minikube-notes) - - [Minikube with Virtualbox](#minikube-with-virtualbox) - - [Minikube with KVM](#minikube-with-kvm) - - [Verify NFS between Minikube and Vagrant](#verify-nfs-between-minikube-and-vagrant) -- [Accessing Dgraph Services](#accessing-dgraph-services) -- [Trigger a Backup](#trigger-a-backup) - -## Overview of NFS Servers - -You can use external NFS outside of the [Docker](https://www.docker.com/) or -[Kubernetes](https://kubernetes.io/) cluster, or deploy a container offering NFS services. - -For production environments, using an NFS server external to the cluster can increase availability -in the event that [Kubernetes](https://kubernetes.io/) services are interrupted. In more advanced -scenarios, deploying a container offering NFS services where the storage is backed by high-speed -storage such as [Ceph](https://ceph.io/) is beneficial for large datasets. In this latter scenario, -secondary storage, such as an object store from the cloud provider, could be used for greater -availability in the event that Kubernetes services or the [Kubernetes](https://kubernetes.io/) -cluster itself fails. - -This guide provides tips on how to back up Dgraph using NFS. 
For this guide, the automation covers -the following: - -- External NFS - - Cloud Providers - - AWS [EFS](https://aws.amazon.com/efs/) ([Elastic File System](https://aws.amazon.com/efs/)) - - [Google Cloud Filestore](https://cloud.google.com/filestore) - - Local NFS Server - - [Vagrant](https://www.vagrantup.com/) managed virtual server that implements Linux - kernel-based NFS Server -- Internal NFS (deployed as a container) - - [Rook](https://rook.io/) NFS operator to deploy a container offering NFS Server with - [Ganesha NFS Server](https://github.com/nfs-ganesha/nfs-ganesha/wiki) - -## Provision NFS Server Instructions - -### Using Remote Cloud Solutions - -You can provision external NFS to use with your Dgraph cluster running on Kubernetes using these -scripts. Unlike object storage, such as S3 or GCS, this storage will not be accessible from the -public Internet and so can only be accessed from within a private subnet. - -- Shell Scripts - - [Google Cloud Filestore](gcfs-cli/README.md) - provision Filestore using `gcloud` -- Terraform - - [Google Cloud Filestore](gcfs-terraform/README.md) - use Filestore as NFS share on GKE. - - [Amazon Elastic File System](efs-terraform/README.md) - use EFS as NFS share on EKS. - -### Using the Rook Solution - -You can use an internal NFS server running on Kubernetes with the [Rook](https://rook.io/) NFS Operator. -To enable this, run the following before running the -[Kubernetes Environment](#testing-nfs-with-kubernetes). Both of these steps are required for this -feature: - -```bash -## Download Rook NFS Operator Manifests -charts/rook/fetch-operator.sh -## Setup Environment for using Rook NFS Server -cp charts/rook/env.sh env.sh -``` - -### Using a Local Vagrant Solution - -The steps to configure NFS for your local operating system or distro can vary greatly, -so a [Vagrant](https://www.vagrantup.com/) example is provided. This should work with the -[Virtualbox](https://www.virtualbox.org/) provider on Windows, Mac, and Linux, as -[Virtualbox](https://www.virtualbox.org/) creates routable IP addresses available to the host. -Therefore, this NFS server can be accessed from either [Docker](https://docs.docker.com/engine/) or -[Minikube](https://github.com/kubernetes/minikube) environments. - -† Linux and macOS have native NFS implementations, with macOS NFS configuration varying -between macOS versions. Windows Server has different -[NFS Server implementations](https://docs.microsoft.com/en-us/windows-server/storage/nfs/nfs-overview) -between Windows Server versions. For Windows 10, there are open source options such as -[Cygwin](https://www.cygwin.com/), or you can use Linux through -[WSL](https://docs.microsoft.com/en-us/windows/wsl/install-win10). - -#### Vagrant Server - -You can bring up the NFS server with: - -```bash -vagrant up -``` - -This will configure `env.sh` to point to the NFS server on the guest system. - -#### Vagrant Client (Optional) - -Optionally, if you would like to use Dgraph in a virtual machine, you can bring up the client: - -```bash -## Launch Dgraph VM -vagrant up nfs-client -## Log into nfs client system -vagrant ssh -## Change directory to configuration -cd /vagrant -``` - -After this, you can follow [Testing NFS with Docker Compose](#testing-nfs-with-docker-compose) to access NFS. - -#### Vagrant Cleanup - -```bash -vagrant destroy -``` - -## Testing NFS with Docker Compose - -### Setup Env Vars for Docker Compose - -If you used automation from the [Vagrant Solution](#using-a-local-vagrant-solution), you can skip this -step. 
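-
-In the Vagrant case, the `Vagrantfile` trigger has already written `env.sh` for you. As a rough
-sketch of what to expect (the server address below is only illustrative; the actual value is the
-private-network IP assigned in `vagrant/hosts`):
-
-```bash
-## env.sh as generated by the Vagrant trigger (address is an example)
-export NFS_PATH="/srv/share"
-export NFS_SERVER="192.168.123.10"
-```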
- -Otherwise, you will need to create a file named `env.sh` and configure the IP address (or DNS name) -and exported NFS shared file path: - -```bash -export NFS_PATH="" -export NFS_SERVER="" -``` - -### Start Docker Compose with NFS Volume - -```bash -## Source required environment variables -source env.sh -## Start Docker Compose -docker-compose up --detach -``` - -### Docker Cleanup - -When finished, you can remove the containers and the volume resource with: - -```bash -docker-compose stop && docker-compose rm -docker volume ls | grep -q nfs_nfsmount && docker volume rm nfs_nfsmount > /dev/null -``` - -## Testing NFS with Kubernetes - -### Setup Env Vars for Kubernetes - -If you used automation from the local [Vagrant Solution](#using-a-local-vagrant-solution), the -[Rook Solution](#using-the-rook-solution), or a cloud solution with [EFS](./efs-terraform/README.md) or -[Google Cloud Filestore](./gcfs-terraform/README.md), you can skip this step. - -Otherwise, you will need to create a file named `env.sh` and configure the IP address (or DNS name) -and exported NFS shared file path: - -```bash -export NFS_PATH="" -export NFS_SERVER="" -``` - -### Deploy Using Helmfile - -If you have [helmfile](https://github.com/roboll/helmfile#installation) and -[helm-diff](https://github.com/databus23/helm-diff) installed, you can deploy Dgraph with NFS -support for backups with this: - -```bash -## Source required environment variables -source env.sh -## Deploy Dgraph (and optionally Rook, if Rook was enabled) -helmfile apply -``` - -### Cleanup Using Helmfile - -```bash -helmfile delete -``` - -### Minikube Notes - -If you are using NFS with the [Vagrant Solution](#using-a-local-vagrant-solution), you will need to place -[minikube](https://github.com/kubernetes/minikube) on the same private network as Vagrant. - -#### Minikube with Virtualbox - -For [VirtualBox](https://www.virtualbox.org) environments, where both -[Vagrant](https://www.vagrantup.com/) and [minikube](https://github.com/kubernetes/minikube) will -use [Virtualbox](https://www.virtualbox.org), you can do the following: - -```bash -## Vagrant should have been started with Virtualbox by default -export VAGRANT_DEFAULT_PROVIDER="virtualbox" -vagrant up - -## Set Driver to Virtualbox (same as Vagrant provider) -minikube config set driver virtualbox -## Start a minikube cluster -minikube start --host-only-cidr='192.168.123.1/24' -``` - -#### Minikube with KVM - -When using Vagrant with `libvirt` (see -[vagrant-libvirt](https://github.com/vagrant-libvirt/vagrant-libvirt)), you can have -[minikube](https://github.com/kubernetes/minikube) target the same network. - -```bash -## Vagrant should have been started with KVM -export VAGRANT_DEFAULT_PROVIDER="libvirt" -vagrant up - -## Check that the virtual network exists, named after the directory, e.g. `nfs0` -virsh net-list - -## Start minikube using the same virtual network as Vagrant, e.g. `nfs0` -minikube config set driver kvm2 -minikube start --kvm-network nfs0 -``` - -#### Verify NFS between Minikube and Vagrant - -Next, verify that the NFS share works between the Vagrant NFS server and a client Dgraph Alpha pod running -in [minikube](https://github.com/kubernetes/minikube). 
- -Create a file from the client: - -```bash -## Log into an Alpha pod -RELEASE="my-release" -kubectl exec -ti $RELEASE-dgraph-alpha-0 -- bash -## Create a file on NFS volume -date > /dgraph/backups/hello_world.txt -exit -``` - -Verify that the file was copied to the server: - -```bash -## Log into Vagrant NFS Server -vagrant ssh nfs-server -## Check Results -cat /srv/share/hello_world.txt -logout -``` - -## Accessing Dgraph Services - -In the [Docker Compose Environment](#testing-nfs-with-docker-compose), Ratel UI will be accessible -from http://localhost:8000 and Alpha from http://localhost:8080. - -In a [Kubernetes Environment](#testing-nfs-with-kubernetes), you will need to use port-forward to -access these from `localhost`. - -For Dgraph Alpha, you can use this to access it at http://localhost:8080: - -```bash -RELEASE="my-release" -export ALPHA_POD_NAME=$( - kubectl get pods \ - --namespace default \ - --selector "statefulset.kubernetes.io/pod-name=$RELEASE-dgraph-alpha-0,release=$RELEASE" \ - --output jsonpath="{.items[0].metadata.name}" -) - -kubectl --namespace default port-forward $ALPHA_POD_NAME 8080:8080 -``` - -For Dgraph Ratel UI, you can use this to access it at http://localhost:8000: - -```bash -RELEASE="my-release" -export RATEL_POD_NAME=$( - kubectl get pods \ - --namespace default \ - --selector "component=ratel,release=$RELEASE" \ - --output jsonpath="{.items[0].metadata.name}" -) - -kubectl --namespace default port-forward $RATEL_POD_NAME 8000:8000 -``` - -## Trigger a Backup - -In the [Kubernetes Environment](#testing-nfs-with-kubernetes), backups are scheduled automatically -using the -[Kubernetes CronJob](https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/). As long -as the services are available locally (see [Accessing Dgraph Services](#accessing-dgraph-services)), -we can trigger a backup using a `curl` command. - -For the [Docker Compose Environment](#testing-nfs-with-docker-compose), you can do the following: - -```bash -ALPHA_HOST="localhost" -BACKUP_PATH="/data/backups" - -GRAPHQL="{\"query\": \"mutation { backup(input: {destination: \\\"$BACKUP_PATH\\\" forceFull: true}) { response { message code } } }\"}" -HEADER="Content-Type: application/json" - -curl --silent --header "$HEADER" --request POST $ALPHA_HOST:8080/admin --data "$GRAPHQL" -``` - -For the [Kubernetes Environment](#testing-nfs-with-kubernetes), after running port-forward, you can do -the following: - -```bash -ALPHA_HOST="localhost" -BACKUP_PATH="/dgraph/backups" - -GRAPHQL="{\"query\": \"mutation { backup(input: {destination: \\\"$BACKUP_PATH\\\" forceFull: true}) { response { message code } } }\"}" -HEADER="Content-Type: application/json" - -curl --silent --header "$HEADER" --request POST $ALPHA_HOST:8080/admin --data "$GRAPHQL" -``` diff --git a/contrib/config/backups/nfs/Vagrantfile b/contrib/config/backups/nfs/Vagrantfile deleted file mode 100644 index a0d1831c781..00000000000 --- a/contrib/config/backups/nfs/Vagrantfile +++ /dev/null @@ -1,57 +0,0 @@ -# -*- mode: ruby -*- -# vi: set ft=ruby : -eval File.read("./vagrant/helper.rb") - -Vagrant.configure("2") do |config| - @hosts.each do |hostname, ipaddr| - primary = hostname == @primary - autostart = @starts.include? 
hostname - config.vm.define hostname, autostart: autostart, primary: primary do |node| - node.vm.box = "generic/ubuntu1804" - node.vm.hostname = "#{hostname}" - node.vm.network "private_network", ip: ipaddr - node.vm.synced_folder ".", "/vagrant" - - ## virtualbox/windows - alternative synced_folder option - node.vm.provider "virtualbox" do |vbox, override| - vbox.name = "#{hostname}" - ## enable SMB3.0 for better fileshare UX on Windows-Virtualbox - if Vagrant::Util::Platform.windows? then - override.vm.synced_folder ".", "/vagrant", @smb_sync_opts - end - end - - ## hyperv - alternative synced_folder option - node.vm.provider "hyperv" do |hyperv, override| - hyperv.vmname = "#{hostname}" - ## enable SMB3.0 for better fileshare UX on Windows-HyperV - override.vm.synced_folder ".", "/vagrant", @smb_sync_opts - end - - ## Provision nfs-server and nfs-client - node.vm.provision "shell" do |shell| - shell.path = "./vagrant/provision.sh" - shell.privileged = true - shell.env = { - INSTALL_DOCKER: "true", - INSTALL_COMPOSE: "true" - } - end - - ## Configure Host 'env.sh' on host system - ## This is required if only the nfs-server is used and the client runs on the host - node.trigger.after :up do |trigger| - trigger.name = "Configure host 'env.sh'" - trigger.ruby do |env,machine| - File.open('env.sh', 'w') do |file| - file << <<~HEREDOC - ## Configuration generated by Vagrant - export NFS_PATH="/srv/share" - export NFS_SERVER="#{ipaddr}" - HEREDOC - end - end - end - end - end -end diff --git a/contrib/config/backups/nfs/charts/dgraph_nfs.yaml b/contrib/config/backups/nfs/charts/dgraph_nfs.yaml deleted file mode 100644 index 771faceb026..00000000000 --- a/contrib/config/backups/nfs/charts/dgraph_nfs.yaml +++ /dev/null @@ -1,16 +0,0 @@ -backups: - nfs: - enabled: true - mountPath: &path /dgraph/backups - full: - enabled: true - debug: true - incremental: - enabled: true - debug: true - destination: *path -alpha: - configFile: - config.hcl: | - whitelist = "10.0.0.0/8,172.0.0.0/8,192.168.0.0/16" - lru_mb = 2048 diff --git a/contrib/config/backups/nfs/charts/dgraph_volume.yaml b/contrib/config/backups/nfs/charts/dgraph_volume.yaml deleted file mode 100644 index fc8e0106341..00000000000 --- a/contrib/config/backups/nfs/charts/dgraph_volume.yaml +++ /dev/null @@ -1,16 +0,0 @@ -backups: - volume: - enabled: true - mountPath: &path /dgraph/backups - full: - enabled: true - debug: true - incremental: - enabled: true - debug: true - destination: *path -alpha: - configFile: - config.hcl: | - whitelist = "10.0.0.0/8,172.0.0.0/8,192.168.0.0/16" - lru_mb = 2048 diff --git a/contrib/config/backups/nfs/charts/helmfile.yaml b/contrib/config/backups/nfs/charts/helmfile.yaml deleted file mode 100644 index 7afeaf54ab4..00000000000 --- a/contrib/config/backups/nfs/charts/helmfile.yaml +++ /dev/null @@ -1,28 +0,0 @@ -helmfiles: - - ./rook/helmfile.yaml - -repositories: - - name: dgraph - url: https://charts.dgraph.io - -releases: - ######### - # Dgraph helm chart configuration - ################################################# - - name: my-release - namespace: default - chart: dgraph/dgraph - values: - - ./dgraph_{{ env "VOL_TYPE" | default "nfs" }}.yaml - - backups: - {{- if eq (env "VOL_TYPE") "volume" }} - ## backup drive allocated through volume claim - volume: - claim: {{ env "NFS_CLAIM_NAME" }} - ## backup drive allocated through specifying NFS server and path - {{- else }} - nfs: - server: {{ env "NFS_SERVER" }} - path: {{ env "NFS_PATH" }} - storage: {{ env "NFS_CLAIM_SIZE" | default "32Gi" }} - {{- end }} diff --git 
a/contrib/config/backups/nfs/charts/rook/env.sh b/contrib/config/backups/nfs/charts/rook/env.sh deleted file mode 100644 index 2d1bf1bb72b..00000000000 --- a/contrib/config/backups/nfs/charts/rook/env.sh +++ /dev/null @@ -1,14 +0,0 @@ -## global -export NFS_STRATEGY="rook" - -## values for rook -export NFS_SERVER="rook-nfs" -export NFS_PATH="share1" -## storage to use by NFS server -export NFS_DISK_SIZE="32Gi" -## storage to use from NFS server -export NFS_CLAIM_SIZE="32Gi" -export NFS_CLAIM_NAME="rook-nfs-pv-claim" - -## values for dgraph (dynamic = will supply PVC claim to Dgraph) -export VOL_TYPE="volume" diff --git a/contrib/config/backups/nfs/charts/rook/fetch-operator.sh b/contrib/config/backups/nfs/charts/rook/fetch-operator.sh deleted file mode 100755 index 5a80494f588..00000000000 --- a/contrib/config/backups/nfs/charts/rook/fetch-operator.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/bin/env bash - -command -v git >/dev/null || - { - echo "[ERROR]: 'git' command not found" 1>&2 - exit 1 - } - -ROOK_VERSION="v1.4.7" -DEST_PATH="${PWD}/$(dirname "${BASH_SOURCE[0]}")/rook-nfs-operator-kustomize/base" -TEMP_PATH=$(mktemp -d) - -cd "${TEMP_PATH}" || exit -git clone --single-branch --branch "${ROOK_VERSION}" https://github.com/rook/rook.git 2>/dev/null - -for MANIFEST in common.yaml provisioner.yaml operator.yaml; do - cp "${TEMP_PATH}"/rook/cluster/examples/kubernetes/nfs/"${MANIFEST}" "${DEST_PATH}" -done diff --git a/contrib/config/backups/nfs/charts/rook/helmfile.yaml b/contrib/config/backups/nfs/charts/rook/helmfile.yaml deleted file mode 100644 index c52393f3450..00000000000 --- a/contrib/config/backups/nfs/charts/rook/helmfile.yaml +++ /dev/null @@ -1,69 +0,0 @@ -releases: - ######### - # Rook NFS configuration - uses helm charts created from kustomize - ################################################# - {{- if eq (env "NFS_STRATEGY") "rook" }} - - name: rook-nfs-operator - ## NOTE: namespace must also be specified in - ## ./rook-nfs-operator-kustomize/overlays/default/kustomization.yaml - namespace: rook-nfs-system - ## temporary helm chart rendered by helmify - chart: ./rook-nfs-operator - hooks: - - events: - - prepare - - cleanup - command: ./helmify.sh - args: - - "{{`{{if eq .Event.Name \"prepare\"}}build{{else}}clean{{end}}`}}" - - "{{`{{.Release.Chart}}`}}" - - default - - - name: rook-nfs-server - ## NOTE: namespace must also be specified in - ## ./rook-nfs-server-kustomize/overlays/default/kustomization.yaml - namespace: rook-nfs-system - ## temporary helm chart rendered by helmify - chart: ./rook-nfs-server - values: - - nfs: - size: {{ env "NFS_DISK_SIZE" | default "32Gi" }} - path: {{ env "NFS_PATH" | default "share1" }} - hooks: - - events: - - prepare - - cleanup - command: ./helmify.sh - args: - - "{{`{{if eq .Event.Name \"prepare\"}}build{{else}}clean{{end}}`}}" - - "{{`{{.Release.Chart}}`}}" - - default - needs: - - rook-nfs-system/rook-nfs-operator - disableValidation: true - - - name: rook-nfs-storageclass - ## NOTE: namespace must also be specified in - ## ./rook-nfs-storageclass-kustomize/overlays/default/kustomization.yaml - namespace: rook-nfs-system - ## temporary helm chart rendered by helmify - chart: ./rook-nfs-storageclass - values: - - nfs: - server: {{ env "NFS_SERVER" | default "rook-nfs" }} - path: {{ env "NFS_PATH" | default "share1" }} - namespace: rook-nfs-system - claim: - size: {{ env "NFS_CLAIM_SIZE" | default "32Gi" }} - name: {{ env "NFS_CLAIM_NAME" | default "rook-nfs-pv-claim" }} - namespace: default - hooks: - - events: - - prepare - 
- cleanup - command: ./helmify.sh - args: - - "{{`{{if eq .Event.Name \"prepare\"}}build{{else}}clean{{end}}`}}" - - "{{`{{.Release.Chart}}`}}" - - default - {{- end }} diff --git a/contrib/config/backups/nfs/charts/rook/helmify.sh b/contrib/config/backups/nfs/charts/rook/helmify.sh deleted file mode 100755 index 72c3c76e271..00000000000 --- a/contrib/config/backups/nfs/charts/rook/helmify.sh +++ /dev/null @@ -1,55 +0,0 @@ -#!/usr/bin/env bash - -cmd=$1 -chart=$2 -env=$3 -dir=${chart}-kustomize - -chart=${chart/.\//} - -build() { - if [[ ! -d ${dir} ]]; then - echo "directory \"${dir}\" does not exist. make a kustomize project there in order to generate a local helm chart at ${chart}/ from it!" 1>&2 - exit 1 - fi - - mkdir -p "${chart}"/templates - echo "generating ${chart}/Chart.yaml" 1>&2 - cat <<EOF >"${chart}"/Chart.yaml -apiVersion: v1 -appVersion: "1.0" -description: A Helm chart for Kubernetes -name: ${chart} -version: 0.1.0 -EOF - echo "generating ${chart}/templates/NOTES.txt" 1>&2 - cat <<EOF >"${chart}"/templates/NOTES.txt -${chart} has been installed as release {{ .Release.Name }}. - -Run \`helm status {{ .Release.Name }}\` for more information. -Run \`helm delete --purge {{.Release.Name}}\` to uninstall. -EOF - echo "running kustomize" 1>&2 - ( - cd "${dir}" || exit - kubectl kustomize overlays/"${env}" - ) >"${chart}"/templates/all.yaml - echo "running helm lint" 1>&2 - helm lint "${chart}" - echo "generated the following files:" - tree "${chart}" -} - -clean() { - rm "${chart}"/Chart.yaml - rm "${chart}"/templates/*.{yaml,txt} -} - -case "${cmd}" in -"build") build ;; -"clean") clean ;; -*) - echo "unsupported command: ${cmd}" 1>&2 - exit 1 - ;; -esac diff --git a/contrib/config/backups/nfs/charts/rook/rook-nfs-operator-kustomize/base/.gitignore b/contrib/config/backups/nfs/charts/rook/rook-nfs-operator-kustomize/base/.gitignore deleted file mode 100644 index c6c0622f1ef..00000000000 --- a/contrib/config/backups/nfs/charts/rook/rook-nfs-operator-kustomize/base/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -common.yaml -operator.yaml -provisioner.yaml diff --git a/contrib/config/backups/nfs/charts/rook/rook-nfs-operator-kustomize/base/kustomization.yaml b/contrib/config/backups/nfs/charts/rook/rook-nfs-operator-kustomize/base/kustomization.yaml deleted file mode 100644 index db53aac86d4..00000000000 --- a/contrib/config/backups/nfs/charts/rook/rook-nfs-operator-kustomize/base/kustomization.yaml +++ /dev/null @@ -1,7 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -resources: - - common.yaml - - provisioner.yaml - - operator.yaml diff --git a/contrib/config/backups/nfs/charts/rook/rook-nfs-operator-kustomize/overlays/default/kustomization.yaml b/contrib/config/backups/nfs/charts/rook/rook-nfs-operator-kustomize/overlays/default/kustomization.yaml deleted file mode 100644 index 85c7456201c..00000000000 --- a/contrib/config/backups/nfs/charts/rook/rook-nfs-operator-kustomize/overlays/default/kustomization.yaml +++ /dev/null @@ -1,7 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -namespace: rook-nfs-system - -bases: - - ../../base diff --git a/contrib/config/backups/nfs/charts/rook/rook-nfs-server-kustomize/base/kustomization.yaml b/contrib/config/backups/nfs/charts/rook/rook-nfs-server-kustomize/base/kustomization.yaml deleted file mode 100644 index fd7195d9696..00000000000 --- a/contrib/config/backups/nfs/charts/rook/rook-nfs-server-kustomize/base/kustomization.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 
-kind: Kustomization - -resources: - - sa.yaml - - nfs.yaml diff --git a/contrib/config/backups/nfs/charts/rook/rook-nfs-server-kustomize/base/nfs.yaml b/contrib/config/backups/nfs/charts/rook/rook-nfs-server-kustomize/base/nfs.yaml deleted file mode 100644 index a6b07622642..00000000000 --- a/contrib/config/backups/nfs/charts/rook/rook-nfs-server-kustomize/base/nfs.yaml +++ /dev/null @@ -1,30 +0,0 @@ ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: nfs-default-claim -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - ## Size to allocate from default storage class - storage: "{{ .Values.nfs.size }}" ---- -apiVersion: nfs.rook.io/v1alpha1 -kind: NFSServer -metadata: - name: rook-nfs -spec: - replicas: 1 - exports: - - name: "{{ .Values.nfs.path }}" - server: - accessMode: ReadWrite - squash: "none" - ## A Persistent Volume Claim must be created before creating NFS CRD instance. - persistentVolumeClaim: - claimName: nfs-default-claim - ## A key/value list of annotations - annotations: - rook: nfs diff --git a/contrib/config/backups/nfs/charts/rook/rook-nfs-server-kustomize/base/sa.yaml b/contrib/config/backups/nfs/charts/rook/rook-nfs-server-kustomize/base/sa.yaml deleted file mode 100644 index 19e79feee39..00000000000 --- a/contrib/config/backups/nfs/charts/rook/rook-nfs-server-kustomize/base/sa.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: rook-nfs-server diff --git a/contrib/config/backups/nfs/charts/rook/rook-nfs-server-kustomize/overlays/default/kustomization.yaml b/contrib/config/backups/nfs/charts/rook/rook-nfs-server-kustomize/overlays/default/kustomization.yaml deleted file mode 100644 index 85c7456201c..00000000000 --- a/contrib/config/backups/nfs/charts/rook/rook-nfs-server-kustomize/overlays/default/kustomization.yaml +++ /dev/null @@ -1,7 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -namespace: rook-nfs-system - -bases: - - ../../base diff --git a/contrib/config/backups/nfs/charts/rook/rook-nfs-storageclass-kustomize/base/kustomization.yaml b/contrib/config/backups/nfs/charts/rook/rook-nfs-storageclass-kustomize/base/kustomization.yaml deleted file mode 100644 index d5231553fe3..00000000000 --- a/contrib/config/backups/nfs/charts/rook/rook-nfs-storageclass-kustomize/base/kustomization.yaml +++ /dev/null @@ -1,5 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -resources: - - sc.yaml diff --git a/contrib/config/backups/nfs/charts/rook/rook-nfs-storageclass-kustomize/base/sc.yaml b/contrib/config/backups/nfs/charts/rook/rook-nfs-storageclass-kustomize/base/sc.yaml deleted file mode 100644 index d689aa16e38..00000000000 --- a/contrib/config/backups/nfs/charts/rook/rook-nfs-storageclass-kustomize/base/sc.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - labels: - app: rook-nfs - name: rook-nfs-{{ .Values.nfs.path }} -parameters: - exportName: "{{ .Values.nfs.path }}" - nfsServerName: "{{ .Values.nfs.server }}" - nfsServerNamespace: "{{ .Values.nfs.namespace }}" -provisioner: rook.io/nfs-provisioner -reclaimPolicy: Retain -volumeBindingMode: Immediate diff --git a/contrib/config/backups/nfs/charts/rook/rook-nfs-storageclass-kustomize/overlays/default/kustomization.yaml b/contrib/config/backups/nfs/charts/rook/rook-nfs-storageclass-kustomize/overlays/default/kustomization.yaml deleted file mode 100644 index 4fffe79e92c..00000000000 --- 
a/contrib/config/backups/nfs/charts/rook/rook-nfs-storageclass-kustomize/overlays/default/kustomization.yaml +++ /dev/null @@ -1,8 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -bases: - - ../../base - -resources: - - pvc.yaml diff --git a/contrib/config/backups/nfs/charts/rook/rook-nfs-storageclass-kustomize/overlays/default/pvc.yaml b/contrib/config/backups/nfs/charts/rook/rook-nfs-storageclass-kustomize/overlays/default/pvc.yaml deleted file mode 100644 index 0a96e56f050..00000000000 --- a/contrib/config/backups/nfs/charts/rook/rook-nfs-storageclass-kustomize/overlays/default/pvc.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: "{{ .Values.nfs.claim.name }}" - namespace: "{{ .Values.nfs.claim.namespace }}" -spec: - storageClassName: "rook-nfs-{{ .Values.nfs.path }}" - accessModes: - - ReadWriteMany - resources: - requests: - ## Allocation to use from Server - storage: "{{ .Values.nfs.claim.size }}" diff --git a/contrib/config/backups/nfs/docker-compose.yml b/contrib/config/backups/nfs/docker-compose.yml deleted file mode 100644 index 8dccd2d3291..00000000000 --- a/contrib/config/backups/nfs/docker-compose.yml +++ /dev/null @@ -1,39 +0,0 @@ -version: "3.5" -services: - zero1: - image: dgraph/dgraph:${DGRAPH_VERSION} - container_name: zero1 - working_dir: /data/zero1 - ports: - - 5080:5080 - - 6080:6080 - command: dgraph zero --my=zero1:5080 --replicas 1 --raft="idx=1;" - - alpha1: - image: dgraph/dgraph:${DGRAPH_VERSION} - container_name: alpha1 - working_dir: /data/alpha1 - ports: - - 8080:8080 - - 9080:9080 - command: dgraph alpha --my=alpha1:7080 --lru_mb=1024 --zero=zero1:5080 - --security "whitelist=10.0.0.0/8,172.0.0.0/8,192.168.0.0/16,127.0.0.1;" - volumes: - - type: volume - source: nfsmount - target: /data/backups - volume: - nocopy: true - ratel: - image: dgraph/ratel:${DGRAPH_VERSION} - ports: - - 8000:8000 - container_name: ratel - -volumes: - nfsmount: - driver: local - driver_opts: - type: nfs - o: addr=${NFS_SERVER},rw,nolock,soft,nointr,nfsvers=4 - device: ":${NFS_PATH}" diff --git a/contrib/config/backups/nfs/efs-terraform/README.md b/contrib/config/backups/nfs/efs-terraform/README.md deleted file mode 100644 index b256e277e4f..00000000000 --- a/contrib/config/backups/nfs/efs-terraform/README.md +++ /dev/null @@ -1,146 +0,0 @@ -# Amazon Elastic File Services with Terraform - -These [Terraform](https://www.terraform.io/) scripts and modules will create the resources required -to support an NFS server instance using [Amazon Elastic File Services](https://aws.amazon.com/efs/). 
- -This automation script will create the following resources: - -- [EFS](https://aws.amazon.com/efs/) Server -- SG to allow EKS worker nodes to access the [EFS](https://aws.amazon.com/efs/) Server (if discovery - used) -- Configuration file (`../env.sh`) that specifies NFS Server and Path - -## Prerequisites - -To use this automation, you must install the following: - -- [AWS CLI](https://aws.amazon.com/cli/) - AWS CLI installed and configured with local profile -- [Terraform](https://www.terraform.io/downloads.html) - tool used to provision resources and create - templates - -## Configuration - -You can use the following input variables to configure this automation: - -- **Required** -- `vpc_name` or `vpc_id` - specify either an explicit `vpc_id` or the VPC's `Name` tag value -- `subnets` or use [discovery](#discovery) - specify Subnet IDs for subnets that will have access to - EFS, or have this discovered automatically -- **Optional** -- `security_groups` or use [discovery](#discovery) - specify the IDs of security groups that will be - allowed access to the EFS server, or have this discovered automatically. -- `dns_name` with `dns_domain` or `zone_id` - this is used to create a friendly alternative name - such as `myfileserver.devtest.mycompany.com` -- `encrypted` (default: false) - whether EFS storage is encrypted or not - -## Discovery - -Configuring the following values allows this automation to discover the resources used to configure -EFS. These can be overridden by specifying explicit values as input variables. - -These are the values affected by discovery: - -- **VPC Name** - you can supply either an explicit `vpc_id` or a `vpc_name` if the VPC has a tag key of - `Name`. -- **EKS Cluster Name** - if `eks_cluster_name` is not specified, then the VPC tag `Name` will be - used as the EKS Cluster Name. This is the default configuration if both the VPC and the EKS cluster - were provisioned by `eksctl`. -- **Private Subnets** - if `subnets` is not specified, private subnets used by an EKS cluster can be - discovered provided that the tags are set up appropriately (see - [Requirements for Discovery](#requirements-for-discovery)) -- **Security Group** (optional for access) - if `security_groups` is not specified, this security - group can be discovered provided that the tags are set up appropriately (see - [Requirements for Discovery](#requirements-for-discovery)) -- **DNS Domain** (optional for DNS name) - a domain name, e.g. `devtest.mycompany.com.`, managed by - Route53 can be specified to fetch a Zone ID; otherwise a `zone_id` must be specified to use this - feature. When using this, you need to supply the CNAME you want to use, e.g. `myfileserver`, with - `dns_name` - -### Requirements for Discovery - -You will need the appropriate tags configured on your subnets and security groups to support the -discovery feature. This feature allows these [Terraform](https://www.terraform.io/) scripts to -find the resources required to configure EFS alongside an Amazon EKS cluster, along with the SG -configuration that allows EKS worker nodes to access EFS. If you used `eksctl` to provision your -cluster, these tags and keys will be set up automatically. 
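-
-If you want to sanity-check these tags before running Terraform, the AWS CLI can list the matching
-resources. This is only a sketch; the cluster name `dgraph-eks-test-cluster` is an example value:
-
-```bash
-## Example: verify discovery tags (cluster name is an assumption)
-EKS_CLUSTER_NAME="dgraph-eks-test-cluster"
-
-## private subnets tagged for the EKS cluster (see the Subnets table below)
-aws ec2 describe-subnets \
-  --filters "Name=tag:kubernetes.io/cluster/${EKS_CLUSTER_NAME},Values=shared" \
-            "Name=tag:kubernetes.io/role/internal-elb,Values=1" \
-  --query "Subnets[].SubnetId"
-
-## security group owned by the EKS cluster (see the Security Groups table below)
-aws ec2 describe-security-groups \
-  --filters "Name=tag:kubernetes.io/cluster/${EKS_CLUSTER_NAME},Values=owned" \
-  --query "SecurityGroups[].GroupId"
-```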
- -#### Subnets - -Your private subnets where EKS is installed should have the following tags: - -| Tag Key | Tag Value | -| ------------------------------------------- | --------- | -| `kubernetes.io/cluster/${EKS_CLUSTER_NAME}` | `shared` | -| `kubernetes.io/role/internal-elb` | `1` | - -#### Security Groups - -A security group used to allow access to EKS Nodes needs to have the following tags: - -| Tag Key | Tag Value | -| ------------------------------------------- | -------------------- | -| `kubernetes.io/cluster/${EKS_CLUSTER_NAME}` | `owned` | -| `aws:eks:cluster-name` | `{EKS_CLUSTER_NAME}` | - -## Steps - -### Define Variables - -If discovery was configured (see [Requirements for Discovery](#requirements-for-discovery)), you can -specify just the following in your `terraform.tfvars` file: - -```hcl -vpc_name = "dgraph-eks-test-cluster" -region = "us-east-2" - -## optional DNS values -dns_name = "myfileserver" -dns_domain = "devtest.example.com." -``` - -Alternatively, you can supply the SG IDs and Subnet IDs explicitly in `terraform.tfvars`: - -```hcl -vpc_id = "vpc-xxxxxxxxxxxxxxxxx" -eks_cluster_name = "dgraph-eks-test-cluster" -region = "us-east-2" - -## optional DNS values -dns_name = "myfileserver" -zone_id = "XXXXXXXXXXXXXXXXXXXX" - -## Specify subnets and security groups explicitly -subnets = [ - "subnet-xxxxxxxxxxxxxxxxx", - "subnet-xxxxxxxxxxxxxxxxx", - "subnet-xxxxxxxxxxxxxxxxx", -] - -security_groups = [ - "sg-xxxxxxxxxxxxxxxxx", -] -``` - -### Download Plugins and Modules - -```bash -terraform init -``` - -### Prepare and Provision Resources - -```bash -## get a list of changes that will be made -terraform plan -## apply the changes -terraform apply -``` - -## Cleanup - -When finished, you can destroy resources created with [Terraform](https://www.terraform.io/) using -this: - -```bash -terraform destroy -``` diff --git a/contrib/config/backups/nfs/efs-terraform/main.tf b/contrib/config/backups/nfs/efs-terraform/main.tf deleted file mode 100644 index 6665349ca46..00000000000 --- a/contrib/config/backups/nfs/efs-terraform/main.tf +++ /dev/null @@ -1,115 +0,0 @@ -##################################################################### -# Locals -##################################################################### - -locals { - ## Use specified vpc_id or search by vpc tag name - vpc_id = var.vpc_id != "" ? var.vpc_id : data.aws_vpc.vpc_by_name[0].id - vpc_name = var.vpc_name != "" ? var.vpc_name : data.aws_vpc.vpc_by_id[0].tags["Name"] - - ## lookup zone_id if dns_domain is passed - zone_id = var.dns_domain == "" ? var.zone_id : data.aws_route53_zone.devtest[0].zone_id - - ## use vpc tag name as eks cluster name if not specified (default behavior with eksctl) - eks_cluster_name = var.eks_cluster_name != "" ? var.eks_cluster_name : local.vpc_name - - ## fetch list of private subnets in current VPC if list of subnet IDs not specified - subnets = length(var.subnets) > 0 ? var.subnets : data.aws_subnet_ids.private[0].ids - - ## fetch EKS Node SG if list of SG IDs are not specified - security_groups = length(var.security_groups) > 0 ? var.security_groups : [data.aws_security_group.eks_nodes[0].id] - - env_vars = { - nfs_server = local.zone_id == "" ? 
module.efs.dns_name : module.efs.host - nfs_path = "/" - } - - env_sh = templatefile("${path.module}/templates/env.sh.tmpl", local.env_vars) -} - -###################################################################### -## Datasources -###################################################################### - -data "aws_vpc" "vpc_by_name" { - count = var.vpc_name == "" ? 0 : 1 - - tags = { - Name = var.vpc_name - } -} - -data "aws_vpc" "vpc_by_id" { - count = var.vpc_id == "" ? 0 : 1 - id = local.vpc_id -} - -## fetch private subnets if subnets were not specified -data "aws_subnet_ids" "private" { - count = length(var.subnets) > 0 ? 0 : 1 - vpc_id = local.vpc_id - - ## Search for Subnets used by the specific EKS Cluster - filter { - name = "tag:kubernetes.io/cluster/${local.eks_cluster_name}" - values = ["shared"] - } - - ## Search for Subnets designated as private for the EKS Cluster - filter { - name = "tag:kubernetes.io/role/internal-elb" - values = [1] - } -} - -## lookup zone if dns_domain specified -data "aws_route53_zone" "devtest" { - count = var.dns_domain == "" ? 0 : 1 - name = var.dns_domain -} - -## lookup SG ID used for EKS Nodes if not specified -## NOTE: If created by eksctl, the SG will have this description: -## EKS created security group applied to ENI that is attached to EKS -## Control Plane master nodes, as well as any managed workloads. -data "aws_security_group" "eks_nodes" { - count = length(var.security_groups) > 0 ? 0 : 1 - - filter { - name = "tag:aws:eks:cluster-name" - values = ["${local.eks_cluster_name}"] - } - - filter { - name = "tag:kubernetes.io/cluster/${local.eks_cluster_name}" - values = ["owned"] - } -} - - -##################################################################### -# Modules -##################################################################### -module "efs" { - source = "git::https://github.com/cloudposse/terraform-aws-efs.git?ref=tags/0.22.0" - namespace = "dgraph" - stage = "test" - name = "fileserver" - region = var.region - vpc_id = local.vpc_id - subnets = local.subnets - security_groups = local.security_groups - zone_id = local.zone_id - dns_name = var.dns_name - encrypted = var.encrypted -} - -###################################################################### -## Create ../env.sh -###################################################################### -resource "local_file" "env_sh" { - count = var.create_env_sh ? 
1 : 0 - content = local.env_sh - filename = "${path.module}/../env.sh" - file_permission = "0644" -} diff --git a/contrib/config/backups/nfs/efs-terraform/output.tf b/contrib/config/backups/nfs/efs-terraform/output.tf deleted file mode 100644 index bfd12b51179..00000000000 --- a/contrib/config/backups/nfs/efs-terraform/output.tf +++ /dev/null @@ -1,55 +0,0 @@ - -output "efs_arn" { - value = module.efs.arn - description = "EFS ARN" -} - -output "efs_id" { - value = module.efs.id - description = "EFS ID" -} - -output "efs_host" { - value = module.efs.host - description = "Route53 DNS hostname for the EFS" -} - -output "efs_dns_name" { - value = module.efs.dns_name - description = "EFS DNS name" -} - -output "efs_mount_target_dns_names" { - value = module.efs.mount_target_dns_names - description = "List of EFS mount target DNS names" -} - -output "efs_mount_target_ids" { - value = module.efs.mount_target_ids - description = "List of EFS mount target IDs (one per Availability Zone)" -} - -output "efs_mount_target_ips" { - value = module.efs.mount_target_ips - description = "List of EFS mount target IPs (one per Availability Zone)" -} - -output "efs_network_interface_ids" { - value = module.efs.network_interface_ids - description = "List of mount target network interface IDs" -} - -output "security_group_id" { - value = module.efs.security_group_id - description = "EFS Security Group ID" -} - -output "security_group_arn" { - value = module.efs.security_group_arn - description = "EFS Security Group ARN" -} - -output "security_group_name" { - value = module.efs.security_group_name - description = "EFS Security Group name" -} diff --git a/contrib/config/backups/nfs/efs-terraform/provider.tf b/contrib/config/backups/nfs/efs-terraform/provider.tf deleted file mode 100644 index 685eca8e84f..00000000000 --- a/contrib/config/backups/nfs/efs-terraform/provider.tf +++ /dev/null @@ -1,6 +0,0 @@ -##################################################################### -# Provider: Amazon Web Services -##################################################################### -provider "aws" { - region = var.region -} diff --git a/contrib/config/backups/nfs/efs-terraform/templates/env.sh.tmpl b/contrib/config/backups/nfs/efs-terraform/templates/env.sh.tmpl deleted file mode 100644 index 4b8a69f2efe..00000000000 --- a/contrib/config/backups/nfs/efs-terraform/templates/env.sh.tmpl +++ /dev/null @@ -1,3 +0,0 @@ -## Configuration generated by Terraform EFS automation -export NFS_PATH="${nfs_path}" -export NFS_SERVER="${nfs_server}" diff --git a/contrib/config/backups/nfs/efs-terraform/variables.tf b/contrib/config/backups/nfs/efs-terraform/variables.tf deleted file mode 100644 index 9b04cae5e19..00000000000 --- a/contrib/config/backups/nfs/efs-terraform/variables.tf +++ /dev/null @@ -1,73 +0,0 @@ -##################################################################### -# Required Variables -##################################################################### - -## Required by AWS Provider -variable "region" {} - -## Must Supply VPC ID or VPC Tag Name -variable "vpc_id" { - type = string - description = "VPC ID" - default = "" -} - -variable "vpc_name" { - type = string - description = "VPC Tag Name used to search for VPC ID" - default = "" -} - -##################################################################### -# Optional Variables -##################################################################### -variable "eks_cluster_name" { - type = string - description = "Name of EKS Cluster (specify if VPC Tag Name is 
different from EKS Cluster Name)" - default = "" -} - -variable "dns_name" { - type = string - description = "Name of Server, e.g. myfileserver" - default = "" -} - -## Specify Route53 Zone ID or DNS Domain Name used to search for Route53 Zone ID -variable "dns_domain" { - type = string - description = "Domain used to search for Route53 DNS Zone ID, e.g. devtest.mycompany.com" - default = "" -} - -variable "zone_id" { - type = string - description = "Route53 DNS Zone ID" - default = "" -} - -variable "encrypted" { - type = bool - description = "If true, the file system will be encrypted" - default = false -} - -variable "create_env_sh" { - type = bool - description = "If true, env.sh will be created for use with Docker-Compose or Kubernetes" - default = true -} - -variable "security_groups" { - type = list(string) - description = "Security group IDs to allow access to the EFS" - default = [] -} - - -## Supply List of Subnet IDs or search for private subnets based on eksctl tag names -variable "subnets" { - type = list(string) - description = "Subnet IDs" - default = [] -} \ No newline at end of file diff --git a/contrib/config/backups/nfs/gcfs-cli/README.md b/contrib/config/backups/nfs/gcfs-cli/README.md deleted file mode 100644 index 68ff068cedd..00000000000 --- a/contrib/config/backups/nfs/gcfs-cli/README.md +++ /dev/null @@ -1,77 +0,0 @@ -# Google Cloud Filestore using Google Cloud SDK (Shell) - -This shell script creates the resources needed to create an NFS server instance using Google Cloud -Filestore. - -This automation will create the following resources: - -- [Google Cloud Filestore Server](https://cloud.google.com/filestore) -- Configuration file (`../env.sh`) that specifies NFS Server and Path - -## Prerequisites - -You need the following installed to use this automation: - -- [Google Cloud SDK](https://cloud.google.com/sdk/docs/install) - for the `gcloud` command and - required to access Google Cloud. -- [bash](https://www.gnu.org/software/bash/) - shell environment - -## Configuration - -You will need to define these environment variables: - -- Required Variables: - - `MY_FS_NAME` (required) - Name of Filestore instance. -- Optional Variables: - - `MY_PROJECT` (defaults to the currently configured project) - Project with billing enabled to create - Filestore instance. - - `MY_ZONE` (default `us-central1-b`) - zone where Filestore instance will be created - - `MY_FS_CAPACITY` (default `1TB`) - size of the storage used for Filestore - - `MY_FS_SHARE_NAME` (default `volumes`) - NFS path - -## Create Filestore - -Run these steps to create [filestore](https://cloud.google.com/filestore) and populate the -configuration (`../env.sh`) - -### Define Variables - -You can create an `env.sh` with the desired values, for example: - -```bash -cat <<-EOF > env.sh -export MY_FS_NAME="my-organization-nfs-server" -export MY_PROJECT="my-organization-test" -export MY_ZONE="us-central1-b" -EOF -``` - -These values can be used to create and destroy [filestore](https://cloud.google.com/filestore). - -### Run the Script - -```bash -## get env vars used to create filestore -. env.sh -## create filestore and populate ../env.sh -./create_gcfs.sh -``` - -## Cleanup - -You can run these commands to delete the resources (with prompts) on GCP. - -```bash -## get env vars used to create filestore -. 
env.sh - -## conditionally delete filestore if it exists (idempotent) -if gcloud filestore instances list | grep -q ${MY_FS_NAME}; then - gcloud filestore instances delete ${MY_FS_NAME} \ - --project=${MY_PROJECT} \ - --zone=${MY_ZONE} -fi - -## remove configuration that points to deleted filestore -rm ../env.sh -``` diff --git a/contrib/config/backups/nfs/gcfs-cli/create_gcfs.sh b/contrib/config/backups/nfs/gcfs-cli/create_gcfs.sh deleted file mode 100755 index 60535db0dac..00000000000 --- a/contrib/config/backups/nfs/gcfs-cli/create_gcfs.sh +++ /dev/null @@ -1,88 +0,0 @@ -#!/usr/bin/env bash - -set -e - -##### -# main -################## -main() { - check_environment $@ - create_filestore - create_config_values -} - -##### -# check_environment -################## -check_environment() { - ## Check for Google Cloud SDK command - command -v gcloud >/dev/null || - { - echo "[ERROR]: 'gcloud' command not found" 1>&2 - exit 1 - } - - if [[ -z ${MY_FS_NAME} ]]; then - if (($# < 1)); then - printf "[ERROR]: Need at least one parameter or define 'MY_FS_NAME'\n\n" 1>&2 - printf "Usage:\n\t$0 \n\tMY_FS_NAME= $0\n" 1>&2 - exit 1 - fi - fi - - MY_PROJECT=${MY_PROJECT:-$(gcloud config get-value project)} - MY_ZONE=${MY_ZONE:-"us-central1-b"} - MY_FS_TIER=${MY_FS_TIER:-"STANDARD"} - MY_FS_CAPACITY=${MY_FS_CAPACITY:-"1TB"} - MY_FS_SHARE_NAME=${MY_FS_SHARE_NAME:-"volumes"} - MY_NETWORK_NAME=${MY_NETWORK_NAME:-"default"} - MY_FS_NAME=${MY_FS_NAME:-$1} - CREATE_ENV_VALUES=${CREATE_ENV_VALUES:-"true"} - -} - -##### -# create_filestore -################## -create_filestore() { - if ! gcloud filestore instances list | grep -q "${MY_FS_NAME}"; then - gcloud filestore instances create "${MY_FS_NAME}" \ - --project="${MY_PROJECT}" \ - --zone="${MY_ZONE}" \ - --tier="${MY_FS_TIER}" \ - --file-share=name="${MY_FS_SHARE_NAME}",capacity="${MY_FS_CAPACITY}" \ - --network=name="${MY_NETWORK_NAME}" - fi -} - -##### -# create_config_values -################## -create_config_values() { - ## TODO: Verify Server Exists - - ## Create env.sh configuration pointing at the new Filestore instance - if [[ ${CREATE_ENV_VALUES} =~ true|(y)es ]]; then - echo "[INFO]: Creating 'env.sh' file" - SERVER_ADDRESS=$( - gcloud filestore instances describe "${MY_FS_NAME}" \ - --project="${MY_PROJECT}" \ - --zone="${MY_ZONE}" \ - --format="value(networks.ipAddresses[0])" - ) - SERVER_SHARE=$( - gcloud filestore instances describe "${MY_FS_NAME}" \ - --project="${MY_PROJECT}" \ - --zone="${MY_ZONE}" \ - --format="value(fileShares[0].name)" - ) - - cat <<-EOF >../env.sh - ## Configuration generated by 'create_gcfs.sh' script - export NFS_PATH="${SERVER_SHARE}" - export NFS_SERVER="${SERVER_ADDRESS}" - EOF - fi -} - -main $@ diff --git a/contrib/config/backups/nfs/gcfs-terraform/README.md b/contrib/config/backups/nfs/gcfs-terraform/README.md deleted file mode 100644 index b29f4d06a2b..00000000000 --- a/contrib/config/backups/nfs/gcfs-terraform/README.md +++ /dev/null @@ -1,73 +0,0 @@ -# Google Cloud Filestore with Terraform - -These [Terraform](https://www.terraform.io/) scripts and modules will create the resources required -to create an NFS server instance using Google Cloud Filestore. 
- -This automation will create the following resources: - -- [Google Cloud Filestore Server](https://cloud.google.com/filestore) -- Configuration file (`../env.sh`) that specifies NFS Server and Path - -## Prerequisites - -You need the following installed to use this automation: - -- [Google Cloud SDK](https://cloud.google.com/sdk/docs/install) - for the `gcloud` command and - required to access Google Cloud. - - Google Project with billing enabled -- [Terraform](https://www.terraform.io/downloads.html) - tool used to provision resources and create - templates - -## Configuration - -You will need to define the following variables: - -- Required Variables: - - `project_id` (required) - a globally unique name for the Google project that will contain the - Filestore instance - - `name` (required) - name of GCFS server instance -- Optional Variables: - - `zone` (default = `us-central1-b`) - specify zone where instances will be located - - `tier` (default = `STANDARD`) - service tier of the instance, e.g. `TIER_UNSPECIFIED`, - `STANDARD`, `PREMIUM`, `BASIC_HDD`, `BASIC_SSD`, and `HIGH_SCALE_SSD`. - - `network` (default = `default`) - specify a GCE VPC network to which the instance is connected. - - `capacity_gb` (default = `1024`) - specify file share capacity in GiB (minimum of `1024`) - - `share_name` (default = `volumes`) - specify a name for the file share - -## Steps - -### Define Variables - -You can define these when prompted, in a `terraform.tfvars` file, or through command-line variables, -e.g. `TF_VAR_project_id` and `TF_VAR_name`. Below is an example -`terraform.tfvars` file: - -```terraform -## terraform.tfvars -name = "my-company-nfs-backups" -project_id = "my-company-test" -``` - -### Download Plugins and Modules - -```bash -terraform init -``` - -### Prepare and Provision Resources - -```bash -## get a list of changes that will be made -terraform plan -## apply the changes -terraform apply -``` - -## Cleanup - -When finished, you can destroy resources created with [Terraform](https://www.terraform.io/) using -this: - -```bash -terraform destroy -``` diff --git a/contrib/config/backups/nfs/gcfs-terraform/main.tf b/contrib/config/backups/nfs/gcfs-terraform/main.tf deleted file mode 100644 index 844ac9fe210..00000000000 --- a/contrib/config/backups/nfs/gcfs-terraform/main.tf +++ /dev/null @@ -1,43 +0,0 @@ -variable "name" {} -variable "project_id" {} -variable "zone" { default = "us-central1-b" } -variable "tier" { default = "STANDARD" } -variable "network" { default = "default" } -variable "capacity_gb" { default = 1024 } -variable "share_name" { default = "volumes" } -variable "create_env_sh" { default = true } - -##################################################################### -# Google Cloud Filestore instance -##################################################################### -module "gcfs" { - source = "./modules/simple_gcfs" - name = var.name - zone = var.zone - tier = var.tier - network = var.network - capacity_gb = var.capacity_gb - share_name = var.share_name -} - -##################################################################### -# Locals -##################################################################### -locals { - env_vars = { - nfs_server = module.gcfs.nfs_server - nfs_path = module.gcfs.nfs_path - } - - env_sh = templatefile("${path.module}/templates/env.sh.tmpl", local.env_vars) -} - -##################################################################### -# Create ../env.sh 
-#####################################################################
-resource "local_file" "env_sh" {
-  count           = var.create_env_sh ? 1 : 0
-  content         = local.env_sh
-  filename        = "${path.module}/../env.sh"
-  file_permission = "0644"
-}
diff --git a/contrib/config/backups/nfs/gcfs-terraform/modules/simple_gcfs/main.tf b/contrib/config/backups/nfs/gcfs-terraform/modules/simple_gcfs/main.tf
deleted file mode 100644
index 5a5a37fb80e..00000000000
--- a/contrib/config/backups/nfs/gcfs-terraform/modules/simple_gcfs/main.tf
+++ /dev/null
@@ -1,47 +0,0 @@
-# gcloud filestore instances create ${MY_FS_NAME} \
-#   --project=${MY_PROJECT} \
-#   --zone=${MY_ZONE} \
-#   --tier=${MY_FS_TIER} \
-#   --file-share=name="${MY_FS_SHARE_NAME}",capacity=${MY_FS_CAPACITY} \
-#   --network=name="${MY_NETWORK_NAME}"
-
-# MY_PROJECT=${MY_PROJECT:-$(gcloud config get-value project)}
-# MY_ZONE=${MY_ZONE:-"us-central1-b"}
-# MY_FS_TIER=${MY_FS_TIER:-"STANDARD"}
-# MY_FS_CAPACITY=${MY_FS_CAPACITY:-"1TB"}
-# MY_FS_SHARE_NAME=${MY_FS_SHARE_NAME:-"volumes"}
-# MY_NETWORK_NAME=${MY_NETWORK_NAME:-"default"}
-# MY_FS_NAME=${MY_FS_NAME:-$1}
-# CREATE_ENV_VALUES=${CREATE_ENV_VALUES:-"true"}
-
-variable "name" {}
-variable "zone" { default = "us-central1-b" }
-variable "tier" { default = "STANDARD" }
-variable "network" { default = "default" }
-variable "capacity_gb" { default = 1024 }
-variable "share_name" { default = "volumes" }
-
-resource "google_filestore_instance" "instance" {
-  name = var.name
-  zone = var.zone
-  tier = var.tier
-
-  file_shares {
-    capacity_gb = var.capacity_gb
-    name        = var.share_name
-  }
-
-  networks {
-    network = var.network
-    modes   = ["MODE_IPV4"]
-  }
-}
-
-output "nfs_server" {
-  value = google_filestore_instance.instance.networks[0].ip_addresses[0]
-}
-
-output "nfs_path" {
-  value = "/${google_filestore_instance.instance.file_shares[0].name}"
-}
diff --git a/contrib/config/backups/nfs/gcfs-terraform/provider.tf b/contrib/config/backups/nfs/gcfs-terraform/provider.tf
deleted file mode 100644
index 1505e9f0afa..00000000000
--- a/contrib/config/backups/nfs/gcfs-terraform/provider.tf
+++ /dev/null
@@ -1,9 +0,0 @@
-provider "google" {
-  version = "~> 3.38.0"
-  # region  = var.region
-  project = var.project_id
-}
-
-provider "random" {
-  version = "2.3.0"
-}
diff --git a/contrib/config/backups/nfs/gcfs-terraform/templates/env.sh.tmpl b/contrib/config/backups/nfs/gcfs-terraform/templates/env.sh.tmpl
deleted file mode 100644
index b2c52fc0294..00000000000
--- a/contrib/config/backups/nfs/gcfs-terraform/templates/env.sh.tmpl
+++ /dev/null
@@ -1,3 +0,0 @@
-## Configuration generated by Terraform GCFS automation
-export NFS_PATH="${nfs_path}"
-export NFS_SERVER="${nfs_server}"
diff --git a/contrib/config/backups/nfs/helmfile.yaml b/contrib/config/backups/nfs/helmfile.yaml
deleted file mode 100644
index 78b0eeffd54..00000000000
--- a/contrib/config/backups/nfs/helmfile.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
-helmfiles:
-  - ./charts/helmfile.yaml
diff --git a/contrib/config/backups/nfs/vagrant/helper.rb b/contrib/config/backups/nfs/vagrant/helper.rb
deleted file mode 100644
index f91793a35f5..00000000000
--- a/contrib/config/backups/nfs/vagrant/helper.rb
+++ /dev/null
@@ -1,23 +0,0 @@
-require "json"
-require "net/http"
-require "uri"
-
-## Read lines from configuration
-lines = File.readlines("./vagrant/hosts")
-
-## Hash of hostname:inet_addr
-@hosts = lines.map { |ln| i,h = ln.split(/\s+/); [h,i] }.to_h
-## List of systems that will autostart
-@starts = lines.select { |ln| ln !~ /nostart/ }.map { |ln| ln.split(/\s+/)[1] }
-## Set primary host for `vagrant ssh`
-@primary = (lines.find { |ln| ln =~ /primary|default/ } || "").split[1] || "alpha-1"
-
-## Set Replicas based on # of zeros
-@replicas = @hosts.keys.select { |host| host.to_s.match /^zero-\d+/ }.count
-
-## Create hash of SMB sync options w/ optional smb_username and smb_password
-@smb_sync_opts = { type: "smb", mount_options: %w[mfsymlinks vers=3.0] }
-@smb_sync_opts.merge! smb_username: ENV['SMB_USER'] if ENV['SMB_USER']
-@smb_sync_opts.merge! smb_password: ENV['SMB_PASSWD'] if ENV['SMB_PASSWD']
-
-## Set Latest Version
-uri = URI.parse("https://get.dgraph.io/latest")
-response = Net::HTTP.get_response(uri)
-latest = JSON.parse(response.body)["tag_name"]
-@version = ENV['DGRAPH_VERSION'] || latest
diff --git a/contrib/config/backups/nfs/vagrant/hosts b/contrib/config/backups/nfs/vagrant/hosts
deleted file mode 100644
index 29d8beed4fa..00000000000
--- a/contrib/config/backups/nfs/vagrant/hosts
+++ /dev/null
@@ -1,2 +0,0 @@
-192.168.123.27 nfs-server
-192.168.123.28 nfs-client nostart default
diff --git a/contrib/config/backups/nfs/vagrant/provision.sh b/contrib/config/backups/nfs/vagrant/provision.sh
deleted file mode 100644
index 53acdc474e9..00000000000
--- a/contrib/config/backups/nfs/vagrant/provision.sh
+++ /dev/null
@@ -1,161 +0,0 @@
-#!/usr/bin/env bash
-
-######
-## main
-#################################
-main() {
-  export DEV_USER=${1:-'vagrant'}
-  export PYTHON_VERSION=${PYTHON_VERSION:-'3.8.2'}
-  INSTALL_DOCKER=${INSTALL_DOCKER:-'true'}
-  INSTALL_COMPOSE=${INSTALL_COMPOSE:-'true'}
-
-  setup_hosts
-
-  case $(hostname) in
-    *nfs-server*)
-      install_nfs_server
-      ;;
-    *nfs-client*)
-      install_nfs_client
-      [[ ${INSTALL_DOCKER} =~ "true" ]] && install_docker
-      [[ ${INSTALL_COMPOSE} =~ "true" ]] &&
-        export -f install_compose &&
-        install_common &&
-        su "${DEV_USER}" -c "install_compose"
-      ;;
-  esac
-}
-
-######
-## setup_hosts - configure /etc/hosts in absence of DNS
-#################################
-setup_hosts() {
-  CONFIG_FILE=/vagrant/hosts
-  if [[ ! -f /vagrant/hosts ]]; then
-    echo "INFO: '${CONFIG_FILE}' does not exist. Skipping configuring /etc/hosts"
-    return 1
-  fi
-
-  while read -r -a LINE; do
-    ## append hosts entry if it doesn't exist
-    if ! grep -q "${LINE[1]}" /etc/hosts; then
-      printf "%s\n" "${LINE[*]}" >>/etc/hosts
-    fi
-  done <"${CONFIG_FILE}"
-}
-
-######
-## install_nfs_server
-#################################
-install_nfs_server() {
-  SHAREPATH=${1:-"/srv/share"}
-  ACCESSLIST=${2:-'*'}
-  apt-get -qq update && apt-get install -y nfs-kernel-server
-  mkdir -p "${SHAREPATH}"
-  chown -R nobody:nogroup "${SHAREPATH}"
-  chmod -R 777 "${SHAREPATH}"
-  sed -i "\:${SHAREPATH}:d" /etc/exports
-  echo "${SHAREPATH} ${ACCESSLIST}(rw,sync,no_root_squash,no_subtree_check)" >>/etc/exports
-  exportfs -rav
-}
-
-######
-## install_nfs_client
-#################################
-install_nfs_client() {
-  MOUNTPATH=${1:-"/mnt/share"}
-  NFS_PATH=${2:-"/srv/share"}
-  NFS_SERVER=$(grep nfs-server /vagrant/vagrant/hosts | cut -d' ' -f1)
-  apt-get -qq update && apt-get install -y nfs-common
-
-  mkdir -p "${MOUNTPATH}"
-  mount -t nfs "${NFS_SERVER}":"${NFS_PATH}" "${MOUNTPATH}"
-}
-
-######
-## install_common
-#################################
-install_common() {
-  apt-get update -qq -y
-
-  ## tools and libs needed by pyenv
-  ## ref.
https://github.com/pyenv/pyenv/wiki/Common-build-problems
-  apt-get install -y \
-    build-essential \
-    curl \
-    git \
-    libbz2-dev \
-    libffi-dev \
-    liblzma-dev \
-    libncurses5-dev \
-    libncursesw5-dev \
-    libreadline-dev \
-    libsqlite3-dev \
-    libssl-dev \
-    llvm \
-    make \
-    python-openssl \
-    software-properties-common \
-    sqlite \
-    tk-dev \
-    wget \
-    xz-utils \
-    zlib1g-dev
-}
-
-######
-## install_docker
-#################################
-install_docker() {
-  [[ -z ${DEV_USER} ]] && {
-    echo '$DEV_USER not specified. Aborting' 1>&2
-    return 1
-  }
-
-  apt update -qq -y && apt-get install -y \
-    apt-transport-https \
-    ca-certificates \
-    gnupg-agent
-
-  curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
-  add-apt-repository \
-    "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
-    $(lsb_release -cs) \
-    stable"
-  apt update -qq -y
-  apt-get -y install docker-ce docker-ce-cli containerd.io
-
-  usermod -aG docker "${DEV_USER}"
-}
-
-######
-## install_compose - installs pyenv, python, docker-compose
-#################################
-install_compose() {
-  PROJ=pyenv-installer
-  SCRIPT_URL=https://github.com/pyenv/${PROJ}/raw/master/bin/${PROJ}
-  curl -sL "${SCRIPT_URL}" | bash
-
-  ## setup current environment
-  export PATH="${HOME}/.pyenv/bin:${PATH}"
-  eval "$(pyenv init -)"
-  eval "$(pyenv virtualenv-init -)"
-
-  ## append to shell environment
-  cat <<'BASHRC' >>~/.bashrc
-
-export PATH="$HOME/.pyenv/bin:$PATH"
-eval "$(pyenv init -)"
-eval "$(pyenv virtualenv-init -)"
-BASHRC
-
-  ## install recent version of python 3
-  pyenv install "${PYTHON_VERSION}"
-  pyenv global "${PYTHON_VERSION}"
-  pip install --upgrade pip
-  pip install docker-compose
-  pyenv rehash
-}
-
-main $@
diff --git a/contrib/config/backups/s3/.env b/contrib/config/backups/s3/.env
deleted file mode 100644
index c925fe364f2..00000000000
--- a/contrib/config/backups/s3/.env
+++ /dev/null
@@ -1,3 +0,0 @@
-## IMPORTANT: Though `latest` should be alright for local dev environments,
-## never use `latest` for production environments as this can lead to a mixed version cluster.
-DGRAPH_VERSION=latest
diff --git a/contrib/config/backups/s3/.gitignore b/contrib/config/backups/s3/.gitignore
deleted file mode 100644
index 7d106a785b8..00000000000
--- a/contrib/config/backups/s3/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-# Artifacts Are Automatically Generated
-s3.env
-env.sh
diff --git a/contrib/config/backups/s3/README.md b/contrib/config/backups/s3/README.md
deleted file mode 100644
index beb615912a0..00000000000
--- a/contrib/config/backups/s3/README.md
+++ /dev/null
@@ -1,228 +0,0 @@
-# Binary backups to S3
-
-Binary backups can use AWS S3 (Simple Storage Service) for object storage.
-
-## Provisioning S3
-
-Some example scripts have been provided to illustrate how to create the S3 bucket.
-
-- [Terraform](terraform/README.md) - terraform scripts to provision an S3 bucket and an IAM user
-  with access to the S3 bucket.
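-
-If you prefer not to use the Terraform scripts, the same resources can be created by hand
-with the AWS CLI. A minimal sketch, assuming a configured `aws` profile; the bucket name,
-region, and user name below are placeholders:
-
-```bash
-## create the bucket (name and region are examples only)
-aws s3 mb s3://my-organization-backups --region us-west-2
-
-## create an IAM user for backups and generate access keys for it
-aws iam create-user --user-name dgraph-backups
-aws iam create-access-key --user-name dgraph-backups
-```
-
-You would still need to attach a policy granting the user access to the bucket, which the
-Terraform scripts otherwise handle for you.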
-
-## Setting up the environment
-
-### Prerequisites
-
-You will need these tools:
-
-- Docker Environment
-  - [Docker](https://docs.docker.com/get-docker/) - container engine platform
-  - [Docker Compose](https://docs.docker.com/compose/install/) - orchestrates running docker
-    containers
-- Kubernetes Environment
-  - [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) - required for interacting
-    with the Kubernetes platform
-  - [helm](https://helm.sh/docs/intro/install/) - deploys Kubernetes packages called helm charts
-  - [helm-diff](https://github.com/databus23/helm-diff) [optional] - displays differences that
-    will be applied to the Kubernetes cluster
-  - [helmfile](https://github.com/roboll/helmfile#installation) [optional] - orchestrates helm chart
-    deployments
-
-### Using Docker Compose
-
-A `docker-compose.yml` configuration is provided that will run the Dgraph cluster.
-
-#### Configuring Docker Compose
-
-You will need to create an `s3.env` file first, like the example below. If you created the S3 bucket
-using the [Terraform](terraform/README.md) scripts, this will have been created automatically.
-
-```bash
-## s3.env
-AWS_ACCESS_KEY_ID=<your-access-key-id>
-AWS_SECRET_ACCESS_KEY=<your-secret-access-key>
-```
-
-#### Running with Docker Compose
-
-```bash
-## Run a Dgraph Cluster
-docker-compose up --detach
-```
-
-#### Access Ratel UI
-
-- Ratel UI: http://localhost:8000
-
-#### Clean up the Docker Environment
-
-```bash
-docker-compose stop
-docker-compose rm
-```
-
-### Using Kubernetes with Helm Charts
-
-For Kubernetes, you can deploy a Dgraph cluster and a Kubernetes CronJob that triggers backups using
-[Helm](https://helm.sh/docs/intro/install/).
-
-#### Configuring secrets values
-
-These values are automatically created if you used the [Terraform](terraform/README.md) scripts.
-
-If you already have an existing S3 bucket you would like to use, you will need to create the
-`charts/dgraph_secrets.yaml` file as shown below. Otherwise, if you created the bucket using the
-[Terraform](terraform/README.md) scripts, then this would be created automatically.
-
-For the `charts/dgraph_secrets.yaml`, you would create a file like this:
-
-```yaml
-backups:
-  keys:
-    s3:
-      ## AWS_ACCESS_KEY_ID
-      access: <your-access-key-id>
-      ## AWS_SECRET_ACCESS_KEY
-      secret: <your-secret-access-key>
-```
-
-#### Configuring Environments
-
-We need to define one environment variable, `BACKUP_PATH`. If the [Terraform](terraform/README.md)
-scripts were used to create the S3 bucket, we can source the `env.sh` or otherwise create it here:
-
-```bash
-## env.sh
-export BACKUP_PATH=s3://s3.<region>.amazonaws.com/<bucket-name>
-```
-
-#### Deploy using Helmfile
-
-If you have [helmfile](https://github.com/roboll/helmfile#installation) and the
-[helm-diff](https://github.com/databus23/helm-diff) plugin installed, you can deploy a Dgraph
-cluster with the following:
-
-```bash
-## source script for BACKUP_PATH env var
-. env.sh
-## deploy Dgraph cluster and configure K8S CronJob with BACKUP_PATH
-helmfile apply
-```
-
-#### Deploy using Helm
-
-```bash
-## source script for BACKUP_PATH env var
-. env.sh
-## deploy Dgraph cluster and configure K8S CronJob with BACKUP_PATH
-helm repo add "dgraph" https://charts.dgraph.io
-helm install "my-release" \
-  --namespace default \
-  --values ./charts/dgraph_config.yaml \
-  --values ./charts/dgraph_secrets.yaml \
-  --set backups.destination="${BACKUP_PATH}" \
-  dgraph/dgraph
-```
-
-#### Access resources
-
-For Dgraph Alpha, you can use this to access it at http://localhost:8080:
-
-```bash
-export ALPHA_POD_NAME=$(
-  kubectl get pods \
-    --namespace default \
-    --selector "statefulset.kubernetes.io/pod-name=my-release-dgraph-alpha-0,release=my-release" \
-    --output jsonpath="{.items[0].metadata.name}"
-)
-kubectl --namespace default port-forward $ALPHA_POD_NAME 8080:8080
-```
-
-For Dgraph Ratel UI, you can use this to access it at http://localhost:8000:
-
-```bash
-export RATEL_POD_NAME=$(
-  kubectl get pods \
-    --namespace default \
-    --selector "component=ratel,release=my-release" \
-    --output jsonpath="{.items[0].metadata.name}"
-)
-kubectl --namespace default port-forward $RATEL_POD_NAME 8000:8000
-```
-
-#### Cleanup the Kubernetes environment
-
-If you are using `helmfile`, you can delete the resources with:
-
-```bash
-## source script for BACKUP_PATH env var
-. env.sh
-helmfile delete
-kubectl delete pvc --selector release=my-release # dgraph release name specified in charts/helmfile.yaml
-```
-
-If you are just using `helm`, you can delete the resources with:
-
-```bash
-helm delete --namespace default "my-release" # dgraph release name used earlier
-kubectl delete pvc --selector release=my-release # dgraph release name used earlier
-```
-
-## Triggering a backup
-
-This is run from the host with the alpha node accessible on localhost at port `8080`. This can
-be done by running the `docker-compose` environment, or in the Kubernetes environment, after running
-`kubectl --namespace default port-forward pod/dgraph-dgraph-alpha-0 8080:8080`.
-
-### Using GraphQL
-
-For versions of Dgraph that support GraphQL, you can use this:
-
-```bash
-## source script for BACKUP_PATH env var
-. env.sh
-## endpoint of alpha1 container
-ALPHA_HOST="localhost"
-## graphql mutation and required header
-GRAPHQL="{\"query\": \"mutation { backup(input: {destination: \\\"$BACKUP_PATH\\\" forceFull: true}) { response { message code } } }\"}"
-HEADER="Content-Type: application/json"
-
-curl --silent --header "$HEADER" --request POST $ALPHA_HOST:8080/admin --data "$GRAPHQL"
-```
-
-This should return a response in JSON that will look like this if successful:
-
-```JSON
-{
-  "data": {
-    "backup": {
-      "response": {
-        "message": "Backup completed.",
-        "code": "Success"
-      }
-    }
-  }
-}
-```
-
-### Using REST API
-
-For earlier Dgraph versions that support the REST admin port, you can do this:
-
-```bash
-## source script for BACKUP_PATH env var
-. env.sh
-## endpoint of alpha1 container
-ALPHA_HOST="localhost"
-
-curl --silent --request POST $ALPHA_HOST:8080/admin/backup?force_full=true --data "destination=$BACKUP_PATH"
-```
-
-This should return a response in JSON that will look like this if successful:
-
-```JSON
-{
-  "code": "Success",
-  "message": "Backup completed."
-}
-```
diff --git a/contrib/config/backups/s3/charts/.gitignore b/contrib/config/backups/s3/charts/.gitignore
deleted file mode 100644
index f4b6b916ec4..00000000000
--- a/contrib/config/backups/s3/charts/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-minio_secrets.yaml
-dgraph_secrets.yaml
diff --git a/contrib/config/backups/s3/charts/dgraph_config.yaml b/contrib/config/backups/s3/charts/dgraph_config.yaml
deleted file mode 100644
index 83fe869f53d..00000000000
--- a/contrib/config/backups/s3/charts/dgraph_config.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
-backups:
-  full:
-    enabled: true
-    debug: true
-    schedule: "*/15 * * * *"
-alpha:
-  configFile:
-    config.hcl: |
-      whitelist = "10.0.0.0/8,172.0.0.0/8,192.168.0.0/16,127.0.0.1"
diff --git a/contrib/config/backups/s3/charts/helmfile.yaml b/contrib/config/backups/s3/charts/helmfile.yaml
deleted file mode 100644
index ef9701867e6..00000000000
--- a/contrib/config/backups/s3/charts/helmfile.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-repositories:
-  - name: dgraph
-    url: https://charts.dgraph.io
-
-releases:
-  - name: my-release
-    namespace: default
-    chart: dgraph/dgraph
-    values:
-      - ./dgraph_config.yaml
-      ## generated by terraform scripts
-      - ./dgraph_secrets.yaml
-      - backups:
-          ## Format - s3://s3.<region>.amazonaws.com/<bucket-name>
-          destination: {{ requiredEnv "BACKUP_PATH" }}
diff --git a/contrib/config/backups/s3/docker-compose.yml b/contrib/config/backups/s3/docker-compose.yml
deleted file mode 100644
index 7181b48ef89..00000000000
--- a/contrib/config/backups/s3/docker-compose.yml
+++ /dev/null
@@ -1,28 +0,0 @@
-version: "3.5"
-services:
-  zero1:
-    image: dgraph/dgraph:${DGRAPH_VERSION}
-    container_name: zero1
-    working_dir: /data/zero1
-    ports:
-      - 5080:5080
-      - 6080:6080
-    command: dgraph zero --my=zero1:5080 --replicas 1 --raft="idx=1"
-
-  alpha1:
-    image: dgraph/dgraph:${DGRAPH_VERSION}
-    container_name: alpha1
-    working_dir: /data/alpha1
-    env_file:
-      - s3.env
-    ports:
-      - 8080:8080
-      - 9080:9080
-    command: dgraph alpha --my=alpha1:7080 --zero=zero1:5080
-      --security "whitelist=10.0.0.0/8,172.0.0.0/8,192.168.0.0/16,127.0.0.1;"
-
-  ratel:
-    image: dgraph/ratel:${DGRAPH_VERSION}
-    ports:
-      - 8000:8000
-    container_name: ratel
diff --git a/contrib/config/backups/s3/helmfile.yaml b/contrib/config/backups/s3/helmfile.yaml
deleted file mode 100644
index 78b0eeffd54..00000000000
--- a/contrib/config/backups/s3/helmfile.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
-helmfiles:
-  - ./charts/helmfile.yaml
diff --git a/contrib/config/backups/s3/terraform/.gitignore b/contrib/config/backups/s3/terraform/.gitignore
deleted file mode 100644
index f92efc3cebc..00000000000
--- a/contrib/config/backups/s3/terraform/.gitignore
+++ /dev/null
@@ -1,5 +0,0 @@
-# terraform files
-terraform.tfvars
-.terraform
-*.tfstate*
-.terraform.lock.hcl
diff --git a/contrib/config/backups/s3/terraform/README.md b/contrib/config/backups/s3/terraform/README.md
deleted file mode 100644
index a5f850f1197..00000000000
--- a/contrib/config/backups/s3/terraform/README.md
+++ /dev/null
@@ -1,65 +0,0 @@
-# S3 Bucket with Terraform
-
-## About
-
-This script will create the resources needed for an S3 (Simple Storage Service) bucket
-using the [`s3-bucket`](github.com/darkn3rd/s3-bucket) module.
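-
-Once applied, you can sanity-check the new bucket from the same AWS profile. A minimal
-sketch; the bucket name below is the placeholder used in the example `terraform.tfvars`
-further down:
-
-```bash
-## list the contents of the newly created (empty) bucket
-aws s3 ls s3://my-organization-backups
-```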
-
-## Prerequisites
-
-You need the following installed to use this automation:
-
-- [AWS CLI](https://aws.amazon.com/cli/) - AWS CLI installed and configured with a local profile
-- [Terraform](https://www.terraform.io/downloads.html) - tool used to provision resources and create
-  templates
-
-## Configuration
-
-You will need to define the following variables:
-
-- Required Variables:
-  - `region` (required) - region where the bucket will be created
-  - `name` (required) - unique name of the s3 bucket
-
-## Steps
-
-### Define Variables
-
-You can define these when prompted, in a `terraform.tfvars` file, or through command line
-variables, e.g. `TF_VAR_name` and `TF_VAR_region`.
-
-```terraform
-# terraform.tfvars
-name   = "my-organization-backups"
-region = "us-west-2"
-```
-
-### Download Plugins and Modules
-
-```bash
-terraform init
-```
-
-### Prepare and Provision Resources
-
-This will create an S3 bucket and an IAM user that has access to that bucket. For convenience, it will
-also generate the following files:
-
-- `../s3.env` - used to demonstrate or test dgraph backups with the s3 bucket in a local docker
-  environment
-- `../env.sh` - destination string used to trigger backups from the command line or to configure
-  Kubernetes cron jobs to schedule backups
-- `../charts/dgraph_secrets.yaml` - used to deploy Dgraph with support for backups
-
-```bash
-## get a list of changes that will be made
-terraform plan
-## apply the changes
-terraform apply
-```
-
-## Cleanup
-
-```bash
-terraform destroy
-```
diff --git a/contrib/config/backups/s3/terraform/main.tf b/contrib/config/backups/s3/terraform/main.tf
deleted file mode 100644
index 9cbce8cdb5f..00000000000
--- a/contrib/config/backups/s3/terraform/main.tf
+++ /dev/null
@@ -1,62 +0,0 @@
-#####################################################################
-# Variables
-#####################################################################
-variable "region" {}
-variable "name" {}
-variable "user_enabled" { default = true }
-variable "create_env_sh" { default = true }
-variable "create_s3_env" { default = true }
-variable "create_dgraph_secrets" { default = true }
-
-#####################################################################
-# Bucket Module
-#####################################################################
-module "bucket" {
-  source       = "github.com/darkn3rd/s3-bucket?ref=v1.0.0"
-  name         = var.name
-  user_enabled = var.user_enabled
-}
-
-#####################################################################
-# Locals
-#####################################################################
-locals {
-  s3_vars = {
-    access_key_id     = module.bucket.access_key_id
-    secret_access_key = module.bucket.secret_access_key
-  }
-
-  env_vars = {
-    bucket_region = var.region
-    bucket_name   = var.name
-  }
-
-  dgraph_secrets = templatefile("${path.module}/templates/dgraph_secrets.yaml.tmpl", local.s3_vars)
-  env_sh         = templatefile("${path.module}/templates/env.sh.tmpl", local.env_vars)
-  s3_env         = templatefile("${path.module}/templates/s3.env.tmpl", local.s3_vars)
-}
-
-#####################################################################
-# File Resources
-#####################################################################
-resource "local_file" "env_sh" {
-  count           = var.create_env_sh ? 1 : 0
-  content         = local.env_sh
-  filename        = "${path.module}/../env.sh"
-  file_permission = "0644"
-}
-
-resource "local_file" "s3_env" {
-  count           = var.create_s3_env ? 1 : 0
-  content         = local.s3_env
-  filename        = "${path.module}/../s3.env"
-  file_permission = "0644"
-}
-
-resource "local_file" "dgraph_secrets" {
-  count           = var.create_dgraph_secrets ? 1 : 0
-  content         = local.dgraph_secrets
-  filename        = "${path.module}/../charts/dgraph_secrets.yaml"
-  file_permission = "0644"
-}
diff --git a/contrib/config/backups/s3/terraform/provider.tf b/contrib/config/backups/s3/terraform/provider.tf
deleted file mode 100644
index 685eca8e84f..00000000000
--- a/contrib/config/backups/s3/terraform/provider.tf
+++ /dev/null
@@ -1,6 +0,0 @@
-#####################################################################
-# Provider: Amazon Web Services
-#####################################################################
-provider "aws" {
-  region = var.region
-}
diff --git a/contrib/config/backups/s3/terraform/templates/dgraph_secrets.yaml.tmpl b/contrib/config/backups/s3/terraform/templates/dgraph_secrets.yaml.tmpl
deleted file mode 100644
index de87b5a4a2e..00000000000
--- a/contrib/config/backups/s3/terraform/templates/dgraph_secrets.yaml.tmpl
+++ /dev/null
@@ -1,5 +0,0 @@
-backups:
-  keys:
-    s3:
-      access: ${access_key_id}
-      secret: ${secret_access_key}
diff --git a/contrib/config/backups/s3/terraform/templates/env.sh.tmpl b/contrib/config/backups/s3/terraform/templates/env.sh.tmpl
deleted file mode 100644
index b8146fe66d0..00000000000
--- a/contrib/config/backups/s3/terraform/templates/env.sh.tmpl
+++ /dev/null
@@ -1,2 +0,0 @@
-## env.sh
-export BACKUP_PATH=s3://s3.${bucket_region}.amazonaws.com/${bucket_name}
diff --git a/contrib/config/backups/s3/terraform/templates/s3.env.tmpl b/contrib/config/backups/s3/terraform/templates/s3.env.tmpl
deleted file mode 100644
index c2d945b7c60..00000000000
--- a/contrib/config/backups/s3/terraform/templates/s3.env.tmpl
+++ /dev/null
@@ -1,3 +0,0 @@
-## s3.env
-AWS_ACCESS_KEY_ID=${access_key_id}
-AWS_SECRET_ACCESS_KEY=${secret_access_key}
diff --git a/contrib/config/monitoring/fluentd/fluent-docker.conf b/contrib/config/monitoring/fluentd/fluent-docker.conf
deleted file mode 100644
index f12ada6e322..00000000000
--- a/contrib/config/monitoring/fluentd/fluent-docker.conf
+++ /dev/null
@@ -1,51 +0,0 @@
-<source>
-  @id fluentd-containers.log
-  @type tail
-  path /var/lib/docker/containers/*/*.log
-  pos_file /var/log/containers.log.pos
-  tag dgraph.*
-  read_from_head true
-  <parse>
-    @type json
-    keep_time_key true
-    time_format %Y-%m-%dT%H:%M:%S.%NZ
-  </parse>
-</source>
-
-<filter dgraph.**>
-  @type parser
-  key_name log
-  <parse>
-    @type regexp
-    expression /^(?<severity>[IWECF])(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)$/
-  </parse>
-  reserve_data true
-</filter>
-
-<filter dgraph.**>
-  @type record_transformer
-  enable_ruby true
-  <record>
-    severity ${ if (record["severity"] == "E") then "Error" elsif (record["severity"] == "W") then "Warning" elsif (record["severity"] == "I") then "Info" elsif (record["severity"] == "D") then "Debug" else record["severity"] end}
-    tag ${tag}
-  </record>
-</filter>
-
-<match dgraph.**>
-  @type rewrite_tag_filter
-  <rule>
-    key tag
-    pattern /^dgraph.var.lib.docker.containers.(\w{32})/
-    tag raw.docker.$1
-  </rule>
-</match>
-
-<match raw.docker.**>
-  @type stdout
-</match>
diff --git a/contrib/config/monitoring/fluentd/fluentd-config.yaml b/contrib/config/monitoring/fluentd/fluentd-config.yaml
deleted file mode 100644
index 090c927ca9d..00000000000
--- a/contrib/config/monitoring/fluentd/fluentd-config.yaml
+++ /dev/null
@@ -1,62 +0,0 @@
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: fluentd-config-dgraph-io
-  namespace: default
-  labels:
-    addonmanager.kubernetes.io/mode: Reconcile
-data:
-  containers.input.conf: |-
-    <source>
-      @id fluentd-containers.log
-      @type tail
-      path /var/log/containers/dgraph*.log
-      pos_file /var/log/containers.log.pos
-      tag dgraph.*
-      read_from_head true
-      <parse>
-        @type regexp
-        expression /^(?<time>.+) (?<stream>stdout|stderr) [^ ]* (?<log>.*)$/
-      </parse>
-    </source>
-
-    <filter dgraph.**>
-      @type parser
-      key_name log
-      <parse>
-        @type regexp
-        expression /^(?<severity>[IWECF])(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)$/
-      </parse>
-      reserve_data true
-    </filter>
-
-    <filter dgraph.**>
-      @type record_transformer
-      enable_ruby true
-      <record>
-        severity ${ if (record["severity"] == "E") then "Error" elsif (record["severity"] == "W") then "Warning" elsif (record["severity"] == "I") then "Info" elsif (record["severity"] == "D") then "Debug" else record["severity"] end}
-        tag ${tag}
-      </record>
-    </filter>
-
-    # Add your log injector and management pipeline here.
-    <match **>
-      @type elasticsearch
-      logstash_format true
-      include_tag_key true
-      host "#{ENV['FLUENT_ELASTICSEARCH_HOST']}"
-      port "#{ENV['FLUENT_ELASTICSEARCH_PORT']}"
-      scheme "#{ENV['FLUENT_ELASTICSEARCH_SCHEME'] || 'http'}"
-      user "#{ENV['FLUENT_ELASTICSEARCH_USER']}"
-      password "#{ENV['FLUENT_ELASTICSEARCH_PASSWORD']}"
-    </match>
diff --git a/contrib/config/monitoring/fluentd/fluentd.yaml b/contrib/config/monitoring/fluentd/fluentd.yaml
deleted file mode 100644
index 1d69a38a9c9..00000000000
--- a/contrib/config/monitoring/fluentd/fluentd.yaml
+++ /dev/null
@@ -1,113 +0,0 @@
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: fluentd-dgraph-io
-  namespace: default
----
-apiVersion: rbac.authorization.k8s.io/v1beta1
-kind: ClusterRole
-metadata:
-  name: fluentd-dgraph-io
-  namespace: default
-rules:
-- apiGroups:
-  - ""
-  resources:
-  - pods
-  - namespaces
-  verbs:
-  - get
-  - list
-  - watch
-
----
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1beta1
-metadata:
-  name: fluentd-dgraph-io
-roleRef:
-  kind: ClusterRole
-  name: fluentd-dgraph-io
-  apiGroup: rbac.authorization.k8s.io
-subjects:
-- kind: ServiceAccount
-  name: fluentd-dgraph-io
-  namespace: default
----
-apiVersion: apps/v1
-kind: DaemonSet
-metadata:
-  name: fluentd-elasticsearch
-  namespace: default
-  labels:
-    k8s-app: fluentd-logging
-    version: v1
-spec:
-  selector:
-    matchLabels:
-      name: fluentd-elasticsearch
-  template:
-    metadata:
-      labels:
-        name: fluentd-elasticsearch
-        k8s-app: fluentd-logging
-        version: v1
-    spec:
-      serviceAccount: fluentd-dgraph-io
-      serviceAccountName: fluentd-dgraph-io
-      tolerations:
-      - key: node-role.kubernetes.io/master
-        effect: NoSchedule
-      initContainers:
-      - name: config-fluentd
-        image: busybox
-        imagePullPolicy: IfNotPresent
-        command: ["/bin/sh","-c"]
-        args:
-        - cp /fluentd/etcsrc/containers.input.conf /fluentd/etc/fluent.conf
-        volumeMounts:
-        - name: config-path
-          mountPath: /fluentd/etc
-        - name: config-source
- mountPath: /fluentd/etcsrc - containers: - - name: fluentd-elasticsearch - image: fluent/fluentd-kubernetes-daemonset:v1-debian-elasticsearch - env: - - name: FLUENT_ELASTICSEARCH_HOST - value: "" - - name: FLUENT_ELASTICSEARCH_PORT - value: "" - - name: FLUENT_ELASTICSEARCH_SCHEME - value: "https" - - name: FLUENT_ELASTICSEARCH_USER - value: - - name: FLUENT_ELASTICSEARCH_PASSWORD - value: - resources: - limits: - memory: 200Mi - requests: - cpu: 100m - memory: 200Mi - volumeMounts: - - name: varlog - mountPath: /var/log - - name: varlibdockercontainers - mountPath: /var/lib/docker/containers - readOnly: true - - name: config-path - mountPath: /fluentd/etc - terminationGracePeriodSeconds: 30 - volumes: - - name: varlog - hostPath: - path: /var/log - - name: varlibdockercontainers - hostPath: - path: /var/lib/docker/containers - - name: config-source - configMap: - name: fluentd-config-dgraph-io - - name: config-path - emptyDir: {} diff --git a/contrib/config/monitoring/grafana/dgraph-kubernetes-grafana-dashboard.json b/contrib/config/monitoring/grafana/dgraph-kubernetes-grafana-dashboard.json deleted file mode 100644 index ec25fbb2865..00000000000 --- a/contrib/config/monitoring/grafana/dgraph-kubernetes-grafana-dashboard.json +++ /dev/null @@ -1,1194 +0,0 @@ -{ - "annotations": { - "list": [ - { - "$$hashKey": "object:315", - "builtIn": 1, - "datasource": "-- Grafana --", - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" - } - ] - }, - "editable": true, - "gnetId": null, - "graphTooltip": 0, - "id": 1, - "iteration": 1585706329057, - "links": [], - "panels": [ - { - "cacheTimeout": null, - "datasource": "Prometheus", - "gridPos": { - "h": 2, - "w": 24, - "x": 0, - "y": 0 - }, - "hideTimeOverride": false, - "id": 30, - "links": [], - "options": { - "colorMode": "background", - "fieldOptions": { - "calcs": [ - "max" - ], - "defaults": { - "mappings": [ - { - "id": 0, - "op": "=", - "text": "N/A", - "type": 1, - "value": "null" - } - ], - "nullValueMode": "connected", - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 1 - } - ] - }, - "title": "Health Status -", - "unit": "short" - }, - "overrides": [], - "values": false - }, - "graphMode": "none", - "justifyMode": "center", - "orientation": "vertical" - }, - "pluginVersion": "6.7.1", - "targets": [ - { - "expr": "dgraph_alpha_health_status{pod=~'$Pod'}-1", - "format": "heatmap", - "hide": false, - "instant": false, - "intervalFactor": 1, - "legendFormat": "{{pod}}", - "metric": "dgraph_active_mutations_total", - "refId": "A", - "step": 2 - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Zero and Alpha", - "transparent": true, - "type": "stat" - }, - { - "datasource": "Prometheus", - "gridPos": { - "h": 3, - "w": 24, - "x": 0, - "y": 2 - }, - "id": 34, - "links": [], - "options": { - "fieldOptions": { - "calcs": [ - "last" - ], - "defaults": { - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - }, - "title": "Memory in use", - "unit": "decbytes" - }, - "limit": 3, - "overrides": [], - "values": false - }, - "orientation": "auto", - "showThresholdLabels": false, - "showThresholdMarkers": true - }, - "pluginVersion": "6.7.1", - "targets": [ - { - "expr": "(dgraph_memory_idle_bytes{pod=~'$Pod'}+dgraph_memory_inuse_bytes{pod=~'$Pod'})", - "interval": "", - "intervalFactor": 1, - "legendFormat": "Alpha", - 
"metric": "dgraph_memory_idle_bytes", - "refId": "A", - "step": 2 - } - ], - "timeFrom": null, - "timeShift": null, - "title": "", - "transparent": true, - "type": "gauge" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "editable": true, - "error": false, - "fill": 1, - "fillGradient": 0, - "grid": {}, - "gridPos": { - "h": 9, - "w": 24, - "x": 0, - "y": 5 - }, - "hiddenSeries": false, - "id": 1, - "isNew": true, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "max": true, - "min": false, - "rightSide": false, - "show": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 2, - "links": [], - "nullPointMode": "connected", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "dgraph_memory_inuse_bytes+dgraph_memory_idle_bytes{pod=~'$Pod'}", - "interval": "", - "intervalFactor": 2, - "legendFormat": "Inuse+Idle ({{pod}})", - "metric": "dgraph_memory_idle_bytes", - "refId": "A", - "step": 2 - }, - { - "expr": "dgraph_memory_proc_bytes{pod=~'$Pod'}", - "interval": "", - "intervalFactor": 2, - "legendFormat": "Proc ({{pod}})", - "metric": "dgraph_memory_proc_bytes", - "refId": "B", - "step": 2 - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Total memory", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "cumulative" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "bytes", - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": "", - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "editable": true, - "error": false, - "fill": 1, - "fillGradient": 0, - "grid": {}, - "gridPos": { - "h": 7, - "w": 12, - "x": 0, - "y": 14 - }, - "hiddenSeries": false, - "id": 17, - "isNew": true, - "legend": { - "alignAsTable": true, - "avg": true, - "current": true, - "max": true, - "min": false, - "show": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 2, - "links": [], - "nullPointMode": "connected", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "dgraph_active_mutations_total{pod=~'$Pod'}", - "intervalFactor": 2, - "legendFormat": "{{pod}}", - "metric": "dgraph_active_mutations_total", - "refId": "A", - "step": 2 - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Active mutations", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "cumulative" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, 
- { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "editable": true, - "error": false, - "fill": 0, - "fillGradient": 0, - "grid": {}, - "gridPos": { - "h": 7, - "w": 12, - "x": 12, - "y": 14 - }, - "hiddenSeries": false, - "id": 5, - "isNew": true, - "legend": { - "alignAsTable": true, - "avg": true, - "current": true, - "hideEmpty": false, - "hideZero": false, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "total": true, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "connected", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pluginVersion": "6.6.1", - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "dgraph_pending_proposals_total{pod=~'$Pod'}", - "intervalFactor": 2, - "legendFormat": "{{pod}}", - "metric": "dgraph_pending_proposals_total", - "refId": "A", - "step": 2 - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Pending Proposals", - "tooltip": { - "shared": true, - "sort": 1, - "value_type": "cumulative" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": "", - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": "", - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "editable": true, - "error": false, - "fill": 1, - "fillGradient": 0, - "grid": {}, - "gridPos": { - "h": 7, - "w": 12, - "x": 0, - "y": 21 - }, - "hiddenSeries": false, - "id": 14, - "isNew": true, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 2, - "links": [], - "nullPointMode": "connected", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "dgraph_memory_idle_bytes{pod=~'$Pod'}", - "interval": "", - "intervalFactor": 2, - "legendFormat": "{{pod}}", - "metric": "dgraph_memory_idle_bytes", - "refId": "A", - "step": 2 - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Memory Heap Idle", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "cumulative" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "bytes", - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "editable": true, - "error": false, - "fill": 1, - "fillGradient": 0, - "grid": {}, - "gridPos": { - "h": 7, - "w": 12, - "x": 12, - "y": 21 - }, - "hiddenSeries": false, - "id": 6, - "isNew": true, - "legend": { - "avg": false, - 
"current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 2, - "links": [], - "nullPointMode": "connected", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "rate(go_gc_duration_seconds_sum{pod=~'$Pod'}[5m])", - "intervalFactor": 2, - "legendFormat": "{{pod}}", - "metric": "go_gc_duration_seconds_sum", - "refId": "A", - "step": 2 - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "GC second sum rate(30s)", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "cumulative" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "cacheTimeout": null, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "description": "goroutines used by go.", - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 7, - "w": 24, - "x": 0, - "y": 28 - }, - "hiddenSeries": false, - "hideTimeOverride": false, - "id": 35, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pluginVersion": "6.6.1", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "go_goroutines{pod=~'$Pod'}", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "{{pod}}", - "metric": "dgraph_active_mutations_total", - "refId": "A", - "step": 2 - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "goroutines", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:595", - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "$$hashKey": "object:596", - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "cacheTimeout": null, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 7, - "w": 12, - "x": 0, - "y": 35 - }, - "hiddenSeries": false, - "hideTimeOverride": false, - "id": 23, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pluginVersion": "6.6.1", - "pointradius": 2, - "points": 
false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "dgraph_num_queries_total", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "", - "metric": "dgraph_active_mutations_total", - "refId": "A", - "step": 2 - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Processed Queries", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "editable": true, - "error": false, - "fill": 1, - "fillGradient": 0, - "grid": {}, - "gridPos": { - "h": 7, - "w": 12, - "x": 12, - "y": 35 - }, - "hiddenSeries": false, - "id": 16, - "isNew": true, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 2, - "links": [], - "nullPointMode": "connected", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "dgraph_pending_queries_total{pod=~'$Pod'}", - "intervalFactor": 2, - "legendFormat": "{{pod}}", - "metric": "dgraph_pending_queries_total", - "refId": "A", - "step": 2 - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Pending Queries", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "cumulative" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "cacheTimeout": null, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 7, - "w": 12, - "x": 0, - "y": 42 - }, - "hiddenSeries": false, - "hideTimeOverride": false, - "id": 31, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pluginVersion": "6.6.1", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "dgraph_raft_applied_index{pod=~'$Pod'}", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "{{pod}}", - "metric": "dgraph_active_mutations_total", - "refId": "A", - "step": 2 - } - ], - 
"thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Raft Applied Index", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "editable": true, - "error": false, - "fill": 1, - "fillGradient": 0, - "grid": {}, - "gridPos": { - "h": 7, - "w": 12, - "x": 12, - "y": 42 - }, - "hiddenSeries": false, - "id": 18, - "isNew": true, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 2, - "links": [], - "nullPointMode": "connected", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "dgraph_alpha_health_status{pod=~'$Pod'}", - "intervalFactor": 2, - "legendFormat": "{{pod}}", - "metric": "dgraph_alpha_health_status", - "refId": "A", - "step": 2 - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Server Health", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "cumulative" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - } - ], - "refresh": "5s", - "schemaVersion": 22, - "style": "dark", - "tags": [], - "templating": { - "list": [ - { - "allValue": null, - "current": { - "selected": true, - "text": "All", - "value": "$__all" - }, - "datasource": "Prometheus", - "definition": "label_values(pod)", - "hide": 0, - "includeAll": true, - "index": -1, - "label": null, - "multi": false, - "multiFormat": "glob", - "name": "Pod", - "options": [], - "query": "label_values(pod)", - "refresh": 1, - "regex": "/dgraph-.*-[0-9]*$/", - "skipUrlSync": false, - "sort": 0, - "tagValuesQuery": "", - "tags": [], - "tagsQuery": "", - "type": "query", - "useTags": false - } - ] - }, - "time": { - "from": "now-12h", - "to": "now" - }, - "timepicker": { - "now": true, - "refresh_intervals": [ - "5s", - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ], - "time_options": [ - "5m", - "15m", - "1h", - "6h", - "12h", - "24h", - "2d", - "7d", - "30d" - ] - }, - "timezone": "browser", - "title": "Dgraph-Kubernetes", - "uid": "d0cZK8i6M", - "variables": { - "list": [] - }, - "version": 4 - } diff --git a/contrib/config/monitoring/jaeger/README.md b/contrib/config/monitoring/jaeger/README.md deleted file mode 100644 index 8b1acb71cfd..00000000000 --- a/contrib/config/monitoring/jaeger/README.md +++ /dev/null @@ -1,10 +0,0 @@ -# Jaeger - -Jaeger is a distributed tracing system that can be integrated with Dgraph. 
Included in this section is
-automation to help install Jaeger into your Kubernetes environment.
-
-- [operator](operator/README.md) - use the jaeger operator to install an `all-in-one` jaeger pod with
-  [badger](https://github.com/dgraph-io/badger) for storage.
-- [chart](chart/README.md) - use the jaeger helm chart to install a distributed jaeger cluster with
-  [ElasticSearch](https://www.elastic.co/) or [Cassandra](https://cassandra.apache.org/) for
-  storage.
diff --git a/contrib/config/monitoring/jaeger/chart/README.md b/contrib/config/monitoring/jaeger/chart/README.md
deleted file mode 100644
index 06a385a670b..00000000000
--- a/contrib/config/monitoring/jaeger/chart/README.md
+++ /dev/null
@@ -1,101 +0,0 @@
-# Jaeger Helm Chart
-
-The [Jaeger Helm Chart](https://github.com/jaegertracing/helm-charts/tree/master/charts/jaeger) adds
-all components required to run Jaeger in Kubernetes for a production-like deployment.
-
-## Tool Requirements
-
-### Required
-
-- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) - required to interact with
-  kubernetes
-- [helm](https://helm.sh/docs/intro/install/) - required to install jaeger, cassandra, and
-  elasticsearch using helm charts
-
-### Optional
-
-These tools are optional if you would like to use a single command to install all the jaeger
-components and dgraph configured to use jaeger.
-
-- [helmfile](https://github.com/roboll/helmfile#installation) (optional)
-- [helm-diff](https://github.com/databus23/helm-diff) helm plugin:
-  `helm plugin install https://github.com/databus23/helm-diff`
-
-## Deploy
-
-First choose the desired storage backend, Cassandra or ElasticSearch:
-
-```bash
-# Cassandra is the desired storage
-export JAEGER_STORAGE_TYPE=cassandra
-# ElasticSearch is the desired storage
-export JAEGER_STORAGE_TYPE=elasticsearch
-```
-
-**IMPORTANT**: Change the `<password>` placeholder to a strong password in the instructions below.
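-One way to generate a suitable value is sketched below; `openssl` here is only an
-example, and any password generator will do:
-
-```bash
-## generate a random 32-character password for the Jaeger storage backend
-export JAEGER_STORAGE_PASSWORD=$(openssl rand -base64 24)
-```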
-
-### Deploy Using Helmfile
-
-```bash
-JAEGER_STORAGE_PASSWORD="<password>" helmfile apply
-```
-
-### Deploy Using Helm
-
-```bash
-kubectl create namespace observability
-
-export JAEGER_STORAGE_TYPE=${JAEGER_STORAGE_TYPE:-'cassandra'}
-helm repo add jaegertracing https://jaegertracing.github.io/helm-charts
-helm repo add dgraph https://charts.dgraph.io
-helm install "jaeger" \
-  --namespace observability \
-  --values ./jaeger_${JAEGER_STORAGE_TYPE}.yaml \
-  --set storage.${JAEGER_STORAGE_TYPE}.password="<password>" \
-  jaegertracing/jaeger
-
-helm install "my-release" \
-  --namespace default \
-  --values ./dgraph_jaeger.yaml \
-  dgraph/dgraph
-```
-
-## Cleanup
-
-### Cleanup Using Helmfile
-
-```bash
-## Delete Jaeger, Storage (Cassandra or ElasticSearch), Dgraph
-JAEGER_STORAGE_PASSWORD="<password>" helmfile delete
-
-## Remove Any Persistent Storage
-kubectl delete pvc --namespace default --selector release="dgraph"
-kubectl delete pvc --namespace observability --selector release="jaeger"
-```
-
-### Cleanup Using Helm
-
-```bash
-## Delete Jaeger, Storage (Cassandra or ElasticSearch), Dgraph
-helm delete --namespace default "my-release"
-helm delete --namespace observability "jaeger"
-
-## Remove Any Persistent Storage
-kubectl delete pvc --namespace default --selector release="my-release"
-kubectl delete pvc --namespace observability --selector release="jaeger"
-```
-
-## Jaeger Query UI
-
-```bash
-export POD_NAME=$(kubectl get pods \
-  --namespace observability \
-  --selector "app.kubernetes.io/instance=jaeger,app.kubernetes.io/component=query" \
-  --output jsonpath="{.items[0].metadata.name}"
-)
-kubectl port-forward --namespace observability $POD_NAME 16686:16686
-```
-
-Afterward, you can visit:
-
-- http://localhost:16686
diff --git a/contrib/config/monitoring/jaeger/chart/dgraph_jaeger.yaml b/contrib/config/monitoring/jaeger/chart/dgraph_jaeger.yaml
deleted file mode 100644
index 0361911a64c..00000000000
--- a/contrib/config/monitoring/jaeger/chart/dgraph_jaeger.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
-alpha:
-  extraEnvs:
-    - name: DGRAPH_ALPHA_JAEGER_COLLECTOR
-      value: http://jaeger-collector.observability.svc:14268
-zero:
-  extraEnvs:
-    - name: DGRAPH_ZERO_JAEGER_COLLECTOR
-      value: http://jaeger-collector.observability.svc:14268
diff --git a/contrib/config/monitoring/jaeger/chart/helmfile.yaml b/contrib/config/monitoring/jaeger/chart/helmfile.yaml
deleted file mode 100644
index 5d67e745df4..00000000000
--- a/contrib/config/monitoring/jaeger/chart/helmfile.yaml
+++ /dev/null
@@ -1,24 +0,0 @@
-repositories:
-  - name: jaegertracing
-    url: https://jaegertracing.github.io/helm-charts
-  - name: dgraph
-    url: https://charts.dgraph.io
-
-releases:
-  - name: jaeger
-    namespace: observability
-    chart: jaegertracing/jaeger
-    version: 0.37.0
-    values:
-      - ./jaeger_{{ env "JAEGER_STORAGE_TYPE" | default "cassandra" }}.yaml
-      - storage:
-          {{ env "JAEGER_STORAGE_TYPE" | default "cassandra" }}:
-            password: {{ requiredEnv "JAEGER_STORAGE_PASSWORD" }}
-
-  - name: dgraph
-    namespace: default
-    chart: dgraph/dgraph
-    needs:
-      - observability/jaeger
-    values:
-      - ./dgraph_jaeger.yaml
diff --git a/contrib/config/monitoring/jaeger/chart/jaeger_cassandra.yaml b/contrib/config/monitoring/jaeger/chart/jaeger_cassandra.yaml
deleted file mode 100644
index 61bc8558ea4..00000000000
--- a/contrib/config/monitoring/jaeger/chart/jaeger_cassandra.yaml
+++ /dev/null
@@ -1,34 +0,0 @@
-provisionDataStore:
-  cassandra: true
-storage:
-  type: cassandra
-  cassandra:
-    user: cassandrauser
-    usePassword: true
-    ## CHANGE THIS BEFORE DEPLOYING!!!
-    password: CHANGEME
-
-## The settings under cassandra can be found here:
-## https://github.com/helm/charts/tree/master/incubator/cassandra
-cassandra:
-  persistence:
-    enabled: true
-  image:
-    repo: cassandra
-    tag: 3.11.8
-
-agent:
-  ## Optional Monitoring for Prometheus
-  serviceMonitor:
-    enabled: false
-    additionalLabels: {release: prometheus}
-collector:
-  ## Optional Monitoring for Prometheus
-  serviceMonitor:
-    enabled: false
-    additionalLabels: {release: prometheus}
-query:
-  ## Optional Monitoring for Prometheus
-  serviceMonitor:
-    enabled: false
-    additionalLabels: {release: prometheus}
diff --git a/contrib/config/monitoring/jaeger/chart/jaeger_elasticsearch.yaml b/contrib/config/monitoring/jaeger/chart/jaeger_elasticsearch.yaml
deleted file mode 100644
index d32154b87ef..00000000000
--- a/contrib/config/monitoring/jaeger/chart/jaeger_elasticsearch.yaml
+++ /dev/null
@@ -1,33 +0,0 @@
-provisionDataStore:
-  elasticsearch: true
-storage:
-  type: elasticsearch
-  elasticsearch:
-    user: elasticuser
-    usePassword: true
-    ## CHANGE THIS BEFORE DEPLOYING!!!
-    password: CHANGEME
-
-## The settings under elasticsearch can be found here:
-## https://github.com/elastic/helm-charts/tree/master/elasticsearch
-elasticsearch:
-  persistence:
-    enabled: true
-    labels:
-      enabled: true
-
-agent:
-  ## Optional Monitoring for Prometheus
-  serviceMonitor:
-    enabled: false
-    additionalLabels: {release: prometheus}
-collector:
-  ## Optional Monitoring for Prometheus
-  serviceMonitor:
-    enabled: false
-    additionalLabels: {release: prometheus}
-query:
-  ## Optional Monitoring for Prometheus
-  serviceMonitor:
-    enabled: false
-    additionalLabels: {release: prometheus}
diff --git a/contrib/config/monitoring/jaeger/operator/.gitignore b/contrib/config/monitoring/jaeger/operator/.gitignore
deleted file mode 100644
index 7bfb9b0c70a..00000000000
--- a/contrib/config/monitoring/jaeger/operator/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-# ignore autogenerated files
-jaeger
diff --git a/contrib/config/monitoring/jaeger/operator/README.md b/contrib/config/monitoring/jaeger/operator/README.md
deleted file mode 100644
index cedd62b296f..00000000000
--- a/contrib/config/monitoring/jaeger/operator/README.md
+++ /dev/null
@@ -1,100 +0,0 @@
-# Jaeger Operator
-
-The [Jaeger operator](https://github.com/jaegertracing/jaeger-operator) is an implementation of a
-[Kubernetes operator](https://coreos.com/operators/) that aims to ease the operational complexity of
-deploying and managing Jaeger.
-
-## Tool Requirements
-
-### Required
-
-- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) - required to interact with
-  kubernetes
-- [helm](https://helm.sh/docs/intro/install/) - required to install jaeger-operator using helm chart
-
-### Optional
-
-These tools are optional if you would like to use a single command to install all the jaeger
-components and dgraph configured to use jaeger.
-
-- [helmfile](https://github.com/roboll/helmfile#installation)
-- [helm-diff](https://github.com/databus23/helm-diff) helm plugin:
-  `helm plugin install https://github.com/databus23/helm-diff`
-
-## Deploy
-
-### Deploy Using Helmfile
-
-```bash
-helmfile apply
-```
-
-### Deploy Using Helm and Kubectl
-
-If you do not have `helmfile` available, you can follow these steps:
-
-```bash
-kubectl create namespace observability
-
-## Install Jaeger Operator
-helm repo add jaegertracing https://jaegertracing.github.io/helm-charts
-helm install "jaeger-operator" \
-  --namespace observability \
-  --set serviceAccount.name=jaeger-operator \
-  --set rbac.clusterRole=true \
-  jaegertracing/jaeger-operator
-
-## Install Jaeger using Jaeger Operator CRD
-kubectl apply \
-  --namespace observability \
-  --kustomize ./jaeger-kustomize/overlays/badger
-
-## Install Dgraph configured to use Jaeger
-helm repo add dgraph https://charts.dgraph.io
-helm install "my-release" \
-  --namespace default \
-  --values ./dgraph_jaeger.yaml \
-  dgraph/dgraph
-```
-
-## Cleanup
-
-### Cleanup Using Helmfile
-
-```bash
-helmfile delete
-kubectl delete pvc --namespace default --selector release="dgraph"
-```
-
-### Cleanup Using Helm and Kubectl
-
-```bash
-## Delete Dgraph and Dgraph Persistence
-helm delete --namespace default "my-release"
-kubectl delete pvc --namespace default --selector release="my-release"
-
-## Delete Jaeger
-kubectl delete \
-  --namespace observability \
-  --kustomize jaeger-kustomize/overlays/badger/
-
-## Delete Jaeger Operator
-helm delete --namespace observability "jaeger-operator"
-```
-
-## Jaeger Query UI
-
-You can use the port-forward option to access the Jaeger Query UI from localhost with this:
-
-```bash
-export POD_NAME=$(kubectl get pods \
-  --namespace observability \
-  --selector "app.kubernetes.io/instance=jaeger,app.kubernetes.io/component=all-in-one" \
-  --output jsonpath="{.items[0].metadata.name}"
-)
-kubectl port-forward --namespace observability $POD_NAME 16686:16686
-```
-
-Afterward, visit:
-
-- http://localhost:16686
diff --git a/contrib/config/monitoring/jaeger/operator/dgraph_jaeger.yaml b/contrib/config/monitoring/jaeger/operator/dgraph_jaeger.yaml
deleted file mode 100644
index 0361911a64c..00000000000
--- a/contrib/config/monitoring/jaeger/operator/dgraph_jaeger.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
-alpha:
-  extraEnvs:
-    - name: DGRAPH_ALPHA_JAEGER_COLLECTOR
-      value: http://jaeger-collector.observability.svc:14268
-zero:
-  extraEnvs:
-    - name: DGRAPH_ZERO_JAEGER_COLLECTOR
-      value: http://jaeger-collector.observability.svc:14268
diff --git a/contrib/config/monitoring/jaeger/operator/helmfile.yaml b/contrib/config/monitoring/jaeger/operator/helmfile.yaml
deleted file mode 100644
index eeb8cebdf9e..00000000000
--- a/contrib/config/monitoring/jaeger/operator/helmfile.yaml
+++ /dev/null
@@ -1,39 +0,0 @@
-repositories:
-  - name: jaegertracing
-    url: https://jaegertracing.github.io/helm-charts
-  - name: dgraph
-    url: https://charts.dgraph.io
-
-releases:
-  - name: jaeger-operator
-    namespace: observability
-    chart: jaegertracing/jaeger-operator
-    version: 2.17.0
-    values:
-      - serviceAccount:
-          name: jaeger-operator
-        rbac:
-          clusterRole: true
-  ## Jaeger Operator Reference (official)
-  ## https://godoc.org/github.com/jaegertracing/jaeger-operator/pkg/apis/jaegertracing/v1#JaegerSpec
-  ## Example based on
-  ## https://github.com/jaegertracing/jaeger-operator/blob/master/deploy/examples/with-badger-and-volume.yaml
-  - name: jaeger
-    namespace: observability
-    chart: ./jaeger
-    needs:
-      - observability/jaeger-operator
-    hooks:
-      - events:
-          - prepare
-          - cleanup
-        command: ./helmify.sh
-        args:
-          - "{{`{{if eq .Event.Name \"prepare\"}}build{{else}}clean{{end}}`}}"
-          - "{{`{{.Release.Chart}}`}}"
-          - badger
-  - name: dgraph
-    namespace: default
-    chart: dgraph/dgraph
-    values:
-      - dgraph_jaeger.yaml
diff --git a/contrib/config/monitoring/jaeger/operator/helmify.sh b/contrib/config/monitoring/jaeger/operator/helmify.sh
deleted file mode 100755
index c42206f1920..00000000000
--- a/contrib/config/monitoring/jaeger/operator/helmify.sh
+++ /dev/null
@@ -1,55 +0,0 @@
-#!/usr/bin/env bash
-
-cmd=$1
-chart=$2
-env=$3
-dir=${chart}-kustomize
-
-chart=${chart/.\//}
-
-build() {
-  if [[ ! -d ${dir} ]]; then
-    echo "directory \"${dir}\" does not exist. make a kustomize project there in order to generate a local helm chart at ${chart}/ from it!" 1>&2
-    exit 1
-  fi
-
-  mkdir -p "${chart}"/templates
-  echo "generating ${chart}/Chart.yaml" 1>&2
-  cat <<EOF >"${chart}"/Chart.yaml
-apiVersion: v1
-appVersion: "1.0"
-description: A Helm chart for Kubernetes
-name: ${chart}
-version: 0.1.0
-EOF
-  echo "generating ${chart}/templates/NOTES.txt" 1>&2
-  cat <<EOF >"${chart}"/templates/NOTES.txt
-${chart} has been installed as release {{ .Release.Name }}.
-
-Run \`helm status {{ .Release.Name }}\` for more information.
-Run \`helm delete --purge {{.Release.Name}}\` to uninstall.
-EOF
-  echo "running kustomize" 1>&2
-  (
-    cd "${dir}" || exit
-    kubectl kustomize overlays/"${env}"
-  ) >"${chart}"/templates/all.yaml
-  echo "running helm lint" 1>&2
-  helm lint "${chart}"
-  echo "generated following files:"
-  tree "${chart}"
-}
-
-clean() {
-  rm "${chart}"/Chart.yaml
-  rm "${chart}"/templates/*.yaml
-}
-
-case "${cmd}" in
-"build") build ;;
-"clean") clean ;;
-*)
-  echo "unsupported command: ${cmd}" 1>&2
-  exit 1
-  ;;
-esac
diff --git a/contrib/config/monitoring/jaeger/operator/jaeger-kustomize/base/jaeger.yaml b/contrib/config/monitoring/jaeger/operator/jaeger-kustomize/base/jaeger.yaml
deleted file mode 100644
index ab6039c6f21..00000000000
--- a/contrib/config/monitoring/jaeger/operator/jaeger-kustomize/base/jaeger.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-apiVersion: jaegertracing.io/v1
-kind: Jaeger
-metadata:
-  name: jaeger
-spec:
-  strategy: allInOne
diff --git a/contrib/config/monitoring/jaeger/operator/jaeger-kustomize/base/kustomization.yaml b/contrib/config/monitoring/jaeger/operator/jaeger-kustomize/base/kustomization.yaml
deleted file mode 100644
index 8dfbb61ef49..00000000000
--- a/contrib/config/monitoring/jaeger/operator/jaeger-kustomize/base/kustomization.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-apiVersion: kustomize.config.k8s.io/v1beta1
-kind: Kustomization
-
-# namespace: observability
-resources:
-- jaeger.yaml
diff --git a/contrib/config/monitoring/jaeger/operator/jaeger-kustomize/overlays/badger/kustomization.yaml b/contrib/config/monitoring/jaeger/operator/jaeger-kustomize/overlays/badger/kustomization.yaml
deleted file mode 100644
index 6b5819fb477..00000000000
--- a/contrib/config/monitoring/jaeger/operator/jaeger-kustomize/overlays/badger/kustomization.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-apiVersion: kustomize.config.k8s.io/v1beta1
-kind: Kustomization
-
-bases:
-  - ../../base
-patches:
-  - storage.yaml
diff --git a/contrib/config/monitoring/jaeger/operator/jaeger-kustomize/overlays/badger/storage.yaml b/contrib/config/monitoring/jaeger/operator/jaeger-kustomize/overlays/badger/storage.yaml
deleted file mode 100644
index e9b2c40ca4d..00000000000
--- a/contrib/config/monitoring/jaeger/operator/jaeger-kustomize/overlays/badger/storage.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
-apiVersion: jaegertracing.io/v1
-kind: Jaeger
-metadata:
-  name: jaeger
-spec:
-  storage:
-    type: badger
-    options:
-      badger:
-        ephemeral: false
-        directory-key: /badger/key
-        directory-value: /badger/data
-  volumeMounts:
-    - name: data
-      mountPath: /badger
-  volumes:
-    - name: data
-      emptyDir: {}
diff --git a/contrib/config/monitoring/prometheus/README.md b/contrib/config/monitoring/prometheus/README.md
deleted file mode 100644
index 2e1c18f7b1e..00000000000
--- a/contrib/config/monitoring/prometheus/README.md
+++ /dev/null
@@ -1,34 +0,0 @@
-## Prometheus Metrics
-
-[Prometheus](https://prometheus.io/) is a platform for gathering metrics and triggering alerts.
-This can be used to monitor Dgraph deployed on the Kubernetes platform.
-
-You can install [Prometheus](https://prometheus.io/) using either of these options:
-
-- Kubernetes manifests (this directory)
-  - Instructions:
-    [Deploy: Monitoring in Kubernetes](https://dgraph.io/docs/deploy/#monitoring-in-kubernetes)
-- Helm Chart Values - This will install [Prometheus](https://prometheus.io/),
-  [AlertManager](https://prometheus.io/docs/alerting/latest/alertmanager/), and
-  [Grafana](https://grafana.com/).
-  - Instructions: [README.md](chart-values/README.md)
-
-## Kubernetes Manifests Details
-
-These manifests require the
-[prometheus-operator](https://coreos.com/blog/the-prometheus-operator.html) to be installed before
-using them (see [instructions](https://dgraph.io/docs/deploy/#monitoring-in-kubernetes)).
-
-This directory contains the following files:
-
-- `prometheus.yaml` - Prometheus service and Dgraph service monitors that keep the Prometheus
-  configuration synchronized with Dgraph configuration changes. The service monitors use service
-  discovery, such as Kubernetes labels and namespaces, to discover Dgraph. Should you have
-  multiple Dgraph installations, such as dev-test and production, you can tailor these to narrow
-  the scope to the Dgraph deployment you want to track.
-- `alertmanager-config.yaml` - This is a secret you can create when installing
-  `alertmanager.yaml`. Here you can specify where to direct alerts, such as Slack or PagerDuty.
-- `alertmanager.yaml` - AlertManager service to trigger alerts if metrics exceed a threshold
-  specified in the alert rules.
-- `alert-rules.yaml` - These are rules that can trigger alerts. Adjust these so they make sense
-  for your Dgraph deployment.
diff --git a/contrib/config/monitoring/prometheus/alert-rules.yaml b/contrib/config/monitoring/prometheus/alert-rules.yaml
deleted file mode 100644
index 0267a100a76..00000000000
--- a/contrib/config/monitoring/prometheus/alert-rules.yaml
+++ /dev/null
@@ -1,74 +0,0 @@
-apiVersion: monitoring.coreos.com/v1
-kind: PrometheusRule
-metadata:
-  creationTimestamp: null
-  labels:
-    app: dgraph-io
-    prometheus: dgraph-io
-    role: alert-rules
-  name: prometheus-rules-dgraph-io
-spec:
-  groups:
-  - name: ./dgraph-alert.rules
-    interval: 30s
-    rules:
-    - alert: AlphaNotReady
-      expr: dgraph_alpha_health_status{job="dgraph-alpha-public"}
-        == 0
-      for: 3m
-      annotations:
-        description: '{{ $labels.instance }} for cluster {{ $labels.cluster }} has been
-          down for more than 3 minutes.'
-        summary: Instance {{ $labels.instance }} down
-      labels:
-        severity: medium
-    - alert: AlphaDead
-      expr: dgraph_alpha_health_status{job="dgraph-alpha-public"}
-        == 0
-      for: 10m
-      annotations:
-        description: '{{ $labels.instance }} for cluster {{ $labels.cluster }} has been
-          down for more than 10 minutes.'
-        summary: Instance {{ $labels.instance }} down
-      labels:
-        severity: high
-    - alert: HighPendingQueriesCount
-      expr: (sum
-        by(instance, cluster) (dgraph_pending_queries_total{job="dgraph-alpha-public"}))
-        > 1000
-      for: 5m
-      annotations:
-        description: '{{ $labels.instance }} for cluster {{ $labels.cluster }} has
-          a high number of pending queries ({{ $value }} in the last 5m).'
-        summary: Instance {{ $labels.instance }} is experiencing high pending query rates.
-      labels:
-        severity: medium
-    - alert: HighAlphaOpenFDCount
-      expr: process_open_fds{job="dgraph-alpha-public"}
-        / process_max_fds{job="dgraph-alpha-public"} > 0.75
-      for: 10m
-      annotations:
-        description: 'Too many open file descriptors on alpha instance {{ $labels.instance }}: {{ $value
-          }} fraction used.'
-        summary: 'Alpha instance {{ $labels.instance }} has too many open file descriptors.'
-      labels:
-        severity: high
-    - alert: HighZeroOpenFDCount
-      expr: process_open_fds{job="dgraph-zero-public"}
-        / process_max_fds{job="dgraph-zero-public"} > 0.75
-      for: 10m
-      annotations:
-        description: 'Too many open file descriptors on zero instance {{ $labels.instance }}: {{ $value
-          }} fraction used.'
-        summary: 'Zero instance {{ $labels.instance }} has too many open file descriptors.'
-      labels:
-        severity: high
-    - alert: FollowerBehindTs
-      expr: (max
-        by(cluster) (dgraph_max_assigned_ts)) - (min by(cluster) (dgraph_max_assigned_ts))
-        > 1000
-      for: 30s
-      annotations:
-        description: A follower is behind the leader's latest applied timestamp by {{ $value }}.
-      labels:
-        severity: medium
diff --git a/contrib/config/monitoring/prometheus/alertmanager-config.yaml b/contrib/config/monitoring/prometheus/alertmanager-config.yaml
deleted file mode 100644
index 7f5e08c95ed..00000000000
--- a/contrib/config/monitoring/prometheus/alertmanager-config.yaml
+++ /dev/null
@@ -1,25 +0,0 @@
-global:
-  resolve_timeout: 2m
-route:
-  group_by: ['cluster', 'alertname']
-  group_wait: 30s
-  group_interval: 2m
-  repeat_interval: 3h
-  receiver: 'default_receiver'
-  routes:
-    - receiver: 'slack'
-      group_wait: 10s
-      group_by: ['job']
-      match_re:
-        severity: high|medium
-receivers:
-- name: 'default_receiver'
-  webhook_configs:
-    - url: 'https://alertmanagerwh:8080/' # dummy default webhook.
-- name: 'slack'
-  slack_configs:
-    - send_resolved: true
-      api_url: 'SLACK_WEBHOOK_URL'
-      text: " \nsummary: {{ .CommonAnnotations.summary }}\ndescription: {{ .CommonAnnotations.description }}"
-      channel: alerts
-      username: alert-bot
diff --git a/contrib/config/monitoring/prometheus/alertmanager.yaml b/contrib/config/monitoring/prometheus/alertmanager.yaml
deleted file mode 100644
index e8a64293c8c..00000000000
--- a/contrib/config/monitoring/prometheus/alertmanager.yaml
+++ /dev/null
@@ -1,31 +0,0 @@
-# Create an Alertmanager resource to be managed by prometheus-operator.
-# This creates a new Alertmanager cluster; the replica count is set in the spec below.
-#
-# Create an alertmanager config using the below command:
-#   kubectl create secret generic alertmanager-alertmanager-dgraph-io --from-file=alertmanager.yaml=alertmanager-config.yaml
-# Make sure the name of the secret is of the form alertmanager-{ALERTMANAGER_NAME}
-apiVersion: monitoring.coreos.com/v1
-kind: Alertmanager
-metadata:
-  name: alertmanager-dgraph-io
-  labels:
-    app: dgraph-io
-spec:
-  replicas: 1
-  logLevel: debug
----
-apiVersion: v1
-kind: Service
-metadata:
-  name: alertmanager-dgraph-io
-  labels:
-    app: dgraph-io
-spec:
-  type: ClusterIP
-  ports:
-    - name: web
-      port: 9093
-      protocol: TCP
-      targetPort: web
-  selector:
-    alertmanager: alertmanager-dgraph-io
diff --git a/contrib/config/monitoring/prometheus/chart-values/README.md b/contrib/config/monitoring/prometheus/chart-values/README.md
deleted file mode 100644
index a045800a445..00000000000
--- a/contrib/config/monitoring/prometheus/chart-values/README.md
+++ /dev/null
@@ -1,148 +0,0 @@
-# Helm Chart Values
-
-You can install [Prometheus](https://prometheus.io/) and [Grafana](https://grafana.com/) using this
-helm chart and the supplied helm chart values.
-
-## Usage
-
-### Tool Requirements
-
-- [Kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) - Kubernetes client tool to
-  interact with a Kubernetes cluster
-- [Helm](https://helm.sh/) - package manager for Kubernetes
-- [Helmfile](https://github.com/roboll/helmfile#installation) (optional) - declarative spec that
-  allows you to compose several helm charts
-  - [helm-diff](https://github.com/databus23/helm-diff) - helm plugin used by `helmfile` to show
-    differences when applying helm files.
-
-### Using Helm

-You can use helm to install the
-[kube-prometheus-stack](https://github.com/prometheus-operator/kube-prometheus) helm chart. This
-helm chart is a collection of Kubernetes manifests, [Grafana](http://grafana.com/) dashboards, and
-[Prometheus rules](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/),
-combined with scripts to provide monitoring with [Prometheus](https://prometheus.io/) using the
-[Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator). This helm chart
-will also install [Grafana](http://grafana.com/),
-[node_exporter](https://github.com/prometheus/node_exporter), and
-[kube-state-metrics](https://github.com/kubernetes/kube-state-metrics).
-
-To use this, run the following:
-
-```bash
-helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
-helm repo add stable https://charts.helm.sh/stable
-helm repo update
-
-## set Grafana secret admin password
-GRAFANA_ADMIN_PASSWORD=''
-## optionally set namespace (default=monitoring if not specified)
-export NAMESPACE="monitoring"
-
-helm install my-prometheus \
-  --values ./dgraph-prometheus-operator.yaml \
-  --set grafana.adminPassword=$GRAFANA_ADMIN_PASSWORD \
-  --namespace $NAMESPACE \
-  prometheus-community/kube-prometheus-stack
-```
-
-### Using Helmfile
-
-You can use helmfile to manage multiple helm charts and their corresponding helm chart values from
-a single configuration file: `helmfile.yaml`. The provided example `helmfile.yaml` shows how to
-use this to install the helm chart.
-
-To use this, run the following:
-
-```bash
-## set Grafana secret admin password
-GRAFANA_ADMIN_PASSWORD=''
-## optionally set namespace (default=monitoring if not specified)
-export NAMESPACE="monitoring"
-
-helmfile apply
-```
-
-## Grafana Dashboards
-
-You can import [Grafana](https://grafana.com/) Dashboards from within the web consoles.
-
-There's an example dashboard for some metrics that you can use to monitor Dgraph on Kubernetes:
-
-- [dgraph-kubernetes-grafana-dashboard.json](../../grafana/dgraph-kubernetes-grafana-dashboard.json)
-
-## Helm Chart Configuration
-
-Here are some Helm chart values you may want to configure depending on your environment.
-
-### General
-
-- `grafana.service.type` - set to `LoadBalancer` if you would like to expose this port.
-- `grafana.service.annotations` - add annotations to configure a `LoadBalancer`, such as whether
-  it is internal or external facing, a DNS name with external-dns, etc.
-- `prometheus.service.type` - set to `LoadBalancer` if you would like to expose this port.
-- `prometheus.service.annotations` - add annotations to configure a `LoadBalancer`, such as
-  whether it is internal or external facing, a DNS name with external-dns, etc.
-
-### Dgraph Service Monitors
-
-- `prometheus.additionalServiceMonitors.namespaceSelector.matchNames` - if you want to match a
-  Dgraph installation in a specific namespace.
-- `prometheus.additionalServiceMonitors.selector.matchLabels` - if you want to match specific
-  labels in your Dgraph deployment. Currently this matches `monitor: zero.dgraph-io` and
-  `monitor: alpha.dgraph-io`, which is the default for the
-  [Dgraph helm chart](https://github.com/dgraph-io/charts).
-
-## Alerting for Dgraph
-
-You can use the examples here to add alerts for Dgraph using Prometheus AlertManager.
-
-With `helmfile`, you can deploy this using the following:
-
-```bash
-## set Grafana secret admin password
-GRAFANA_ADMIN_PASSWORD=''
-## optionally set namespace (default=monitoring if not specified)
-export NAMESPACE="monitoring"
-## enable dgraph alerting
-export DGRAPH_ALERTS_ENABLED=1
-## enable pagerduty and set integration key (optional)
-export PAGERDUTY_INTEGRATION_KEY=''
-
-helmfile apply
-```
-
-For PagerDuty integration, you will need to add a service with the integration type `Prometheus`,
-and later copy the integration key that is created.
-
-### Alerting for Dgraph binary backups with Kubernetes CronJobs
-
-In addition to adding alerts for Dgraph, if you enabled binary backups through the Kubernetes
-CronJob support in the Dgraph helm chart (see [backups/README.md](../backups/README.md)), you can
-use the examples here to add alerting for the backup cron jobs.
-
-With `helmfile`, you can deploy this using the following:
-
-```bash
-## set grafana secret admin password
-GRAFANA_ADMIN_PASSWORD=''
-## optionally set namespace (default=monitoring if not specified)
-export NAMESPACE="monitoring"
-## enable dgraph alerting and Kubernetes CronJobs alerting
-export DGRAPH_ALERTS_ENABLED=1
-export DGRAPH_BACKUPS_ALERTS_ENABLED=1
-## enable pagerduty and set integration key (optional)
-export PAGERDUTY_INTEGRATION_KEY=''
-
-helmfile apply
-```
-
-## Upgrading from previous versions
-
-Previously, this chart was called `stable/prometheus-operator`, which has been deprecated and is
-now called `prometheus-community/kube-prometheus-stack`. If you are using the old chart, you will
-have to do a migration to use the new chart.
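If you previously installed the deprecated chart, a minimal first step (a sketch; the release
name and values shown earlier in this README are assumptions) is to point Helm at the new
repository and confirm the new chart is visible before following the migration guide below:

```bash
## Add the community-maintained chart repository and refresh the local index.
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
helm repo update

## Confirm the kube-prometheus-stack chart can be found before migrating.
helm search repo prometheus-community/kube-prometheus-stack
```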
-
-The Prometheus community has created a migration guide for this process:
-
-- [Migrating from stable/prometheus-operator chart](https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/README.md#migrating-from-stableprometheus-operator-chart)
diff --git a/contrib/config/monitoring/prometheus/chart-values/alertmanager-pagerduty.yaml.gotmpl b/contrib/config/monitoring/prometheus/chart-values/alertmanager-pagerduty.yaml.gotmpl
deleted file mode 100644
index 5ee5297f5fd..00000000000
--- a/contrib/config/monitoring/prometheus/chart-values/alertmanager-pagerduty.yaml.gotmpl
+++ /dev/null
@@ -1,25 +0,0 @@
-alertmanager:
-  config:
-    global:
-      resolve_timeout: 1m
-      pagerduty_url: https://events.pagerduty.com/v2/enqueue
-
-    route:
-      receiver: 'null'
-      {{- if env "DGRAPH_ALERTS_ENABLED" }}
-      routes:
-        - match:
-            alertname: dgraph*
-          receiver: 'pagerduty-notifications'
-        {{- if env "DGRAPH_BACKUPS_ALERTS_ENABLED" }}
-        - match:
-            alertname: CronJobStatusFailed
-          receiver: 'pagerduty-notifications'
-        {{- end }}
-      {{- end }}
-    receivers:
-      - name: 'pagerduty-notifications'
-        pagerduty_configs:
-          - service_key: "{{ requiredEnv "PAGERDUTY_INTEGRATION_KEY" }}"
-            send_resolved: true
-      - name: 'null'
diff --git a/contrib/config/monitoring/prometheus/chart-values/dgraph-app-alert-rules.yaml.gotmpl b/contrib/config/monitoring/prometheus/chart-values/dgraph-app-alert-rules.yaml.gotmpl
deleted file mode 100644
index 3f7a55578a5..00000000000
--- a/contrib/config/monitoring/prometheus/chart-values/dgraph-app-alert-rules.yaml.gotmpl
+++ /dev/null
@@ -1,59 +0,0 @@
-additionalPrometheusRulesMap:
-  dgraph-alerts:
-    groups:
-      - name: dgraph-alert.rules
-        rules:
-          - alert: dgraphAlphaNotReady
-            expr: dgraph_alpha_health_status{job="{{ env "DGRAPH_RELEASE" | default "my-release" }}-dgraph-alpha"} == 0
-            for: 3m
-            annotations:
-              description: '{{ printf "{{ $labels.instance }}" }} for cluster {{ printf "{{ $labels.cluster }}" }} has been down for more than 3 minutes.'
-              summary: Instance {{ printf "{{ $labels.instance }}" }} down
-            labels:
-              severity: medium
-          - alert: dgraphAlphaDead
-            expr: dgraph_alpha_health_status{job="{{ env "DGRAPH_RELEASE" | default "my-release" }}-dgraph-alpha"} == 0
-            for: 10m
-            annotations:
-              description: '{{ printf "{{ $labels.instance }}" }} for cluster {{ printf "{{ $labels.cluster }}" }} has been down for more than 10 minutes.'
-              summary: Instance {{ printf "{{ $labels.instance }}" }} down
-            labels:
-              severity: high
-          - alert: dgraphHighPendingQueriesCount
-            expr: (sum
-              by(instance, cluster) (dgraph_pending_queries_total{job="{{ env "DGRAPH_RELEASE" | default "my-release" }}-dgraph-alpha"}))
-              > 1000
-            for: 5m
-            annotations:
-              description: '{{ printf "{{ $labels.instance }}" }} for cluster {{ printf "{{ $labels.cluster }}" }} has
-                a high number of pending queries ({{ printf "{{ $value }}" }} in the last 5m).'
-              summary: Instance {{ printf "{{ $labels.instance }}" }} is experiencing high pending query rates.
-            labels:
-              severity: medium
-          - alert: dgraphHighAlphaOpenFDCount
-            expr: process_open_fds{job="{{ env "DGRAPH_RELEASE" | default "my-release" }}-dgraph-alpha"}
-              / process_max_fds{job="{{ env "DGRAPH_RELEASE" | default "my-release" }}-dgraph-alpha"} > 0.75
-            for: 10m
-            annotations:
-              description: 'Too many open file descriptors on alpha instance {{ printf "{{ $labels.instance }}" }}: {{ printf "{{ $value }}" }} fraction used.'
-              summary: 'Alpha instance {{ printf "{{ $labels.instance }}" }} has too many open file descriptors.'
-            labels:
-              severity: high
-          - alert: dgraphHighZeroOpenFDCount
-            expr: process_open_fds{job="{{ env "DGRAPH_RELEASE" | default "my-release" }}-dgraph-zero"}
-              / process_max_fds{job="{{ env "DGRAPH_RELEASE" | default "my-release" }}-dgraph-zero"} > 0.75
-            for: 10m
-            annotations:
-              description: 'Too many open file descriptors on zero instance {{ printf "{{ $labels.instance }}" }}: {{ printf "{{ $value }}" }} fraction used.'
-              summary: 'Zero instance {{ printf "{{ $labels.instance }}" }} has too many open file descriptors.'
-            labels:
-              severity: high
-          - alert: dgraphFollowerBehindTs
-            expr: (max
-              by(cluster) (dgraph_max_assigned_ts)) - (min by(cluster) (dgraph_max_assigned_ts))
-              > 1000
-            for: 30s
-            annotations:
-              description: A follower is behind the leader's latest applied timestamp by {{ printf "{{ $value }}" }}.
-            labels:
-              severity: medium
diff --git a/contrib/config/monitoring/prometheus/chart-values/dgraph-backup-alert-rules.yaml b/contrib/config/monitoring/prometheus/chart-values/dgraph-backup-alert-rules.yaml
deleted file mode 100644
index 0a93e181a1e..00000000000
--- a/contrib/config/monitoring/prometheus/chart-values/dgraph-backup-alert-rules.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
-additionalPrometheusRulesMap:
-  backup-cron:
-    groups:
-      - name: kube-cron.rules
-        rules:
-          - record: job_cronjob:kube_job_status_start_time:max
-            expr: |
-              label_replace(
-                label_replace(
-                  max(
-                    kube_job_status_start_time
-                    * ON(job_name) GROUP_RIGHT()
-                    kube_job_labels{label_cronjob!=""}
-                  ) BY (job_name, label_cronjob)
-                  == ON(label_cronjob) GROUP_LEFT()
-                  max(
-                    kube_job_status_start_time
-                    * ON(job_name) GROUP_RIGHT()
-                    kube_job_labels{label_cronjob!=""}
-                  ) BY (label_cronjob),
-                  "job", "$1", "job_name", "(.+)"),
-                "cronjob", "$1", "label_cronjob", "(.+)")
-          - record: job_cronjob:kube_job_status_failed:sum
-            expr: |
-              clamp_max(
-                job_cronjob:kube_job_status_start_time:max,
-                1)
-              * ON(job) GROUP_LEFT()
-              label_replace(
-                label_replace(
-                  (kube_job_status_failed != 0),
-                  "job", "$1", "job_name", "(.+)"),
-                "cronjob", "$1", "label_cronjob", "(.+)")
-          - alert: CronJobStatusFailed
-            expr: |
-              job_cronjob:kube_job_status_failed:sum
-              * ON(cronjob) GROUP_RIGHT()
-              kube_cronjob_labels
-              > 0
-            for: 1m
-            annotations:
-              description: '{{ $labels.cronjob }} last run has failed {{ $value }} times.'
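One way to sanity-check these rules once deployed is to evaluate the recording rules and list
active alerts through the Prometheus HTTP API. This is a sketch: the `prometheus-operated`
service name, the `monitoring` namespace, and the use of `jq` are assumptions; adjust them to
match your installation.

```bash
## Port-forward the operator-managed Prometheus service locally (assumed name/namespace).
kubectl port-forward --namespace monitoring svc/prometheus-operated 9090:9090 &

## Evaluate the recording rule that tracks failed backup CronJob runs.
curl -sG 'http://localhost:9090/api/v1/query' \
  --data-urlencode 'query=job_cronjob:kube_job_status_failed:sum' | jq .

## List currently pending/firing alerts and look for CronJobStatusFailed.
curl -s 'http://localhost:9090/api/v1/alerts' | jq '.data.alerts[].labels.alertname'
```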
diff --git a/contrib/config/monitoring/prometheus/chart-values/dgraph-prometheus-operator.yaml b/contrib/config/monitoring/prometheus/chart-values/dgraph-prometheus-operator.yaml deleted file mode 100644 index 518290cf955..00000000000 --- a/contrib/config/monitoring/prometheus/chart-values/dgraph-prometheus-operator.yaml +++ /dev/null @@ -1,82 +0,0 @@ -prometheusOperator: - createCustomResource: true - -grafana: - enabled: true - persistence: - enabled: true - accessModes: ["ReadWriteOnce"] - size: 5Gi - defaultDashboardsEnabled: true - service: - type: ClusterIP - -alertmanager: - service: - labels: - app: dgraph-io - alertmanagerSpec: - storage: - volumeClaimTemplate: - spec: - accessModes: ["ReadWriteOnce"] - resources: - requests: - storage: 5Gi - replicas: 1 - logLevel: debug - config: - global: - resolve_timeout: 2m - route: - group_by: ['job'] - group_wait: 30s - group_interval: 5m - repeat_interval: 12h - receiver: 'null' - routes: - - match: - alertname: Watchdog - receiver: 'null' - receivers: - - name: 'null' - -prometheus: - service: - type: ClusterIP - serviceAccount: - create: true - name: prometheus-dgraph-io - - prometheusSpec: - storageSpec: - volumeClaimTemplate: - spec: - accessModes: ["ReadWriteOnce"] - resources: - requests: - storage: 25Gi - resources: - requests: - memory: 400Mi - enableAdminAPI: false - - additionalServiceMonitors: - - name: zero-dgraph-io - endpoints: - - port: http-zero - path: /debug/prometheus_metrics - namespaceSelector: - any: true - selector: - matchLabels: - monitor: zero-dgraph-io - - name: alpha-dgraph-io - endpoints: - - port: http-alpha - path: /debug/prometheus_metrics - namespaceSelector: - any: true - selector: - matchLabels: - monitor: alpha-dgraph-io diff --git a/contrib/config/monitoring/prometheus/chart-values/helmfile.yaml b/contrib/config/monitoring/prometheus/chart-values/helmfile.yaml deleted file mode 100644 index 064c2e276f8..00000000000 --- a/contrib/config/monitoring/prometheus/chart-values/helmfile.yaml +++ /dev/null @@ -1,28 +0,0 @@ -repositories: - - name: prometheus-community - url: https://prometheus-community.github.io/helm-charts - - name: stable - url: https://charts.helm.sh/stable - -releases: - - name: my-prometheus - namespace: {{ env "NAMESPACE" | default "monitoring" }} - chart: prometheus-community/kube-prometheus-stack - values: - - ./dgraph-prometheus-operator.yaml - - grafana: - adminPassword: {{ requiredEnv "GRAFANA_ADMIN_PASSWORD" }} - {{/* Dgraph Kubernetes Monitoring Support */}} - {{/* Set DGRAPH_ALERTS_ENABLED=1 to enable alerts for dgraph */}} - {{- if env "DGRAPH_ALERTS_ENABLED" }} - - ./dgraph-app-alert-rules.yaml.gotmpl - {{/* Dgraph Kubernetes CronJob Monitoring Support */}} - {{/* Set DGRAPH_BACKUPS_ALERTS_ENABLED=1 and DGRAPH_ALERTS_ENABLED=1 to enable this feature */}} - {{- if env "DGRAPH_BACKUPS_ALERTS_ENABLED" }} - - ./dgraph-backup-alert-rules.yaml - {{- end }} - {{- if env "PAGERDUTY_INTEGRATION_KEY" }} - - ./alertmanager-pagerduty.yaml.gotmpl - {{- end }} - {{- end }} - disableValidation: true diff --git a/contrib/config/monitoring/prometheus/prometheus.yaml b/contrib/config/monitoring/prometheus/prometheus.yaml deleted file mode 100644 index 36a9e5e35f2..00000000000 --- a/contrib/config/monitoring/prometheus/prometheus.yaml +++ /dev/null @@ -1,100 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: prometheus-dgraph-io - labels: - app: dgraph-io ---- -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRole -metadata: - name: prometheus-dgraph-io - labels: - app: 
dgraph-io
-rules:
-- apiGroups: [""]
-  resources:
-  - nodes
-  - services
-  - endpoints
-  - pods
-  verbs: ["get", "list", "watch"]
-- apiGroups: [""]
-  resources:
-  - configmaps
-  verbs: ["get"]
----
-apiVersion: rbac.authorization.k8s.io/v1beta1
-kind: ClusterRoleBinding
-metadata:
-  name: prometheus-dgraph-io
-  labels:
-    app: dgraph-io
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: prometheus-dgraph-io
-subjects:
-- kind: ServiceAccount
-  name: prometheus-dgraph-io
-  namespace: default
----
-apiVersion: monitoring.coreos.com/v1
-kind: ServiceMonitor
-metadata:
-  name: alpha.dgraph-io
-  labels:
-    app: dgraph-io
-    prometheus: dgraph-io
-spec:
-  namespaceSelector:
-    any: true
-  selector:
-    matchLabels:
-      monitor: alpha-dgraph-io
-  endpoints:
-  - port: http-alpha
-    path: /debug/prometheus_metrics
----
-apiVersion: monitoring.coreos.com/v1
-kind: ServiceMonitor
-metadata:
-  name: zero-dgraph-io
-  labels:
-    app: dgraph-io
-    prometheus: dgraph-io
-spec:
-  namespaceSelector:
-    any: true
-  selector:
-    matchLabels:
-      monitor: zero.dgraph-io
-  endpoints:
-  - port: http-zero
-    path: /debug/prometheus_metrics
----
-apiVersion: monitoring.coreos.com/v1
-kind: Prometheus
-metadata:
-  name: dgraph-io
-  labels:
-    app: prometheus
-spec:
-  serviceAccountName: prometheus-dgraph-io
-  alerting:
-    alertmanagers:
-    - namespace: default
-      name: alertmanager-dgraph-io
-      port: web
-  serviceMonitorSelector:
-    matchLabels:
-      app: dgraph-io
-  resources:
-    requests:
-      memory: 400Mi
-  ruleSelector:
-    matchLabels:
-      app: dgraph-io
-      prometheus: dgraph-io
-      role: alert-rules
-  enableAdminAPI: false
diff --git a/contrib/config/terraform/.gitignore b/contrib/config/terraform/.gitignore
deleted file mode 100644
index 67428311679..00000000000
--- a/contrib/config/terraform/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
-.terraform/
-*.tfvars
-*.tfstate
-*.tfstate.*
diff --git a/contrib/config/terraform/aws/ha/README.md b/contrib/config/terraform/aws/ha/README.md
deleted file mode 100644
index 613aba994f7..00000000000
--- a/contrib/config/terraform/aws/ha/README.md
+++ /dev/null
@@ -1,40 +0,0 @@
-# Highly Available Dgraph on AWS using Terraform
-
-[Terraform](https://terraform.io/) automates the process of spinning up the EC2 instances, setting
-them up, and running Dgraph on them. This setup deploys Dgraph in HA mode on AWS.
-
-Here are the steps to follow:
-
-1. You must have an AWS account set up.
-
-2. [Download](https://terraform.io/downloads.html) and install terraform.
-
-3. Create a `terraform.tfvars` file similar to that of
-   [terraform.tfvars.example](./terraform.tfvars.example) and edit the variables inside
-   accordingly. You can override any variable present in [variables.tf](./variables.tf) by
-   providing an explicit value in the `terraform.tfvars` file.
-
-4. Execute the following commands:
-
-```sh
-terraform init
-terraform plan
-terraform apply
-```
-
-The output of `terraform apply` will contain the Load Balancer DNS name configured with the setup.
-Dgraph-ratel will be available on `:8000`. Change the server URL in the dashboard to
-`:8080` and start playing with Dgraph.
-
-5. Use `terraform destroy` to delete the setup and restore the previous state.
-
-**Note**
-
-- The terraform setup has been tested to work well with AWS
-  [m5](https://aws.amazon.com/ec2/instance-types/m5/) instances.
-
-- AWS ALBs (Application Load Balancers) configured with this template do not support gRPC load
  balancing.
To get the best performance out of the Dgraph cluster, you can use an externally - configured load balancer with gRPC capabilities like - [HA Proxy](https://www.haproxy.com/blog/haproxy-1-9-2-adds-grpc-support/) or - [Nginx](https://www.nginx.com/blog/nginx-1-13-10-grpc/). diff --git a/contrib/config/terraform/aws/ha/aws/auto_scaling_group/main.tf b/contrib/config/terraform/aws/ha/aws/auto_scaling_group/main.tf deleted file mode 100644 index 1a31c5071ae..00000000000 --- a/contrib/config/terraform/aws/ha/aws/auto_scaling_group/main.tf +++ /dev/null @@ -1,26 +0,0 @@ -resource "aws_autoscaling_group" "dgraph" { - name = var.deployment_name - - max_size = var.instance_count + 1 - min_size = var.instance_count - 1 - desired_capacity = var.instance_count - - vpc_zone_identifier = [var.subnet_id] - - launch_template { - id = var.launch_template_id - version = "$Latest" - } - - tag { - key = "name" - value = var.deployment_name - propagate_at_launch = true - } - - timeouts { - delete = "15m" - } - - target_group_arns = [var.target_group_arn] -} diff --git a/contrib/config/terraform/aws/ha/aws/auto_scaling_group/outputs.tf b/contrib/config/terraform/aws/ha/aws/auto_scaling_group/outputs.tf deleted file mode 100644 index 07ac8103c59..00000000000 --- a/contrib/config/terraform/aws/ha/aws/auto_scaling_group/outputs.tf +++ /dev/null @@ -1,4 +0,0 @@ -output "id" { - description = "ID of the autoscaling group created." - value = aws_autoscaling_group.dgraph.id -} diff --git a/contrib/config/terraform/aws/ha/aws/auto_scaling_group/variables.tf b/contrib/config/terraform/aws/ha/aws/auto_scaling_group/variables.tf deleted file mode 100644 index 8fbce549b0a..00000000000 --- a/contrib/config/terraform/aws/ha/aws/auto_scaling_group/variables.tf +++ /dev/null @@ -1,24 +0,0 @@ -variable "deployment_name" { - type = string - description = "Name of the ASG deployment." -} - -variable "instance_count" { - type = number - description = "Desired instance count for the autoscaling group." -} - -variable "launch_template_id" { - type = string - description = "Launch configuration template ID." -} - -variable "subnet_id" { - type = string - description = "Subnet ID for the VPC zone" -} - -variable "target_group_arn" { - type = string - description = "Target group ARN to associate with the autoscaling group." -} diff --git a/contrib/config/terraform/aws/ha/aws/instance/main.tf b/contrib/config/terraform/aws/ha/aws/instance/main.tf deleted file mode 100644 index 6e2ce3f3b4c..00000000000 --- a/contrib/config/terraform/aws/ha/aws/instance/main.tf +++ /dev/null @@ -1,55 +0,0 @@ -resource "aws_network_interface" "dgraph" { - count = var.instance_count - - subnet_id = var.subnet_id - private_ips = [var.private_ips[count.index].outputs["private"]] - security_groups = [var.sg_id] - - tags = { - Name = "${var.deployment_name}-interface-${count.index}" - } -} - -resource "aws_instance" "dgraph" { - count = var.instance_count - - ami = var.ami_id - instance_type = var.instance_type - - disable_api_termination = false - key_name = var.key_pair_name - - network_interface { - network_interface_id = aws_network_interface.dgraph[count.index].id - device_index = 0 - } - - credit_specification { - cpu_credits = "standard" - } - - dynamic "root_block_device" { - for_each = var.io_optimized == "false" ? [] : ["io1"] - content { - volume_size = var.disk_size - delete_on_termination = false - volume_type = root_block_device.value - iops = var.disk_iops - } - } - - dynamic "root_block_device" { - for_each = var.io_optimized == "false" ? 
[] : ["standard"] - content { - volume_size = var.disk_size - delete_on_termination = false - volume_type = root_block_device.value - } - } - - user_data = base64encode(var.user_scripts[count.index].rendered) - - tags = { - Name = var.deployment_name - } -} diff --git a/contrib/config/terraform/aws/ha/aws/instance/outputs.tf b/contrib/config/terraform/aws/ha/aws/instance/outputs.tf deleted file mode 100644 index f62effda254..00000000000 --- a/contrib/config/terraform/aws/ha/aws/instance/outputs.tf +++ /dev/null @@ -1,4 +0,0 @@ -output "instance_ids" { - description = "IDs of all the instances created" - value = aws_instance.dgraph[*].id -} diff --git a/contrib/config/terraform/aws/ha/aws/instance/variables.tf b/contrib/config/terraform/aws/ha/aws/instance/variables.tf deleted file mode 100644 index 62b3a742767..00000000000 --- a/contrib/config/terraform/aws/ha/aws/instance/variables.tf +++ /dev/null @@ -1,58 +0,0 @@ -variable "deployment_name" { - type = string - description = "Name to associate with the created instance." -} - -variable "disk_size" { - type = string - description = "Disk size to associate with the running instance." -} - -variable "io_optimized" { - type = string - description = "Should we attach an IO optimized disk to the instance." - default = "false" -} - -variable "disk_iops" { - type = number - description = "IOPS limit for the disk associated with the instance." -} - -variable "instance_type" { - type = string - description = "AWS instance type to launch." -} - -variable "instance_count" { - type = number - description = "Number of AWS instances to create." -} - -variable "ami_id" { - type = string - description = "AMI to launch the instance with." -} - -variable "key_pair_name" { - type = string - description = "AWS key-pair name to associate with the launched instance for SSH access" -} - -variable "sg_id" { - type = string - description = "AWS VPC security groups to associate with the instance." -} - -variable "subnet_id" { - type = string - description = "Subnet ID for the launch template" -} - -variable "user_scripts" { - description = "User provided scripts(len = instance_count) to run during the instance startup." -} - -variable "private_ips" { - description = "Custom private IP addresses to associate with the instances." -} diff --git a/contrib/config/terraform/aws/ha/aws/launch_template/main.tf b/contrib/config/terraform/aws/ha/aws/launch_template/main.tf deleted file mode 100644 index bd0a199de32..00000000000 --- a/contrib/config/terraform/aws/ha/aws/launch_template/main.tf +++ /dev/null @@ -1,58 +0,0 @@ -# -------------------------------------------------------------------------------- -# AWS Launch template for configuring EC2 instances. 
-# -------------------------------------------------------------------------------- -resource "aws_launch_template" "dgraph" { - name = var.deployment_name - description = "Launch template for dgraph(${var.deployment_name}) instances" - - block_device_mappings { - device_name = "/dev/sda1" - - ebs { - volume_size = var.disk_size - volume_type = "io1" - iops = var.disk_iops - delete_on_termination = false - } - } - - capacity_reservation_specification { - capacity_reservation_preference = "open" - } - - credit_specification { - cpu_credits = "standard" - } - - disable_api_termination = false - # ebs_optimized = true - - image_id = var.ami_id - - instance_initiated_shutdown_behavior = "terminate" - - instance_type = var.instance_type - key_name = var.key_pair_name - - monitoring { - enabled = true - } - - network_interfaces { - delete_on_termination = true - associate_public_ip_address = false - subnet_id = var.subnet_id - - security_groups = [var.vpc_sg_id] - } - - tag_specifications { - resource_type = "instance" - - tags = { - Name = var.deployment_name - } - } - - user_data = var.user_script -} diff --git a/contrib/config/terraform/aws/ha/aws/launch_template/outputs.tf b/contrib/config/terraform/aws/ha/aws/launch_template/outputs.tf deleted file mode 100644 index 6288044c771..00000000000 --- a/contrib/config/terraform/aws/ha/aws/launch_template/outputs.tf +++ /dev/null @@ -1,4 +0,0 @@ -output "id" { - description = "ID of the launch template created." - value = aws_launch_template.dgraph.id -} diff --git a/contrib/config/terraform/aws/ha/aws/launch_template/variables.tf b/contrib/config/terraform/aws/ha/aws/launch_template/variables.tf deleted file mode 100644 index 2c84289d56d..00000000000 --- a/contrib/config/terraform/aws/ha/aws/launch_template/variables.tf +++ /dev/null @@ -1,44 +0,0 @@ -variable "deployment_name" { - type = string - description = "Name to associate with the launch template configuration" -} - -variable "disk_size" { - type = string - description = "Disk size to associate with the instance running through the launch template." -} - -variable "disk_iops" { - type = number - description = "IOPS limit for the disk associated with the instance." -} - -variable "instance_type" { - type = string - description = "Type of instance to launch from the launch template." -} - -variable "ami_id" { - type = string - description = "AMI to launch the instance with." -} - -variable "key_pair_name" { - type = string - description = "AWS key-pair name to associate with the launched instance for SSH access" -} - -variable "vpc_sg_id" { - type = string - description = "AWS VPC security groups to associate with the instance." -} - -variable "subnet_id" { - type = string - description = "Subnet ID for the launch template" -} - -variable "user_script" { - type = string - description = "User provided script to run during the instance startup." 
-}
diff --git a/contrib/config/terraform/aws/ha/aws/load_balancer/lb_listner/main.tf b/contrib/config/terraform/aws/ha/aws/load_balancer/lb_listner/main.tf
deleted file mode 100644
index 5679905087b..00000000000
--- a/contrib/config/terraform/aws/ha/aws/load_balancer/lb_listner/main.tf
+++ /dev/null
@@ -1,10 +0,0 @@
-resource "aws_lb_listener" "dgraph" {
-  load_balancer_arn = var.load_balancer_arn
-  port              = var.port
-  protocol          = var.protocol
-
-  default_action {
-    type             = "forward"
-    target_group_arn = var.target_group_arn
-  }
-}
diff --git a/contrib/config/terraform/aws/ha/aws/load_balancer/lb_listner/variables.tf b/contrib/config/terraform/aws/ha/aws/load_balancer/lb_listner/variables.tf
deleted file mode 100644
index e190e9c7f42..00000000000
--- a/contrib/config/terraform/aws/ha/aws/load_balancer/lb_listner/variables.tf
+++ /dev/null
@@ -1,20 +0,0 @@
-variable "load_balancer_arn" {
-  type        = string
-  description = "ARN of the load balancer to attach the listener to."
-}
-
-variable "target_group_arn" {
-  type        = string
-  description = "ARN of the target group to forward the request on for the listener rule."
-}
-
-variable "port" {
-  type        = string
-  description = "Port the listener listens on in the load balancer."
-}
-
-variable "protocol" {
-  type        = string
-  description = "Protocol for the listener to respond to, defaults to HTTP."
-  default     = "HTTP"
-}
diff --git a/contrib/config/terraform/aws/ha/aws/load_balancer/main.tf b/contrib/config/terraform/aws/ha/aws/load_balancer/main.tf
deleted file mode 100644
index 60b2647cf70..00000000000
--- a/contrib/config/terraform/aws/ha/aws/load_balancer/main.tf
+++ /dev/null
@@ -1,22 +0,0 @@
-resource "aws_lb" "dgraph" {
-  name = var.deployment_name
-
-  internal           = false
-  load_balancer_type = "application"
-
-  security_groups = [var.sg_id]
-  subnets         = [var.subnet_id, var.secondary_subnet_id]
-
-  enable_deletion_protection = false
-  enable_http2               = true
-
-  # access_logs {
-  #   bucket  = "${aws_s3_bucket.lb_logs.bucket}"
-  #   prefix  = "test-lb"
-  #   enabled = true
-  # }
-
-  tags = {
-    Name = var.deployment_name
-  }
-}
diff --git a/contrib/config/terraform/aws/ha/aws/load_balancer/outputs.tf b/contrib/config/terraform/aws/ha/aws/load_balancer/outputs.tf
deleted file mode 100644
index 0ef29a65f41..00000000000
--- a/contrib/config/terraform/aws/ha/aws/load_balancer/outputs.tf
+++ /dev/null
@@ -1,14 +0,0 @@
-output "dns_name" {
-  description = "DNS name of the load balancer created."
-  value       = aws_lb.dgraph.dns_name
-}
-
-output "id" {
-  description = "ID of the created load balancer resource."
-  value       = aws_lb.dgraph.id
-}
-
-output "arn" {
-  description = "ARN of the created load balancer resource."
-  value       = aws_lb.dgraph.arn
-}
diff --git a/contrib/config/terraform/aws/ha/aws/load_balancer/variables.tf b/contrib/config/terraform/aws/ha/aws/load_balancer/variables.tf
deleted file mode 100644
index 4b0e4c9b557..00000000000
--- a/contrib/config/terraform/aws/ha/aws/load_balancer/variables.tf
+++ /dev/null
@@ -1,19 +0,0 @@
-variable "deployment_name" {
-  type        = string
-  description = "Name to associate with the created load balancer resource."
-}
-
-variable "sg_id" {
-  type        = string
-  description = "Security group to associate with the load balancer."
-}
-
-variable "subnet_id" {
-  type        = string
-  description = "Subnet ID for the load balancer."
-}
-
-variable "secondary_subnet_id" {
-  type        = string
-  description = "Secondary subnet ID for the load balancer; this must be in a different zone than subnet_id."
-}
diff --git a/contrib/config/terraform/aws/ha/aws/target_group/main.tf b/contrib/config/terraform/aws/ha/aws/target_group/main.tf
deleted file mode 100644
index 1b0d9054ff3..00000000000
--- a/contrib/config/terraform/aws/ha/aws/target_group/main.tf
+++ /dev/null
@@ -1,17 +0,0 @@
-resource "aws_lb_target_group" "dgraph" {
-  name     = var.deployment_name
-  port     = var.port
-  protocol = var.protocol
-  vpc_id   = var.vpc_id
-
-  health_check {
-    enabled  = true
-    interval = var.health_check_interval
-    path     = var.health_check_path
-    port     = var.port
-    timeout  = var.timeout
-
-    healthy_threshold   = 2
-    unhealthy_threshold = 3
-  }
-}
diff --git a/contrib/config/terraform/aws/ha/aws/target_group/outputs.tf b/contrib/config/terraform/aws/ha/aws/target_group/outputs.tf
deleted file mode 100644
index 35ea13b98dd..00000000000
--- a/contrib/config/terraform/aws/ha/aws/target_group/outputs.tf
+++ /dev/null
@@ -1,9 +0,0 @@
-output "arn" {
-  description = "ARN of the target group created."
-  value       = aws_lb_target_group.dgraph.arn
-}
-
-output "id" {
-  description = "ID of the target group created."
-  value       = aws_lb_target_group.dgraph.id
-}
diff --git a/contrib/config/terraform/aws/ha/aws/target_group/variables.tf b/contrib/config/terraform/aws/ha/aws/target_group/variables.tf
deleted file mode 100644
index 54c0a7e0952..00000000000
--- a/contrib/config/terraform/aws/ha/aws/target_group/variables.tf
+++ /dev/null
@@ -1,38 +0,0 @@
-variable "deployment_name" {
-  type        = string
-  description = "Name to associate with the created load balancer target group resource."
-}
-
-variable "port" {
-  type        = number
-  description = "Port for the load balancer target group."
-}
-
-variable "vpc_id" {
-  type        = string
-  description = "VPC ID of the dgraph cluster we created."
-}
-
-variable "health_check_interval" {
-  type        = number
-  description = "Periodic health check interval time, defaults to 10."
-  default     = 10
-}
-
-variable "timeout" {
-  type        = number
-  description = "Timeout for the health check corresponding to the target group, defaults to 5."
-  default     = 5
-}
-
-variable "health_check_path" {
-  type        = string
-  description = "Path for health check of the target group, defaults to /health."
-  default     = "/health"
-}
-
-variable "protocol" {
-  type        = string
-  description = "Protocol to use for health check, defaults to HTTP."
-  default     = "HTTP"
-}
diff --git a/contrib/config/terraform/aws/ha/aws/vpc/data.tf b/contrib/config/terraform/aws/ha/aws/vpc/data.tf
deleted file mode 100644
index 7865bc8ad32..00000000000
--- a/contrib/config/terraform/aws/ha/aws/vpc/data.tf
+++ /dev/null
@@ -1,3 +0,0 @@
-data "aws_availability_zones" "az" {
-  state = "available"
-}
diff --git a/contrib/config/terraform/aws/ha/aws/vpc/main.tf b/contrib/config/terraform/aws/ha/aws/vpc/main.tf
deleted file mode 100644
index 7d1d912039c..00000000000
--- a/contrib/config/terraform/aws/ha/aws/vpc/main.tf
+++ /dev/null
@@ -1,231 +0,0 @@
-# The architecture of the VPC is as follows:
-#   * Primary Subnet - Private subnet where everything is deployed.
-#   * Secondary Subnet - Public subnet from where we route things to the internet.
-#
-# In the primary subnet we deploy all the applications that dgraph is concerned with; since
-# this subnet is private, these instances cannot be accessed from outside the VPC.
-#
-# The primary subnet contains a route table with an entry that routes all traffic
-# destined for 0.0.0.0/0 via the NAT gateway we have configured. This allows
-# access from inside the instance to the outside world.
-# -# The nat instance gateway and the internet gateway are then deployed in the other subnet -# which is public. The route table entry of this subnet routes all the traffic destined -# to 0.0.0.0/0 via internet gateway so that it is accessible. -# -# A typical outbound connection from dgraph instance to google.com looks something like this -# Instance --> Route --> NAT Instance(in public subnet) --> Route --> Internet Gateway(in public subnet) -resource "aws_vpc" "dgraph" { - cidr_block = var.cidr_block - enable_dns_support = true - instance_tenancy = "dedicated" - - # For enabling assignment of private dns addresses within AWS. - enable_dns_hostnames = true - - tags = { - Name = var.name - } -} - -resource "aws_eip" "dgraph_nat" { - vpc = true -} - -resource "aws_internet_gateway" "dgraph_gw" { - vpc_id = aws_vpc.dgraph.id - - tags = { - Name = var.name - } -} - -resource "aws_nat_gateway" "dgraph_gw" { - allocation_id = aws_eip.dgraph_nat.id - subnet_id = aws_subnet.dgraph_secondary.id - - tags = { - Name = var.name - } -} - -resource "aws_route_table" "dgraph_igw" { - vpc_id = aws_vpc.dgraph.id - - route { - cidr_block = "0.0.0.0/0" - gateway_id = aws_internet_gateway.dgraph_gw.id - } - - tags = { - Name = var.name - } -} - -resource "aws_route_table_association" "internet_gw" { - subnet_id = aws_subnet.dgraph_secondary.id - route_table_id = aws_route_table.dgraph_igw.id -} - -resource "aws_main_route_table_association" "dgraph" { - vpc_id = aws_vpc.dgraph.id - route_table_id = aws_route_table.dgraph_igw.id -} - -resource "aws_route_table" "dgraph_ngw" { - vpc_id = aws_vpc.dgraph.id - - route { - cidr_block = "0.0.0.0/0" - nat_gateway_id = aws_nat_gateway.dgraph_gw.id - } - - tags = { - Name = var.name - } -} - -resource "aws_route_table_association" "nat_gw" { - subnet_id = aws_subnet.dgraph.id - route_table_id = aws_route_table.dgraph_ngw.id -} - -resource "aws_subnet" "dgraph" { - vpc_id = aws_vpc.dgraph.id - cidr_block = var.subnet_cidr_block - - availability_zone_id = data.aws_availability_zones.az.zone_ids[0] - - tags = { - Name = var.name - } -} - -resource "aws_subnet" "dgraph_secondary" { - vpc_id = aws_vpc.dgraph.id - cidr_block = var.secondary_subnet_cidr_block - - availability_zone_id = data.aws_availability_zones.az.zone_ids[1] - - tags = { - Name = var.name - Type = "secondary-subnet" - } -} - -resource "aws_security_group" "dgraph_client" { - name = "dgraph-cluster-client" - description = "Security group that can be used by the client to connect to the dgraph cluster alpha and ratel instance using ALB." - vpc_id = aws_vpc.dgraph.id - - egress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = [var.cidr_block] - } -} - -resource "aws_security_group" "dgraph_alb" { - name = "dgraph-alb" - description = "Security group associated with the dgraph loadbalancer sitting in front of alpha and ratel instances." - vpc_id = aws_vpc.dgraph.id - - ingress { - from_port = 8000 - to_port = 8000 - protocol = "tcp" - - security_groups = [aws_security_group.dgraph_client.id] - } - - ingress { - from_port = 8080 - to_port = 8080 - protocol = "tcp" - - security_groups = [aws_security_group.dgraph_client.id] - } - - # Egress to the alpha and ratel instances port only. 
- egress { - from_port = 8080 - to_port = 8080 - protocol = "tcp" - - cidr_blocks = [var.subnet_cidr_block] - } - - egress { - from_port = 8000 - to_port = 8000 - protocol = "tcp" - - cidr_blocks = [var.subnet_cidr_block] - } -} - -resource "aws_security_group" "dgraph_services" { - name = "dgraph-services" - description = "Allow all traffic associated with this security group." - vpc_id = aws_vpc.dgraph.id - - ingress { - from_port = 5080 - to_port = 5080 - protocol = "tcp" - cidr_blocks = [var.subnet_cidr_block] - description = "For zero internal GRPC communication." - } - - ingress { - from_port = 6080 - to_port = 6080 - protocol = "tcp" - cidr_blocks = [var.cidr_block] - description = "For zero external GRPC communication." - } - - ingress { - from_port = 7080 - to_port = 7080 - protocol = "tcp" - cidr_blocks = [var.subnet_cidr_block] - description = "For alpha internal GRPC communication." - } - - ingress { - from_port = 8000 - to_port = 8000 - protocol = "tcp" - - security_groups = [aws_security_group.dgraph_alb.id] - description = "For external ratel communication, this is opened to everyone to try." - } - - ingress { - from_port = 8080 - to_port = 8080 - protocol = "tcp" - - security_groups = [aws_security_group.dgraph_alb.id] - description = "For alpha external HTTP communication." - } - - ingress { - from_port = 9080 - to_port = 9080 - protocol = "tcp" - cidr_blocks = [var.cidr_block] - description = "For alpha external GRPC communication." - } - - # Allow egress to everywhere from within any instance in the cluster, this - # is useful for bootstrap of the instance. - egress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } -} diff --git a/contrib/config/terraform/aws/ha/aws/vpc/outputs.tf b/contrib/config/terraform/aws/ha/aws/vpc/outputs.tf deleted file mode 100644 index 55068e0412a..00000000000 --- a/contrib/config/terraform/aws/ha/aws/vpc/outputs.tf +++ /dev/null @@ -1,34 +0,0 @@ -output "vpc_id" { - value = aws_vpc.dgraph.id - description = "ID of the VPC created using the module" -} - -output "subnet_id" { - value = aws_subnet.dgraph.id - description = "ID of the subnet created within the VPC for dgraph" -} - -output "secondary_subnet_id" { - value = aws_subnet.dgraph_secondary.id - description = "ID of the secondary subnet created within the VPC for dgraph" -} - -output "default_sg_id" { - value = aws_vpc.dgraph.default_security_group_id - description = "Default security group ID created with the VPC." -} - -output "sg_id" { - value = aws_security_group.dgraph_services.id - description = "Security group ID for the auxiliary security group created for dgraph." -} - -output "alb_sg_id" { - value = aws_security_group.dgraph_alb.id - description = "Security group ID of the sg associated with the load balancer." -} - -output "client_sg_id" { - value = aws_security_group.dgraph_client.id - description = "Security group that can be used by the client to connect to the dgraph cluster alpha and ratel instance using ALB." -} diff --git a/contrib/config/terraform/aws/ha/aws/vpc/variables.tf b/contrib/config/terraform/aws/ha/aws/vpc/variables.tf deleted file mode 100644 index 8fe74b9ceee..00000000000 --- a/contrib/config/terraform/aws/ha/aws/vpc/variables.tf +++ /dev/null @@ -1,19 +0,0 @@ -variable "name" { - type = string - description = "Name tag to apply to AWS VPC we are creating for dgraph" -} - -variable "cidr_block" { - type = string - description = "CIDR block to associate with the VPC." 
-}
-
-variable "subnet_cidr_block" {
-  type        = string
-  description = "CIDR block for the subnet."
-}
-
-variable "secondary_subnet_cidr_block" {
-  type        = string
-  description = "Secondary CIDR block for the subnet to create within the VPC. This public subnet hosts the NAT and internet gateways."
-}
diff --git a/contrib/config/terraform/aws/ha/dgraph/alpha/data.tf b/contrib/config/terraform/aws/ha/dgraph/alpha/data.tf
deleted file mode 100644
index 8da7a33421b..00000000000
--- a/contrib/config/terraform/aws/ha/dgraph/alpha/data.tf
+++ /dev/null
@@ -1,17 +0,0 @@
-data "template_file" "service_template" {
-  template = file("${path.module}/../../templates/dgraph-alpha.service.tmpl")
-
-  vars = {
-    healthy_zero_ip = var.healthy_zero_ip
-  }
-}
-
-data "template_file" "setup_template" {
-  template = file("${path.module}/../../templates/setup-systemd-service.sh.tmpl")
-
-  vars = {
-    systemd_service = data.template_file.service_template.rendered
-    service_name    = "dgraph-alpha"
-    dgraph_version  = var.dgraph_version
-  }
-}
diff --git a/contrib/config/terraform/aws/ha/dgraph/alpha/main.tf b/contrib/config/terraform/aws/ha/dgraph/alpha/main.tf
deleted file mode 100644
index f73873fd3ef..00000000000
--- a/contrib/config/terraform/aws/ha/dgraph/alpha/main.tf
+++ /dev/null
@@ -1,48 +0,0 @@
-locals {
-  deployment_name = "${var.name}-alpha"
-  alpha_port      = 8080
-}
-
-module "aws_lt" {
-  source = "./../../aws/launch_template"
-
-  deployment_name = local.deployment_name
-  disk_size       = var.disk_size
-  disk_iops       = var.disk_iops
-  instance_type   = var.instance_type
-
-  ami_id    = var.ami_id
-  vpc_sg_id = var.sg_id
-  subnet_id = var.subnet_id
-
-  key_pair_name = var.key_pair_name
-  user_script   = base64encode(data.template_file.setup_template.rendered)
-}
-
-module "aws_tg" {
-  source = "./../../aws/target_group"
-
-  vpc_id = var.vpc_id
-  port   = local.alpha_port
-
-  deployment_name = local.deployment_name
-}
-
-module "aws_lb_listner" {
-  source = "./../../aws/load_balancer/lb_listner"
-
-  load_balancer_arn = var.lb_arn
-  target_group_arn  = module.aws_tg.arn
-
-  port = local.alpha_port
-}
-
-module "aws_asg" {
-  source = "./../../aws/auto_scaling_group"
-
-  deployment_name    = local.deployment_name
-  launch_template_id = module.aws_lt.id
-  subnet_id          = var.subnet_id
-  instance_count     = var.instance_count
-  target_group_arn   = module.aws_tg.arn
-}
diff --git a/contrib/config/terraform/aws/ha/dgraph/alpha/outputs.tf b/contrib/config/terraform/aws/ha/dgraph/alpha/outputs.tf
deleted file mode 100644
index 3c2d6f7be2e..00000000000
--- a/contrib/config/terraform/aws/ha/dgraph/alpha/outputs.tf
+++ /dev/null
@@ -1,18 +0,0 @@
-output "alpha_completed" {
-  value = true
-}
-
-output "target_group_id" {
-  description = "ID of the target group associated with the alpha autoscaling group."
-  value       = module.aws_tg.id
-}
-
-output "auto_scaling_group_id" {
-  description = "ID of the autoscaling group created for dgraph alpha nodes."
-  value       = module.aws_asg.id
-}
-
-output "alpha_port" {
-  description = "HTTP port for dgraph alpha component."
-  value       = local.alpha_port
-}
diff --git a/contrib/config/terraform/aws/ha/dgraph/alpha/variables.tf b/contrib/config/terraform/aws/ha/dgraph/alpha/variables.tf
deleted file mode 100644
index d1f3a653491..00000000000
--- a/contrib/config/terraform/aws/ha/dgraph/alpha/variables.tf
+++ /dev/null
@@ -1,64 +0,0 @@
-variable "name" {
-  type        = string
-  description = "Name of the dgraph deployment"
-}
-
-variable "instance_count" {
-  type        = number
-  description = "Number of dgraph alphas to run in the cluster, defaults to 3."
-}
-
-variable "instance_type" {
-  type        = string
-  description = "EC2 Instance type for dgraph alpha component."
-}
-
-variable "disk_size" {
-  type        = string
-  description = "Disk size for dgraph alpha node."
-}
-
-variable "disk_iops" {
-  type        = number
-  description = "IOPS limit for the disk associated with the instance."
-}
-
-variable "vpc_id" {
-  type        = string
-  description = "VPC ID of the dgraph cluster we created."
-}
-
-variable "lb_arn" {
-  type        = string
-  description = "Resource ARN of the dgraph load balancer."
-}
-
-variable "sg_id" {
-  type        = string
-  description = "Security group ID for the created dgraph VPC."
-}
-
-variable "subnet_id" {
-  type        = string
-  description = "Subnet ID within VPC for dgraph deployment."
-}
-
-variable "ami_id" {
-  type        = string
-  description = "AMI to use for the instances"
-}
-
-variable "key_pair_name" {
-  type        = string
-  description = "Key Pair name to associate with the instances."
-}
-
-variable "healthy_zero_ip" {
-  type        = string
-  description = "IP address of any healthy zero to which dgraph alpha can talk."
-}
-
-variable "dgraph_version" {
-  type        = string
-  description = "Dgraph version for installation."
-}
diff --git a/contrib/config/terraform/aws/ha/dgraph/main.tf b/contrib/config/terraform/aws/ha/dgraph/main.tf
deleted file mode 100644
index 4183ca3d7ef..00000000000
--- a/contrib/config/terraform/aws/ha/dgraph/main.tf
+++ /dev/null
@@ -1,105 +0,0 @@
-locals {
-  deployment_name = "${var.name}-dgraph"
-}
-
-resource "aws_key_pair" "dgraph_key" {
-  key_name   = var.key_pair_name
-  public_key = var.public_key
-}
-
-module "aws_vpc" {
-  source = "./../aws/vpc"
-
-  name              = local.deployment_name
-  cidr_block        = var.cidr_block
-  subnet_cidr_block = var.subnet_cidr_block
-
-  secondary_subnet_cidr_block = var.secondary_subnet_cidr_block
-}
-
-module "aws_lb" {
-  source = "./../aws/load_balancer"
-
-  deployment_name     = local.deployment_name
-  subnet_id           = module.aws_vpc.subnet_id
-  secondary_subnet_id = module.aws_vpc.secondary_subnet_id
-  sg_id               = module.aws_vpc.alb_sg_id
-}
-
-module "zero" {
-  source = "./zero"
-
-  ami_id = var.ami_id
-
-  name           = local.deployment_name
-  vpc_id         = module.aws_vpc.vpc_id
-  sg_id          = module.aws_vpc.sg_id
-  instance_count = var.zero_count
-
-  subnet_id = module.aws_vpc.subnet_id
-  lb_arn    = module.aws_lb.arn
-
-  instance_type = var.zero_instance_type
-  disk_size     = var.zero_disk_size
-  disk_iops     = var.disk_iops
-
-  key_pair_name     = var.key_pair_name
-  subnet_cidr_block = var.subnet_cidr_block
-
-  dgraph_version = var.dgraph_version
-}
-
-module "alpha" {
-  source = "./alpha"
-
-  ami_id = var.ami_id
-
-  name           = local.deployment_name
-  vpc_id         = module.aws_vpc.vpc_id
-  sg_id          = module.aws_vpc.sg_id
-  instance_count = var.alpha_count
-
-  subnet_id = module.aws_vpc.subnet_id
-  lb_arn    = module.aws_lb.arn
-
-  instance_type = var.alpha_instance_type
-  disk_size     = var.alpha_disk_size
-  disk_iops     = var.disk_iops
-
-  key_pair_name   = var.key_pair_name
-  healthy_zero_ip = module.zero.healthy_zero_ip
-
-  dgraph_version = var.dgraph_version
-
-  # We first initialize zeros and then alphas, because to start the alphas
-  # we need the address of a healthy zero.
-  # Terraform 0.12 does not support depends_on for modules; use this later on.
-  # depends_on = [module.zero]
-}
-
-module "ratel" {
-  source = "./ratel"
-
-  ami_id = var.ami_id
-
-  name   = local.deployment_name
-  vpc_id = module.aws_vpc.vpc_id
-  sg_id  = module.aws_vpc.sg_id
-
-  subnet_id = module.aws_vpc.subnet_id
-  lb_arn    = module.aws_lb.arn
-
-  instance_type = var.ratel_instance_type
-  disk_size     = var.ratel_disk_size
-  disk_iops     = var.disk_iops
-
-  key_pair_name     = var.key_pair_name
-  alpha_completed   = module.alpha.alpha_completed
-  subnet_cidr_block = var.subnet_cidr_block
-
-  dgraph_version = var.dgraph_version
-
-  # Temporary, to create a dependency between alpha and ratel; later use
-  # depends_on for explicit dependency specification.
-  # depends_on = [module.alpha]
-}
diff --git a/contrib/config/terraform/aws/ha/dgraph/outputs.tf b/contrib/config/terraform/aws/ha/dgraph/outputs.tf
deleted file mode 100644
index 7c4a935ba7e..00000000000
--- a/contrib/config/terraform/aws/ha/dgraph/outputs.tf
+++ /dev/null
@@ -1,54 +0,0 @@
-output "vpc_id" {
-  description = "ID of the VPC created for the dgraph cluster."
-  value       = module.aws_vpc.vpc_id
-}
-
-output "client_sg_id" {
-  description = "Security group that can be used with the client."
-  value       = module.aws_vpc.client_sg_id
-}
-
-output "lb_dns_name" {
-  description = "DNS associated with the application load balancer created for dgraph."
-  value       = module.aws_lb.dns_name
-}
-
-output "healthy_zero_ip" {
-  description = "IP address of a healthy zero (initial zero) created."
-  value       = module.zero.healthy_zero_ip
-}
-
-output "zero_private_ips" {
-  description = "IP addresses of the created dgraph zero instances."
-  value       = module.zero.private_ips
-}
-
-output "ratel_target_group_id" {
-  description = "Target group associated with the created ratel instances."
-  value       = module.ratel.target_group_id
-}
-
-output "ratel_private_ips" {
-  description = "IP addresses of the created dgraph ratel instances."
-  value       = module.ratel.private_ips
-}
-
-output "ratel_port" {
-  description = "Port to which ratel UI server is listening."
-  value       = module.ratel.ratel_port
-}
-
-output "alpha_target_group_id" {
-  description = "ID of the target group associated with the alpha autoscaling group."
-  value       = module.alpha.target_group_id
-}
-
-output "alpha_auto_scaling_group_id" {
-  description = "ID of the autoscaling group created for dgraph alpha nodes."
-  value       = module.alpha.auto_scaling_group_id
-}
-
-output "alpha_port" {
-  description = "HTTP port for dgraph alpha component."
- value = module.alpha.alpha_port -} diff --git a/contrib/config/terraform/aws/ha/dgraph/ratel/data.tf b/contrib/config/terraform/aws/ha/dgraph/ratel/data.tf deleted file mode 100644 index 3c5248c7533..00000000000 --- a/contrib/config/terraform/aws/ha/dgraph/ratel/data.tf +++ /dev/null @@ -1,22 +0,0 @@ -locals { - ratel_service = file("${path.module}/../../templates/dgraph-ratel.service.tmpl") -} - -data "template_file" "setup_template" { - count = var.instance_count - template = file("${path.module}/../../templates/setup-systemd-service.sh.tmpl") - - vars = { - systemd_service = local.ratel_service - service_name = "dgraph-ratel" - dgraph_version = var.dgraph_version - } -} - -data "null_data_source" "ips" { - count = var.instance_count - - inputs = { - private = cidrhost(var.subnet_cidr_block, count.index + 5) - } -} diff --git a/contrib/config/terraform/aws/ha/dgraph/ratel/main.tf b/contrib/config/terraform/aws/ha/dgraph/ratel/main.tf deleted file mode 100644 index 368f345b16a..00000000000 --- a/contrib/config/terraform/aws/ha/dgraph/ratel/main.tf +++ /dev/null @@ -1,49 +0,0 @@ -locals { - deployment_name = "${var.name}-ratel" - ratel_port = 8000 -} - -module "aws_tg" { - source = "./../../aws/target_group" - - vpc_id = var.vpc_id - port = local.ratel_port - - deployment_name = local.deployment_name - health_check_path = "/" -} - -module "aws_lb_listner" { - source = "./../../aws/load_balancer/lb_listner" - - load_balancer_arn = var.lb_arn - target_group_arn = module.aws_tg.arn - - port = local.ratel_port -} - -module "aws_instance" { - source = "./../../aws/instance" - - deployment_name = local.deployment_name - - disk_size = var.disk_size - disk_iops = var.disk_iops - instance_type = var.instance_type - ami_id = var.ami_id - key_pair_name = var.key_pair_name - sg_id = var.sg_id - subnet_id = var.subnet_id - private_ips = data.null_data_source.ips - - user_scripts = data.template_file.setup_template - instance_count = var.instance_count -} - -resource "aws_lb_target_group_attachment" "dgraph_ratel" { - count = var.instance_count - - target_group_arn = module.aws_tg.arn - target_id = module.aws_instance.instance_ids[count.index] - port = local.ratel_port -} diff --git a/contrib/config/terraform/aws/ha/dgraph/ratel/outputs.tf b/contrib/config/terraform/aws/ha/dgraph/ratel/outputs.tf deleted file mode 100644 index ed185c6a04c..00000000000 --- a/contrib/config/terraform/aws/ha/dgraph/ratel/outputs.tf +++ /dev/null @@ -1,17 +0,0 @@ -output "target_group_id" { - description = "Target group associated with the created ratel instances." - value = module.aws_tg.id -} - -output "private_ips" { - description = "IP addresses of the created dgraph ratel instances." - value = [ - for ip_obj in data.null_data_source.ips: - ip_obj.outputs.private - ] -} - -output "ratel_port" { - description = "Port to which ratel UI server is listening." - value = local.ratel_port -} diff --git a/contrib/config/terraform/aws/ha/dgraph/ratel/variables.tf b/contrib/config/terraform/aws/ha/dgraph/ratel/variables.tf deleted file mode 100644 index 74d46516644..00000000000 --- a/contrib/config/terraform/aws/ha/dgraph/ratel/variables.tf +++ /dev/null @@ -1,65 +0,0 @@ -variable "name" { - type = string - description = "Name of the dgraph deployment" -} - -variable "instance_type" { - type = string - description = "EC2 Instance type for dgraph ratel component." -} - -variable "instance_count" { - type = number - description = "Instance count for ratel." 
- default = 1 -} - -variable "disk_size" { - type = string - description = "Disk size for dgraph ratel node." -} - -variable "disk_iops" { - type = number - description = "IOPS limit for the disk associated with the instance." -} - -variable "vpc_id" { - type = string - description = "VPC ID of the dgraph cluster we created." -} - -variable "sg_id" { - type = string - description = "Security group ID for the created dgraph VPC." -} - -variable "subnet_id" { - type = string - description = "Subnet ID within VPC for dgraph deployment." -} - -variable "lb_arn" { - type = string - description = "Resource ARN of the dgraph load balancer." -} - -variable "subnet_cidr_block" { - type = string - description = "CIDR block corresponding to the dgraph subnet." -} - -variable "ami_id" { - type = string - description = "AMI to use for the instances" -} - -variable "key_pair_name" { - type = string - description = "Key Pair name to associate with the instances." -} - -variable "dgraph_version" { - type = string - description = "Dgraph version for installation." -} \ No newline at end of file diff --git a/contrib/config/terraform/aws/ha/dgraph/variables.tf b/contrib/config/terraform/aws/ha/dgraph/variables.tf deleted file mode 100644 index 774628678f9..00000000000 --- a/contrib/config/terraform/aws/ha/dgraph/variables.tf +++ /dev/null @@ -1,84 +0,0 @@ -variable "name" { - type = string - description = "Name of the dgraph deployment" -} - -variable "alpha_count" { - type = number - description = "Number of dgraph alphas to run in the cluster, defaults to 3." -} - -variable "zero_count" { - type = number - description = "Number of dgraph zeros to run in the cluster, defaults to 3." -} - -variable "alpha_instance_type" { - type = string - description = "EC2 Instance type for dgraph alpha component." -} - -variable "zero_instance_type" { - type = string - description = "EC2 instance type for dgraph zero component." -} - -variable "ratel_instance_type" { - type = string - description = "EC2 instance type for dgraph ratel component." -} - -variable "alpha_disk_size" { - type = string - description = "Disk size for dgraph alpha node." -} - -variable "zero_disk_size" { - type = string - description = "Disk size for dgraph zero node." -} - -variable "ratel_disk_size" { - type = string - description = "Disk size for dgraph ratel node." -} - -variable "disk_iops" { - type = number - description = "IOPS limit for the disk associated with the instance." -} - -variable "cidr_block" { - type = string - description = "CIDR block to assign to the VPC running the dgraph cluster, only used if a new VPC is created." -} - -variable "subnet_cidr_block" { - type = string - description = "CIDR block to create the subnet with in the VPC." -} - -variable "secondary_subnet_cidr_block" { - type = string - description = "Secondary CIDR block for the subnet to create within the VPC, this subnet will be used for dgraph deployment." -} - -variable "ami_id" { - type = string - description = "AMI to use for the instances" -} - -variable "key_pair_name" { - type = string - description = "Key Pair to create for the instances." -} - -variable "public_key" { - type = string - description = "Public key corresponding to the key pair." -} - -variable "dgraph_version" { - type = string - description = "Dgraph version for installation." 
-} diff --git a/contrib/config/terraform/aws/ha/dgraph/zero/data.tf b/contrib/config/terraform/aws/ha/dgraph/zero/data.tf deleted file mode 100644 index a1a5f884c1e..00000000000 --- a/contrib/config/terraform/aws/ha/dgraph/zero/data.tf +++ /dev/null @@ -1,32 +0,0 @@ -data "template_file" "service_template" { - count = var.instance_count - - template = count.index == 0 ? file("${path.module}/../../templates/dgraph-zero-init.service.tmpl") : file("${path.module}/../../templates/dgraph-zero.service.tmpl") - - vars = { - private_ip = cidrhost(var.subnet_cidr_block, count.index + 10) - healthy_zero_ip = local.healthy_zero_ip - index = count.index + 1 - replicas_count = local.replicas_count - } -} - -data "template_file" "setup_template" { - count = var.instance_count - - template = file("${path.module}/../../templates/setup-systemd-service.sh.tmpl") - - vars = { - systemd_service = data.template_file.service_template[count.index].rendered - service_name = "dgraph-zero" - dgraph_version = var.dgraph_version - } -} - -data "null_data_source" "ips" { - count = var.instance_count - - inputs = { - private = cidrhost(var.subnet_cidr_block, count.index + 10) - } -} diff --git a/contrib/config/terraform/aws/ha/dgraph/zero/main.tf b/contrib/config/terraform/aws/ha/dgraph/zero/main.tf deleted file mode 100644 index 6631499bd4a..00000000000 --- a/contrib/config/terraform/aws/ha/dgraph/zero/main.tf +++ /dev/null @@ -1,23 +0,0 @@ -locals { - deployment_name = "${var.name}-zero" - healthy_zero_ip = cidrhost(var.subnet_cidr_block, 10) - replicas_count = var.instance_count > 3 ? 3 : var.instance_count -} - -module "aws_instance" { - source = "./../../aws/instance" - - instance_count = var.instance_count - - deployment_name = local.deployment_name - - disk_size = var.disk_size - disk_iops = var.disk_iops - instance_type = var.instance_type - ami_id = var.ami_id - key_pair_name = var.key_pair_name - sg_id = var.sg_id - subnet_id = var.subnet_id - private_ips = data.null_data_source.ips - user_scripts = data.template_file.setup_template -} diff --git a/contrib/config/terraform/aws/ha/dgraph/zero/outputs.tf b/contrib/config/terraform/aws/ha/dgraph/zero/outputs.tf deleted file mode 100644 index 9f436c38e70..00000000000 --- a/contrib/config/terraform/aws/ha/dgraph/zero/outputs.tf +++ /dev/null @@ -1,12 +0,0 @@ -output "healthy_zero_ip" { - description = "IP address of a healthy zero created by the module." - value = local.healthy_zero_ip -} - -output "private_ips" { - description = "IP addresses of the created dgraph zero instances." - value = [ - for ip_obj in data.null_data_source.ips: - ip_obj.outputs.private - ] -} diff --git a/contrib/config/terraform/aws/ha/dgraph/zero/variables.tf b/contrib/config/terraform/aws/ha/dgraph/zero/variables.tf deleted file mode 100644 index 5e9ee19fc9a..00000000000 --- a/contrib/config/terraform/aws/ha/dgraph/zero/variables.tf +++ /dev/null @@ -1,54 +0,0 @@ -variable "name" { - type = string - description = "Name of the dgraph deployment" -} - -variable "instance_count" { - type = number - description = "Number of dgraph zeros to run in the cluster." -} - -variable "instance_type" { - type = string - description = "EC2 Instance type for dgraph zero component." -} - -variable "disk_size" { - type = string - description = "Disk size for dgraph zero node." -} - -variable "disk_iops" { - type = number - description = "IOPS limit for the disk associated with the instance." -} - -variable "sg_id" { - type = string - description = "Security group ID for the created dgraph VPC." 
-}
-
-variable "subnet_id" {
-  type        = string
-  description = "Subnet ID within VPC for dgraph deployment."
-}
-
-variable "subnet_cidr_block" {
-  type        = string
-  description = "CIDR block corresponding to the dgraph subnet."
-}
-
-variable "ami_id" {
-  type        = string
-  description = "AMI to use for the instances"
-}
-
-variable "key_pair_name" {
-  type        = string
-  description = "Key Pair name to associate with the instances."
-}
-
-variable "dgraph_version" {
-  type        = string
-  description = "Dgraph version for installation."
-}
\ No newline at end of file
diff --git a/contrib/config/terraform/aws/ha/main.tf b/contrib/config/terraform/aws/ha/main.tf
deleted file mode 100644
index f0df72b10bc..00000000000
--- a/contrib/config/terraform/aws/ha/main.tf
+++ /dev/null
@@ -1,60 +0,0 @@
-# --------------------------------------------------------------------------------
-# S3-based terraform remote state setup; uncomment and complete to use the
-# mentioned bucket and table for the remote state store.
-# --------------------------------------------------------------------------------
-# terraform {
-#   required_version = ">= 0.12"
-
-#   backend "s3" {
-#     bucket         = ""
-#     dynamodb_table = ""
-#     key            = "dgraph/terraform_state"
-#     region         = "ap-southeast-1"
-#     encrypt        = true
-#   }
-# }
-
-# --------------------------------------------------------------------------------
-# Setup AWS provider
-# --------------------------------------------------------------------------------
-provider "aws" {
-  access_key = var.aws_access_key
-  secret_key = var.aws_secret_key
-  region     = var.region
-  profile    = var.profile
-}
-
-locals {
-  deployment_name = "${var.service_prefix}${var.deployment_name}"
-}
-
-# --------------------------------------------------------------------------------
-# Setup Dgraph module to create the cluster with dgraph running
-# --------------------------------------------------------------------------------
-module "dgraph" {
-  source = "./dgraph"
-
-  name   = local.deployment_name
-  ami_id = var.ami_id
-
-  alpha_count = var.alpha_count
-  zero_count  = var.zero_count
-
-  alpha_instance_type = var.alpha_instance_type
-  zero_instance_type  = var.zero_instance_type
-  ratel_instance_type = var.ratel_instance_type
-
-  alpha_disk_size = var.alpha_disk_size
-  zero_disk_size  = var.zero_disk_size
-  ratel_disk_size = var.ratel_disk_size
-  disk_iops       = var.disk_iops
-
-  key_pair_name = var.key_pair_name
-  public_key    = var.public_key
-
-  cidr_block                  = var.vpc_cidr_block
-  subnet_cidr_block           = var.vpc_subnet_cidr_block
-  secondary_subnet_cidr_block = var.vpc_secondary_subnet_cidr_block
-
-  dgraph_version = var.dgraph_version
-}
diff --git a/contrib/config/terraform/aws/ha/outputs.tf b/contrib/config/terraform/aws/ha/outputs.tf
deleted file mode 100644
index ce145f9010e..00000000000
--- a/contrib/config/terraform/aws/ha/outputs.tf
+++ /dev/null
@@ -1,54 +0,0 @@
-output "lb_dns_name" {
-  description = "DNS associated with the application load balancer created for dgraph."
-  value       = module.dgraph.lb_dns_name
-}
-
-output "vpc_id" {
-  description = "ID of the VPC created for the dgraph cluster."
-  value       = module.dgraph.vpc_id
-}
-
-output "client_sg_id" {
-  description = "Security group that can be used with the client."
-  value       = module.dgraph.client_sg_id
-}
-
-output "healthy_zero_ip" {
-  description = "IP address of a healthy zero (initial zero) created."
-  value       = module.dgraph.healthy_zero_ip
-}
-
-output "zero_private_ips" {
-  description = "IP addresses of the created dgraph zero instances."
-  value       = module.dgraph.zero_private_ips
-}
-
-output "ratel_target_group_id" {
-  description = "Target group associated with the created ratel instances."
-  value       = module.dgraph.ratel_target_group_id
-}
-
-output "ratel_private_ips" {
-  description = "IP addresses of the created dgraph ratel instances."
-  value       = module.dgraph.ratel_private_ips
-}
-
-output "ratel_port" {
-  description = "Port to which ratel UI server is listening."
-  value       = module.dgraph.ratel_port
-}
-
-output "alpha_target_group_id" {
-  description = "ID of the target group associated with the alpha autoscaling group."
-  value       = module.dgraph.alpha_target_group_id
-}
-
-output "alpha_auto_scaling_group_id" {
-  description = "ID of the autoscaling group created for dgraph alpha nodes."
-  value       = module.dgraph.alpha_auto_scaling_group_id
-}
-
-output "alpha_port" {
-  description = "HTTP port for dgraph alpha component."
-  value       = module.dgraph.alpha_port
-}
diff --git a/contrib/config/terraform/aws/ha/templates/dgraph-alpha.service.tmpl b/contrib/config/terraform/aws/ha/templates/dgraph-alpha.service.tmpl
deleted file mode 100644
index a0c14b83212..00000000000
--- a/contrib/config/terraform/aws/ha/templates/dgraph-alpha.service.tmpl
+++ /dev/null
@@ -1,15 +0,0 @@
-[Unit]
-Description=dgraph.io data server
-Wants=network.target
-After=network.target
-
-[Service]
-Type=simple
-ExecStart=/usr/local/bin/dgraph alpha --my=$(hostname -f):7080 --zero ${healthy_zero_ip}:5080 -p /var/run/dgraph/p -w /var/run/dgraph/w
-StandardOutput=journal
-StandardError=journal
-User=dgraph
-Group=dgraph
-
-[Install]
-WantedBy=multi-user.target
diff --git a/contrib/config/terraform/aws/ha/templates/dgraph-ratel.service.tmpl b/contrib/config/terraform/aws/ha/templates/dgraph-ratel.service.tmpl
deleted file mode 100644
index fdfddcd792b..00000000000
--- a/contrib/config/terraform/aws/ha/templates/dgraph-ratel.service.tmpl
+++ /dev/null
@@ -1,15 +0,0 @@
-[Unit]
-Description=dgraph.io UI server
-Wants=network.target
-After=network.target
-
-[Service]
-Type=simple
-ExecStart=/usr/local/bin/dgraph-ratel
-StandardOutput=journal
-StandardError=journal
-User=dgraph
-Group=dgraph
-
-[Install]
-WantedBy=multi-user.target
diff --git a/contrib/config/terraform/aws/ha/templates/dgraph-zero-init.service.tmpl b/contrib/config/terraform/aws/ha/templates/dgraph-zero-init.service.tmpl
deleted file mode 100644
index 4ba59b25e17..00000000000
--- a/contrib/config/terraform/aws/ha/templates/dgraph-zero-init.service.tmpl
+++ /dev/null
@@ -1,16 +0,0 @@
-[Unit]
-Description=dgraph.io zero server
-Wants=network.target
-After=network.target
-
-[Service]
-Type=simple
-ExecStart=/usr/local/bin/dgraph zero --my=${private_ip}:5080 -w /var/run/dgraph/w --raft="idx=${index}"
-StandardOutput=journal
-StandardError=journal
-User=dgraph
-Group=dgraph
-
-[Install]
-WantedBy=multi-user.target
-RequiredBy=dgraph.service
diff --git a/contrib/config/terraform/aws/ha/templates/dgraph-zero.service.tmpl b/contrib/config/terraform/aws/ha/templates/dgraph-zero.service.tmpl
deleted file mode 100644
index 0c84c16ea7f..00000000000
--- a/contrib/config/terraform/aws/ha/templates/dgraph-zero.service.tmpl
+++ /dev/null
@@ -1,17 +0,0 @@
-[Unit]
-Description=dgraph.io zero server
-Wants=network.target
-After=network.target
-
-[Service]
-Type=simple
-ExecStart=/usr/local/bin/dgraph zero --my=${private_ip}:5080 --peer ${healthy_zero_ip}:5080 \
---raft="idx=${index}" --replicas ${replicas_count} -w /var/run/dgraph/w
-StandardOutput=journal
-StandardError=journal
-User=dgraph
-Group=dgraph
-
-[Install]
-WantedBy=multi-user.target -RequiredBy=dgraph.service diff --git a/contrib/config/terraform/aws/ha/templates/setup-systemd-service.sh.tmpl b/contrib/config/terraform/aws/ha/templates/setup-systemd-service.sh.tmpl deleted file mode 100644 index a94c2b7efc1..00000000000 --- a/contrib/config/terraform/aws/ha/templates/setup-systemd-service.sh.tmpl +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/bash - -set -euxo pipefail - -# Currently we are downloading dgraph binary manually, later we can create an AMI with dgraph pre-installed -# and maintain that on AWS. -wget https://github.com/hypermodeinc/dgraph/releases/download/v${dgraph_version}/dgraph-linux-amd64.tar.gz -tar -C /usr/local/bin -xzf dgraph-linux-amd64.tar.gz - -groupadd --system dgraph -useradd --system -d /var/run/dgraph -s /bin/false -g dgraph dgraph - -mkdir -p /var/log/dgraph/ -mkdir -p /var/run/dgraph/ - -chown -R dgraph:dgraph /var/run/dgraph -chown -R dgraph:dgraph /var/log/dgraph - -echo "${systemd_service}" > /etc/systemd/system/${service_name}.service -chmod +x /etc/systemd/system/dgraph* - -systemctl daemon-reload -systemctl enable --now ${service_name} diff --git a/contrib/config/terraform/aws/ha/terraform.tfvars.example b/contrib/config/terraform/aws/ha/terraform.tfvars.example deleted file mode 100644 index 32cc8d4f31b..00000000000 --- a/contrib/config/terraform/aws/ha/terraform.tfvars.example +++ /dev/null @@ -1,5 +0,0 @@ -aws_access_key = "XXXXXXXXXXXXXXX" -aws_secret_key = "XXXXXXXXXXXXXXX" - -public_key = "ssh-rsa AAAXXXXXX" -deployment_name = "dgraph-test" diff --git a/contrib/config/terraform/aws/ha/variables.tf b/contrib/config/terraform/aws/ha/variables.tf deleted file mode 100644 index 86dba19260a..00000000000 --- a/contrib/config/terraform/aws/ha/variables.tf +++ /dev/null @@ -1,126 +0,0 @@ -variable "region" { - type = string - default = "us-east-2" - description = "The region to deploy the EC2 instance in." -} - -variable "profile" { - type = string - default = "terraform" -} - -variable "aws_access_key" { - type = string - description = "Access key for the AWS account to create the dgraph deployment in." -} - -variable "aws_secret_key" { - type = string - description = "Secret key for the AWS account." -} - -variable "deployment_name" { - type = string - description = "Name of the deployment for dgraph, this is used in various places to tag the created resources." -} - -variable "alpha_count" { - type = number - description = "Number of dgraph alphas to run in the cluster, defaults to 3." - default = 3 -} - -variable "zero_count" { - type = number - description = "Number of dgraph zeros to run in the cluster, defaults to 3." - default = 3 -} - -variable "alpha_instance_type" { - type = string - description = "EC2 Instance type for dgraph alpha component." - default = "m5a.large" -} - -variable "zero_instance_type" { - type = string - description = "EC2 instance type for dgraph zero component." - default = "m5.large" -} - -variable "ratel_instance_type" { - type = string - description = "EC2 instance type for dgraph ratel component." - default = "m5.large" -} - -variable "alpha_disk_size" { - type = number - description = "Disk size for the alpha node." - default = 500 -} - -variable "zero_disk_size" { - type = number - description = "Disk size for dgraph zero node." - default = 250 -} - -variable "ratel_disk_size" { - type = number - description = "Disk size for the ratel node." - default = 100 -} - -variable "disk_iops" { - type = number - description = "IOPS limit for the disk associated with the instance." 
-  default     = 1000
-}
-
-variable "service_prefix" {
-  type        = string
-  description = "Prefix to add to all the names and tags of EC2 components; defaults to empty"
-  default     = ""
-}
-
-variable "vpc_cidr_block" {
-  type        = string
-  description = "CIDR block to assign to the VPC running the dgraph cluster; only used if a new VPC is created"
-  default     = "10.200.0.0/16"
-}
-
-variable "vpc_subnet_cidr_block" {
-  type        = string
-  description = "CIDR block for the subnet to create within the VPC; this subnet will be used for the dgraph deployment."
-  default     = "10.200.200.0/24"
-}
-
-variable "vpc_secondary_subnet_cidr_block" {
-  type        = string
-  description = "Secondary CIDR block for the subnet to create within the VPC; this subnet will be used for the dgraph deployment."
-  default     = "10.200.201.0/24"
-}
-
-variable "ami_id" {
-  type        = string
-  description = "AMI to use for the instances"
-  default     = "ami-0c55b159cbfafe1f0"
-}
-
-variable "key_pair_name" {
-  type        = string
-  description = "Name of the key pair to create for attaching to each instance."
-  default     = "dgraph_ha_key"
-}
-
-variable "public_key" {
-  type        = string
-  description = "Public key corresponding to the key pair."
-}
-
-variable "dgraph_version" {
-  type        = string
-  description = "Dgraph version for installation."
-  default     = "1.1.1"
-}
diff --git a/contrib/config/terraform/aws/standalone/README.md b/contrib/config/terraform/aws/standalone/README.md
deleted file mode 100644
index 96b3a1d5723..00000000000
--- a/contrib/config/terraform/aws/standalone/README.md
+++ /dev/null
@@ -1,34 +0,0 @@
-# Deploy Dgraph on AWS using Terraform
-
-> **NOTE: This Terraform template creates a Dgraph database cluster with a public IP accessible to
-> anyone. You can set the `assign_public_ip` variable to false to skip creating a public IP address
-> and configure access to Dgraph yourself.**
-
-[Terraform](https://terraform.io/) automates the process of spinning up the EC2 instance and
-setting up and running Dgraph on it. This setup deploys Dgraph in standalone mode inside a single
-EC2 instance.
-
-Here are the steps to follow:
-
-1. You must have an AWS account set up.
-
-2. [Download](https://terraform.io/downloads.html) and install terraform.
-
-3. Create a `terraform.tfvars` file similar to that of
-   [terraform.tfvars.example](./terraform.tfvars.example) and edit the variables inside accordingly.
-   You can override any variable present in [variables.tf](./variables.tf) by providing an explicit
-   value in the `terraform.tfvars` file.
-
-4. Execute the following commands:
-
-```sh
-terraform init
-terraform plan
-terraform apply
-```
-
-The output of `terraform apply` will contain the IP address assigned to your EC2 instance.
-Dgraph-ratel will be available on port `8000` of that IP. Change the server URL in the dashboard
-to port `8080` of the same IP and start playing with dgraph.
-
-5. Use `terraform destroy` to delete the setup and restore the previous state.
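As a quick smoke test of the standalone instance described in the README above, a minimal sketch (assuming the `dgraph_ip` output defined in this module's `output.tf`, and that ports 8000/8080 are reachable from your machine; `/health` is the standard Dgraph alpha HTTP health endpoint):

```sh
# Read the public IP recorded by Terraform (output name from output.tf).
DGRAPH_IP=$(terraform output dgraph_ip)

# Alpha should answer on port 8080 once the systemd units are up.
curl "http://$DGRAPH_IP:8080/health"

# Ratel is served on port 8000; point its server URL at port 8080 of the same host.
echo "Ratel UI: http://$DGRAPH_IP:8000"
```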
diff --git a/contrib/config/terraform/aws/standalone/data.tf b/contrib/config/terraform/aws/standalone/data.tf deleted file mode 100644 index 7634cf1c9a7..00000000000 --- a/contrib/config/terraform/aws/standalone/data.tf +++ /dev/null @@ -1,14 +0,0 @@ -# -------------------------------------------------------------------------------- -# Setup template script for dgraph in standalone mode -# -------------------------------------------------------------------------------- -data "template_file" "setup_template" { - template = "${file("${path.module}/templates/setup.tmpl")}" - - # Systemd service description for dgraph components. - vars = { - dgraph_ui_service = "${file("${path.module}/templates/dgraph-ui.service")}" - dgraph_zero_service = "${file("${path.module}/templates/dgraph-zero.service")}" - dgraph_service = "${file("${path.module}/templates/dgraph.service")}" - dgraph_version = "${var.dgraph_version}" - } -} diff --git a/contrib/config/terraform/aws/standalone/main.tf b/contrib/config/terraform/aws/standalone/main.tf deleted file mode 100644 index fde56ba3aae..00000000000 --- a/contrib/config/terraform/aws/standalone/main.tf +++ /dev/null @@ -1,92 +0,0 @@ -# -------------------------------------------------------------------------------- -# Setup AWS provider -# -------------------------------------------------------------------------------- -provider "aws" { - access_key = var.aws_access_key - secret_key = var.aws_secret_key - region = var.region - profile = var.profile -} - -# -------------------------------------------------------------------------------- -# Security group for dgraph instance in standalone mode. -# -------------------------------------------------------------------------------- -resource "aws_security_group" "dgraph_standalone" { - name = var.instance_name - - egress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } - - ingress { - from_port = var.ssh_port - to_port = var.ssh_port - protocol = "tcp" - - # To keep this setup simple, we allow incoming SSH requests from any IP. - # In real-world usage, you should only allow SSH requests from trusted servers, - # such as a bastion host or VPN server. - cidr_blocks = ["0.0.0.0/0"] - } - - - ingress { - from_port = var.dgraph_ui_port - to_port = var.dgraph_ui_port - protocol = "tcp" - cidr_blocks = ["0.0.0.0/0"] - } - - ingress { - from_port = var.dgraph_server_port - to_port = var.dgraph_server_port - protocol = "tcp" - cidr_blocks = ["0.0.0.0/0"] - } - -} - -# -------------------------------------------------------------------------------- -# Create an AWS key pair for ssh purposes. -# -------------------------------------------------------------------------------- -resource "aws_key_pair" "dgraph_standalone_key" { - key_name = var.key_pair_name - public_key = var.public_key -} - -# -------------------------------------------------------------------------------- -# Launch a dgraph standalone EC2 instance. -# -------------------------------------------------------------------------------- -resource "aws_instance" "dgraph_standalone" { - ami = var.aws_ami - associate_public_ip_address = var.assign_public_ip - - monitoring = true - disable_api_termination = false - instance_initiated_shutdown_behavior = "terminate" - - instance_type = var.instance_type - key_name = var.key_pair_name - - # We are not using security group ID here as this is a standalone mode - # which deploys dgraph in a single EC2 instance without any VPC constraints. 
- security_groups = [aws_security_group.dgraph_standalone.name] - - ebs_block_device { - device_name = "/dev/sda1" - volume_size = 20 - volume_type = "standard" - delete_on_termination = true - } - - # base64encoded user provided script to run at the time of instance - # initialization. - user_data_base64 = base64encode(data.template_file.setup_template.rendered) - - tags = { - Name = "dgraph-standalone" - } -} diff --git a/contrib/config/terraform/aws/standalone/output.tf b/contrib/config/terraform/aws/standalone/output.tf deleted file mode 100644 index 7539006008a..00000000000 --- a/contrib/config/terraform/aws/standalone/output.tf +++ /dev/null @@ -1,3 +0,0 @@ -output dgraph_ip { - value = aws_instance.dgraph_standalone.public_ip -} diff --git a/contrib/config/terraform/aws/standalone/templates/dgraph-ui.service b/contrib/config/terraform/aws/standalone/templates/dgraph-ui.service deleted file mode 100644 index fdfddcd792b..00000000000 --- a/contrib/config/terraform/aws/standalone/templates/dgraph-ui.service +++ /dev/null @@ -1,15 +0,0 @@ -[Unit] -Description=dgraph.io UI server -Wants=network.target -After=network.target - -[Service] -Type=simple -ExecStart=/usr/local/bin/dgraph-ratel -StandardOutput=journal -StandardError=journal -User=dgraph -Group=dgraph - -[Install] -WantedBy=multi-user.target diff --git a/contrib/config/terraform/aws/standalone/templates/dgraph-zero.service b/contrib/config/terraform/aws/standalone/templates/dgraph-zero.service deleted file mode 100644 index 2beb92f4269..00000000000 --- a/contrib/config/terraform/aws/standalone/templates/dgraph-zero.service +++ /dev/null @@ -1,16 +0,0 @@ -[Unit] -Description=dgraph.io zero server -Wants=network.target -After=network.target - -[Service] -Type=simple -ExecStart=/usr/local/bin/dgraph zero --wal /var/run/dgraph/zw -StandardOutput=journal -StandardError=journal -User=dgraph -Group=dgraph - -[Install] -WantedBy=multi-user.target -RequiredBy=dgraph.service diff --git a/contrib/config/terraform/aws/standalone/templates/dgraph.service b/contrib/config/terraform/aws/standalone/templates/dgraph.service deleted file mode 100644 index 64a2dbf459c..00000000000 --- a/contrib/config/terraform/aws/standalone/templates/dgraph.service +++ /dev/null @@ -1,16 +0,0 @@ -[Unit] -Description=dgraph.io data server -Wants=network.target -After=network.target dgraph-zero.service -Requires=dgraph-zero.service - -[Service] -Type=simple -ExecStart=/usr/local/bin/dgraph alpha -p /var/run/dgraph/p -w /var/run/dgraph/w -StandardOutput=journal -StandardError=journal -User=dgraph -Group=dgraph - -[Install] -WantedBy=multi-user.target diff --git a/contrib/config/terraform/aws/standalone/templates/setup.tmpl b/contrib/config/terraform/aws/standalone/templates/setup.tmpl deleted file mode 100644 index f170582007d..00000000000 --- a/contrib/config/terraform/aws/standalone/templates/setup.tmpl +++ /dev/null @@ -1,24 +0,0 @@ -#!/bin/bash - -set -euxo pipefail - -wget https://github.com/hypermodeinc/dgraph/releases/download/v${dgraph_version}/dgraph-linux-amd64.tar.gz -tar -C /usr/local/bin -xzf dgraph-linux-amd64.tar.gz - -groupadd --system dgraph -useradd --system -d /var/run/dgraph -s /bin/false -g dgraph dgraph - -mkdir -p /var/log/dgraph/ -mkdir -p /var/run/dgraph/ - -chown -R dgraph:dgraph /var/run/dgraph -chown -R dgraph:dgraph /var/log/dgraph - -echo "${dgraph_ui_service}" > /etc/systemd/system/dgraph-ui.service -echo "${dgraph_zero_service}" > /etc/systemd/system/dgraph-zero.service -echo "${dgraph_service}" > /etc/systemd/system/dgraph.service 
-chmod +x /etc/systemd/system/dgraph*
-
-systemctl daemon-reload
-systemctl enable --now dgraph
-systemctl enable --now dgraph-ui
diff --git a/contrib/config/terraform/aws/standalone/terraform.tfvars.example b/contrib/config/terraform/aws/standalone/terraform.tfvars.example
deleted file mode 100644
index f6897288a9f..00000000000
--- a/contrib/config/terraform/aws/standalone/terraform.tfvars.example
+++ /dev/null
@@ -1,5 +0,0 @@
-aws_access_key = "XXXXXXXXXXXXXXX"
-aws_secret_key = "XXXXXXXXXXXXXXX"
-
-# Public key for SSH associated with the created instance.
-public_key = "ssh-rsa AAAXXXXXX"
diff --git a/contrib/config/terraform/aws/standalone/variables.tf b/contrib/config/terraform/aws/standalone/variables.tf
deleted file mode 100644
index d5cd4f706f4..00000000000
--- a/contrib/config/terraform/aws/standalone/variables.tf
+++ /dev/null
@@ -1,79 +0,0 @@
-variable "region" {
-  type        = string
-  default     = "us-east-2"
-  description = "The region to deploy the EC2 instance in."
-}
-
-variable "profile" {
-  type    = string
-  default = "terraform"
-}
-
-variable "aws_access_key" {
-  type        = string
-  description = "Access key for the AWS account to create the dgraph deployment in."
-}
-
-variable "aws_secret_key" {
-  type        = string
-  description = "Secret key for the AWS account."
-}
-
-variable "aws_ami" {
-  type        = string
-  default     = "ami-0c55b159cbfafe1f0"
-  description = "Type of Amazon machine image to use for the instance."
-}
-
-variable "key_pair_name" {
-  type        = string
-  default     = "dgraph-standalone-key"
-  description = "The EC2 Key Pair to associate with the EC2 Instance for SSH access."
-}
-
-variable "public_key" {
-  type        = string
-  description = "Public SSH key to be associated with the instance."
-}
-
-variable "ssh_port" {
-  type        = number
-  default     = 22
-  description = "The port the EC2 Instance should listen on for SSH requests."
-}
-
-variable "instance_type" {
-  type        = string
-  default     = "t2.micro"
-  description = "EC2 instance resource type"
-}
-
-variable "instance_name" {
-  type        = string
-  default     = "dgraph-standalone"
-  description = "The Name tag to set for the EC2 Instance."
-}
-
-variable "dgraph_version" {
-  type        = string
-  description = "Dgraph version for installation"
-  default     = "1.1.0"
-}
-
-variable "dgraph_ui_port" {
-  type        = string
-  description = "Port number of the ratel interface"
-  default     = "8000"
-}
-
-variable "dgraph_server_port" {
-  type        = string
-  description = "Port number of the dgraph server for ratel to connect to."
-  default     = "8080"
-}
-
-variable "assign_public_ip" {
-  type        = string
-  default     = true
-  description = "Whether a public IP address should be assigned to the EC2 instance running dgraph in standalone mode."
-}
diff --git a/contrib/config/terraform/gcp/standalone/README.md b/contrib/config/terraform/gcp/standalone/README.md
deleted file mode 100644
index cc120c7e847..00000000000
--- a/contrib/config/terraform/gcp/standalone/README.md
+++ /dev/null
@@ -1,45 +0,0 @@
-# Deploy Dgraph on GCP using Terraform
-
-> **NOTE: This Terraform template creates a Dgraph database cluster with a public IP accessible to
-> anyone. You can set the `assign_public_ip` variable to false to skip creating a public IP address
-> and configure access to Dgraph yourself.**
-
-[Terraform](https://terraform.io/) automates the process of spinning up a GCP compute instance and
-setting up and running Dgraph on it. This setup deploys Dgraph in standalone mode inside a single
-GCP compute instance.
-
-Here are the steps to be followed:
-
-1. You must have a GCP account set up.
-
-2. [Download](https://terraform.io/downloads.html) and install terraform.
-
-3. Generate service account keys for your GCP account either using the dashboard or the `gcloud`
-   CLI as shown below:
-
-```sh
-gcloud iam service-accounts keys create ./account.json \
-  --iam-account [SA-NAME]@[PROJECT-ID].iam.gserviceaccount.com
-```
-
-4. Execute the following commands:
-
-```sh
-$ terraform init
-
-$ TF_VAR_project_name= terraform plan
-
-$ terraform apply
-
-Apply complete! Resources: 1 added, 0 changed, 0 destroyed.
-
-Outputs:
-
-dgraph_ip =
-```
-
-The output of `terraform apply` will contain the IP address assigned to your instance. Dgraph-ratel
-will be available on port `8000` of that IP. Change the server URL in the dashboard to port `8080`
-of the same IP and start playing with dgraph.
-
-5. Use `terraform destroy` to delete the setup and restore the previous state.
diff --git a/contrib/config/terraform/gcp/standalone/data.tf b/contrib/config/terraform/gcp/standalone/data.tf
deleted file mode 100644
index 7634cf1c9a7..00000000000
--- a/contrib/config/terraform/gcp/standalone/data.tf
+++ /dev/null
@@ -1,14 +0,0 @@
-# --------------------------------------------------------------------------------
-# Setup template script for dgraph in standalone mode
-# --------------------------------------------------------------------------------
-data "template_file" "setup_template" {
-  template = "${file("${path.module}/templates/setup.tmpl")}"
-
-  # Systemd service description for dgraph components.
-  vars = {
-    dgraph_ui_service   = "${file("${path.module}/templates/dgraph-ui.service")}"
-    dgraph_zero_service = "${file("${path.module}/templates/dgraph-zero.service")}"
-    dgraph_service      = "${file("${path.module}/templates/dgraph.service")}"
-    dgraph_version      = "${var.dgraph_version}"
-  }
-}
diff --git a/contrib/config/terraform/gcp/standalone/main.tf b/contrib/config/terraform/gcp/standalone/main.tf
deleted file mode 100644
index 570c2b0304d..00000000000
--- a/contrib/config/terraform/gcp/standalone/main.tf
+++ /dev/null
@@ -1,50 +0,0 @@
-# --------------------------------------------------------------------------------
-# Setup GCP provider
-# --------------------------------------------------------------------------------
-provider "google" {
-  credentials = file(var.credential_file)
-  project     = var.project_name
-  region      = var.region
-  zone        = var.zone
-}
-
-# --------------------------------------------------------------------------------
-# Dgraph instance in GCP running in standalone mode.
-# --------------------------------------------------------------------------------
-resource "google_compute_instance" "dgraph_standalone" {
-  name         = var.instance_name
-  machine_type = var.instance_type
-  description  = "GCP compute instance for dgraph in standalone mode; this instance alone hosts everything, including zero, alpha and ratel."
-
-  tags = ["dgraph", "dgraph-standalone"]
-
-  deletion_protection = false
-
-  boot_disk {
-    auto_delete = true
-
-    initialize_params {
-      image = var.instance_image
-      size  = var.instance_disk_size
-    }
-  }
-
-  network_interface {
-    network = "default"
-
-    dynamic "access_config" {
-      for_each = var.assign_public_ip == "false" ? [] : ["STANDARD"]
-      content {
-        network_tier = access_config.value
-      }
-    }
-  }
-
-  metadata = {
-    type = "dgraph-standalone"
-  }
-
-  # Startup script to run for the instance. This will download the dgraph binary
-  # and run it as a systemd service.
-  metadata_startup_script = data.template_file.setup_template.rendered
-}
diff --git a/contrib/config/terraform/gcp/standalone/outputs.tf b/contrib/config/terraform/gcp/standalone/outputs.tf
deleted file mode 100644
index 2dc1a17c628..00000000000
--- a/contrib/config/terraform/gcp/standalone/outputs.tf
+++ /dev/null
@@ -1,7 +0,0 @@
-# ----------------------------------------------------------------------------------
-# The output contains the IP address associated with the compute instance. The Ratel
-# UI is then accessible on port 8000 of that address.
-# ----------------------------------------------------------------------------------
-output dgraph_ip {
-  value = length(google_compute_instance.dgraph_standalone.network_interface[0].access_config) == 0 ? "" : google_compute_instance.dgraph_standalone.network_interface[0].access_config[0].nat_ip
-}
\ No newline at end of file
diff --git a/contrib/config/terraform/gcp/standalone/templates/dgraph-ui.service b/contrib/config/terraform/gcp/standalone/templates/dgraph-ui.service
deleted file mode 100644
index fdfddcd792b..00000000000
--- a/contrib/config/terraform/gcp/standalone/templates/dgraph-ui.service
+++ /dev/null
@@ -1,15 +0,0 @@
-[Unit]
-Description=dgraph.io UI server
-Wants=network.target
-After=network.target
-
-[Service]
-Type=simple
-ExecStart=/usr/local/bin/dgraph-ratel
-StandardOutput=journal
-StandardError=journal
-User=dgraph
-Group=dgraph
-
-[Install]
-WantedBy=multi-user.target
diff --git a/contrib/config/terraform/gcp/standalone/templates/dgraph-zero.service b/contrib/config/terraform/gcp/standalone/templates/dgraph-zero.service
deleted file mode 100644
index 2beb92f4269..00000000000
--- a/contrib/config/terraform/gcp/standalone/templates/dgraph-zero.service
+++ /dev/null
@@ -1,16 +0,0 @@
-[Unit]
-Description=dgraph.io zero server
-Wants=network.target
-After=network.target
-
-[Service]
-Type=simple
-ExecStart=/usr/local/bin/dgraph zero --wal /var/run/dgraph/zw
-StandardOutput=journal
-StandardError=journal
-User=dgraph
-Group=dgraph
-
-[Install]
-WantedBy=multi-user.target
-RequiredBy=dgraph.service
diff --git a/contrib/config/terraform/gcp/standalone/templates/dgraph.service b/contrib/config/terraform/gcp/standalone/templates/dgraph.service
deleted file mode 100644
index 64a2dbf459c..00000000000
--- a/contrib/config/terraform/gcp/standalone/templates/dgraph.service
+++ /dev/null
@@ -1,16 +0,0 @@
-[Unit]
-Description=dgraph.io data server
-Wants=network.target
-After=network.target dgraph-zero.service
-Requires=dgraph-zero.service
-
-[Service]
-Type=simple
-ExecStart=/usr/local/bin/dgraph alpha -p /var/run/dgraph/p -w /var/run/dgraph/w
-StandardOutput=journal
-StandardError=journal
-User=dgraph
-Group=dgraph
-
-[Install]
-WantedBy=multi-user.target
diff --git a/contrib/config/terraform/gcp/standalone/templates/setup.tmpl b/contrib/config/terraform/gcp/standalone/templates/setup.tmpl
deleted file mode 100644
index f170582007d..00000000000
--- a/contrib/config/terraform/gcp/standalone/templates/setup.tmpl
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/bin/bash
-
-set -euxo pipefail
-
-wget https://github.com/hypermodeinc/dgraph/releases/download/v${dgraph_version}/dgraph-linux-amd64.tar.gz
-tar -C /usr/local/bin -xzf dgraph-linux-amd64.tar.gz
-
-groupadd --system dgraph
-useradd --system -d /var/run/dgraph -s /bin/false -g dgraph dgraph
-
-mkdir -p /var/log/dgraph/
-mkdir -p /var/run/dgraph/
-
-chown -R dgraph:dgraph /var/run/dgraph
-chown -R dgraph:dgraph /var/log/dgraph
-
-echo "${dgraph_ui_service}" > /etc/systemd/system/dgraph-ui.service
-echo "${dgraph_zero_service}" > /etc/systemd/system/dgraph-zero.service
-echo "${dgraph_service}" > /etc/systemd/system/dgraph.service
-chmod +x /etc/systemd/system/dgraph*
-
-systemctl daemon-reload
-systemctl enable --now dgraph
-systemctl enable --now dgraph-ui
diff --git a/contrib/config/terraform/gcp/standalone/variables.tf b/contrib/config/terraform/gcp/standalone/variables.tf
deleted file mode 100644
index d075799fd10..00000000000
--- a/contrib/config/terraform/gcp/standalone/variables.tf
+++ /dev/null
@@ -1,58 +0,0 @@
-variable "region" {
-  type        = string
-  default     = "us-central1"
-  description = "The region to deploy the compute instance in."
-}
-
-variable "zone" {
-  type        = string
-  default     = "us-central1-a"
-  description = "Zone to create the instance in."
-}
-
-variable "project_name" {
-  type        = string
-  description = "Name of the GCP project to create the instance in."
-}
-
-variable "credential_file" {
-  type        = string
-  description = "Credential file for the GCP account."
-  default     = "account.json"
-}
-
-variable "instance_image" {
-  type        = string
-  default     = "ubuntu-os-cloud/ubuntu-1804-lts"
-  description = "Type of GCP machine image to use for the instance."
-}
-
-variable "instance_type" {
-  type        = string
-  default     = "n1-standard-4"
-  description = "Type of GCP instance to use."
-}
-
-variable "instance_disk_size" {
-  type        = number
-  default     = 50
-  description = "Size of the boot disk to use with the GCP instance."
-}
-
-variable "instance_name" {
-  type        = string
-  default     = "dgraph-standalone"
-  description = "The Name tag to set for the GCP compute Instance."
-}
-
-variable "dgraph_version" {
-  type        = string
-  description = "Dgraph version for installation"
-  default     = "1.1.0"
-}
-
-variable "assign_public_ip" {
-  type        = string
-  default     = "true"
-  description = "Whether a public IP address should be assigned to the compute instance running dgraph in standalone mode."
-}
\ No newline at end of file
diff --git a/contrib/config/terraform/kubernetes/.gitignore b/contrib/config/terraform/kubernetes/.gitignore
deleted file mode 100644
index aa2799923ab..00000000000
--- a/contrib/config/terraform/kubernetes/.gitignore
+++ /dev/null
@@ -1,32 +0,0 @@
-# Local .terraform directories
-**/.terraform/*
-
-# .tfstate files
-*.tfstate
-*.tfstate.*
-
-# Crash log files
-crash.log
-
-# Ignore any .tfvars files that are generated automatically for each Terraform run. Most
-# .tfvars files are managed as part of configuration and so should be included in
-# version control.
-#
-# example.tfvars
-
-# Ignore override files as they are usually used to override resources locally and so
-# are not checked in
-override.tf
-override.tf.json
-*_override.tf
-*_override.tf.json
-
-# Include override files you do wish to add to version control using negated pattern
-#
-# !example_override.tf
-
-# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
-# example: *tfplan*
-#
-# Kubeconfig
-kubeconfig
diff --git a/contrib/config/terraform/kubernetes/README.md b/contrib/config/terraform/kubernetes/README.md
deleted file mode 100644
index 0835ac911c7..00000000000
--- a/contrib/config/terraform/kubernetes/README.md
+++ /dev/null
@@ -1,121 +0,0 @@
-# Deploy Dgraph on AWS EKS using Terraform
-
-Dgraph is a horizontally scalable and distributed graph database, providing ACID transactions,
-consistent replication and linearizable reads. It's built from the ground up to perform well for
-a rich set of queries.
-Being a native graph database, it tightly controls how the data is arranged on disk to
-optimize for query performance and throughput, reducing disk seeks and network calls in a cluster.
-
-## Introduction
-
-The Terraform template creates the following resources towards setting up a Dgraph cluster on AWS
-EKS.
-
-- AWS VPC with 2 private subnets for hosting the EKS cluster, 2 public subnets to host the load
-  balancers to expose the services and one NAT subnet to provision the NAT gateway required for the
-  nodes/pods in the private subnet to communicate with the internet. Also sets up the NACL rules for
-  secure inter-subnet communication.
-- AWS EKS in the private subnets to host the Dgraph cluster.
-- The Dgraph cluster Kubernetes resources in either a standalone mode or an HA mode (refer to the
-  variables available to tweak the provisioning of the Dgraph cluster below) on the EKS cluster.
-
-## Prerequisites

-- Terraform > 0.12.0
-- awscli >= 1.18.32
-
-## Steps to follow to get the Dgraph cluster on AWS EKS up and running
-
-1. You must have an AWS account with privileges to create VPC, EKS and associated resources. Ensure
-   awscli is set up with the right credentials (one can also run each terraform command with an
-   AWS_PROFILE prefix alternatively).
-
-2. [Download](https://terraform.io/downloads.html) and install Terraform.
-
-3. Create a `terraform.tfvars` file similar to that of `terraform.tfvars.example` and edit the
-   variables inside accordingly. You can override any variable present in
-   [variables.tf](./variables.tf) by providing an explicit value in the `terraform.tfvars` file.
-
-4. Execute the following commands:
-
-```sh
-$ terraform init
-$ terraform plan -target=module.aws
-$ terraform apply -target=module.aws
-# One can choose to not run the following commands if they intend to use [Helm charts](https://github.com/dgraph-io/charts)
-# to provision their resources on the Kubernetes cluster.
-# If you want to manage the state of the Kubernetes resources using Terraform, run the following commands as well:
-$ terraform plan -target=module.dgraph
-$ terraform apply -target=module.dgraph
-```
-
-> Note: The two modules cannot be applied in the same run owing to the way Terraform
-> [evaluates](https://www.terraform.io/docs/providers/kubernetes/index.html#stacking-with-managed-kubernetes-cluster-resources)
-> the provider blocks.
-
-The command `terraform apply -target=module.dgraph` outputs the hostnames of the Load Balancers
-exposing the Alpha, Zero and Ratel services.
-
-5. Use `terraform destroy -target=module.aws` to delete the setup and restore the previous state.
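A quick way to sanity-check the deployment once both modules have applied, as a minimal sketch (assuming the `kubeconfig` file written to the repository root and the default `dgraph` namespace, both described in the notes below):

```sh
# Use the kubeconfig generated by the run (see note 3 below).
export KUBECONFIG=$PWD/kubeconfig

# Zero, Alpha and Ratel pods should all reach the Running state.
kubectl get pods --namespace dgraph

# The services' EXTERNAL-IP column carries the Load Balancer hostnames
# reported in the Terraform output.
kubectl get services --namespace dgraph
```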
-
-The following table lists the configurable parameters of the template and their default values:
-
-| Parameter                 | Description                                                                                                                         | Default       |
-| ------------------------- | ----------------------------------------------------------------------------------------------------------------------------------- | ------------- |
-| `prefix`                  | The namespace prefix for all resources                                                                                              | dgraph        |
-| `cidr`                    | The CIDR of the VPC                                                                                                                  | 10.20.0.0/16  |
-| `region`                  | The region to deploy the resources in                                                                                                | ap-south-1    |
-| `ha`                      | Enable or disable HA deployment of Dgraph                                                                                            | true          |
-| `ingress_whitelist_cidrs` | The CIDRs whitelisted at the service Load Balancer                                                                                   | ["0.0.0.0/0"] |
-| `only_whitelist_local_ip` | Only whitelist the IP of the machine running Terraform at the service Load Balancers                                                | true          |
-| `worker_nodes_count`      | The number of worker nodes to provision with the EKS cluster                                                                         | 3             |
-| `instance_types`          | The list of instance types to run as worker nodes                                                                                    | ["m5.large"]  |
-| `namespace`               | The namespace to deploy the Dgraph pods to                                                                                           | dgraph        |
-| `zero_replicas`           | The number of Zero replicas to create. Overridden by the `ha` variable, which when disabled leads to creation of only 1 Zero pod     | 3             |
-| `zero_persistence`        | If enabled, mounts a persistent disk to the Zero pods                                                                                | true          |
-| `zero_storage_size_gb`    | The size of the persistent disk to attach to the Zero pods in GiB                                                                    | 10            |
-| `alpha_replicas`          | The number of Alpha replicas to create. Overridden by the `ha` variable, which when disabled leads to creation of only 1 Alpha pod   | 3             |
-| `alpha_initialize_data`   | If set, runs an init container to help with loading the data into Alpha                                                              | false         |
-| `alpha_persistence`       | If enabled, mounts a persistent disk to the Alpha pods                                                                               | true          |
-| `alpha_storage_size_gb`   | The size of the persistent disk to attach to the Alpha pods in GiB                                                                   | 10            |
-| `alpha_lru_size_mb`       | The LRU cache to enable on Alpha pods in MiB                                                                                         | 2048          |
-
-> NOTE:
->
-> 1. If `ha` is set to `false`, the `worker_nodes_count` is overridden to `1`.
> 2. If `only_whitelist_local_ip` is set to `true`, the `ingress_whitelist_cidrs` is overridden to
->    the local IP of the machine running Terraform.
-> 3. The `kubeconfig` file is created in the root directory of this repository.
-> 4. One could use Helm to install the Kubernetes resources onto the cluster, in which case comment
->    out the `dgraph` module in `main.tf`.
-> 5. The `worker_nodes_count` needs to be more than the greater of the Zero/Alpha replica counts
->    when `ha` is enabled, to ensure the topological scheduling based on hostnames works.
-> 6. The hostnames of the service Load Balancers are part of the output of the run. Please use the
->    respective service ports in conjunction with the hostnames. TLS is not enabled.
-> 7. When `alpha_initialize_data` is set to `true`, an init container is provisioned to help with
->    loading the data as follows:
->
-> ```bash
-> # Initializing the Alphas:
-> #
-> # You may want to initialize the Alphas with data before starting, e.g.
-> # with data from the Dgraph Bulk Loader: https://dgraph.io/docs/deploy/#bulk-loader.
-> # You can accomplish this by uncommenting this initContainers config. This
-> # starts a container with the same /dgraph volume used by Alpha and runs
-> #
-> # You can copy your local p directory to the pod's /dgraph/p directory
-> # with this command:
-> #
-> # kubectl cp path/to/p dgraph-alpha-0:/dgraph/ -c init-alpha
-> # (repeat for each alpha pod)
-> #
-> # When you're finished initializing each Alpha data directory, you can signal
-> # it to terminate successfully by creating a /dgraph/doneinit file:
-> #
-> # kubectl exec dgraph-alpha-0 -c init-alpha touch /dgraph/doneinit
-> #
-> # Note that pod restarts cause re-execution of Init Containers. If persistence is
-> # enabled, /dgraph is persisted across pod restarts, so the Init Container will exit
-> # automatically when /dgraph/doneinit is present and proceed with starting
-> # the Alpha process.
-> ```
diff --git a/contrib/config/terraform/kubernetes/main.tf b/contrib/config/terraform/kubernetes/main.tf
deleted file mode 100644
index a5d00dae169..00000000000
--- a/contrib/config/terraform/kubernetes/main.tf
+++ /dev/null
@@ -1,47 +0,0 @@
-terraform {
-  required_version = ">= 0.12.0"
-}
-
-data "http" "localip" {
-  url = "http://ipv4.icanhazip.com"
-}
-
-locals {
-  whitelisted_cidrs = var.only_whitelist_local_ip ? ["${chomp(data.http.localip.body)}/32"] : var.ingress_whitelist_cidrs
-}
-
-module "aws" {
-  source = "./modules/aws"
-
-  prefix = var.prefix
-  cidr   = var.cidr
-  region = var.region
-
-  ha                      = var.ha
-  worker_nodes_count      = var.worker_nodes_count
-  instance_types          = var.instance_types
-  ingress_whitelist_cidrs = local.whitelisted_cidrs
-}
-
-module "dgraph" {
-  source = "./modules/dgraph"
-
-  prefix          = var.prefix
-  ha              = var.ha
-  namespace       = var.namespace
-  kubeconfig_path = module.aws.kubeconfig_path
-
-  zero_replicas        = var.zero_replicas
-  zero_persistence     = var.zero_persistence
-  zero_storage_size_gb = var.zero_storage_size_gb
-
-  alpha_initialize_data = var.alpha_initialize_data
-  alpha_replicas        = var.alpha_replicas
-  alpha_persistence     = var.alpha_persistence
-  alpha_storage_size_gb = var.alpha_storage_size_gb
-  alpha_lru_size_mb     = var.alpha_lru_size_mb
-  # The Kubernetes Service Terraform resource does not expose any attributes
-  zero_address = "${var.prefix}-dgraph-zero-0.${var.prefix}-dgraph-zero.${var.namespace}.svc.cluster.local"
-
-  ingress_whitelist_cidrs = local.whitelisted_cidrs
-}
diff --git a/contrib/config/terraform/kubernetes/modules/aws/main.tf b/contrib/config/terraform/kubernetes/modules/aws/main.tf
deleted file mode 100644
index 8d15210bbf2..00000000000
--- a/contrib/config/terraform/kubernetes/modules/aws/main.tf
+++ /dev/null
@@ -1,26 +0,0 @@
-terraform {
-  required_version = ">= 0.12.0"
-}
-
-module "vpc" {
-  source = "./modules/vpc"
-
-  cluster_name = var.prefix
-  cidr         = var.cidr
-  region       = var.region
-}
-
-module "eks" {
-  source = "./modules/eks"
-
-  cluster_name = var.prefix
-  ha           = var.ha
-  region       = var.region
-
-  vpc_id                  = module.vpc.vpc_id
-  cluster_subnet_ids      = module.vpc.cluster_subnet_ids
-  db_subnet_ids           = module.vpc.db_subnet_ids
-  worker_nodes_count      = var.worker_nodes_count
-  instance_types          = var.instance_types
-  ingress_whitelist_cidrs = var.ingress_whitelist_cidrs
-}
diff --git a/contrib/config/terraform/kubernetes/modules/aws/modules/eks/data.tf b/contrib/config/terraform/kubernetes/modules/aws/modules/eks/data.tf
deleted file mode 100644
index 972f1ca90e9..00000000000
--- a/contrib/config/terraform/kubernetes/modules/aws/modules/eks/data.tf
+++ /dev/null
@@ -1,3 +0,0 @@
-data "aws_vpc" "vpc" {
-  id = "${var.vpc_id}"
-}
diff --git a/contrib/config/terraform/kubernetes/modules/aws/modules/eks/eks-cluster.tf b/contrib/config/terraform/kubernetes/modules/aws/modules/eks/eks-cluster.tf
deleted file mode 100644
index 15d67c480a3..00000000000
--- a/contrib/config/terraform/kubernetes/modules/aws/modules/eks/eks-cluster.tf
+++ /dev/null
@@ -1,75 +0,0 @@
-resource "aws_iam_role" "cluster_role" {
-  name = "${var.cluster_name}-cluster-iam"
-
-  assume_role_policy = <
-
-### Step 4: Create an `admin` policy
-
-```bash
-## convert policies to json format
-cat <<EOF > ./vault/policy_admin.json
-{
-  "policy": "$(sed -e ':a;N;$!ba;s/\n/\\n/g' \
-    -e 's/"/\\"/g' vault/policy_admin.hcl)"
-}
-EOF
-
-## create the admin policy
-curl --silent \
-  --header "X-Vault-Token: $VAULT_ROOT_TOKEN" \
-  --request PUT --data @./vault/policy_admin.json \
-  http://$VAULT_ADDRESS/v1/sys/policies/acl/admin
-
-curl --silent \
-  --header "X-Vault-Token: $VAULT_ROOT_TOKEN" \
-  --request GET \
-  http://$VAULT_ADDRESS/v1/sys/policies/acl/admin | jq
-```
-
-### Step 5: Create an `admin` role with the attached policy
-
-```bash
-
-## create the admin role with an attached policy
-curl --silent \
-  --header "X-Vault-Token: $VAULT_ROOT_TOKEN" \
-  --request POST \
-  --data '{
-    "token_policies": "admin",
-    "token_ttl": "1h",
-    "token_max_ttl": "4h"
-}' \
-  http://$VAULT_ADDRESS/v1/auth/approle/role/admin
-
-## verify the role
-curl --silent \
-  --header "X-Vault-Token: $VAULT_ROOT_TOKEN" \
-  --request GET \
-  http://$VAULT_ADDRESS/v1/auth/approle/role/admin | jq
-```
-
-### Step 6: Retrieve the admin token
-
-From here, we'll want to get an admin token that we can use for the rest of the process:
-
-```bash
-VAULT_ADMIN_ROLE_ID=$(curl --silent \
-  --header "X-Vault-Token: $VAULT_ROOT_TOKEN" \
-  http://$VAULT_ADDRESS/v1/auth/approle/role/admin/role-id | jq -r '.data.role_id'
-)
-
-VAULT_ADMIN_SECRET_ID=$(curl --silent \
-  --header "X-Vault-Token: $VAULT_ROOT_TOKEN" \
-  --request POST \
-  http://$VAULT_ADDRESS/v1/auth/approle/role/admin/secret-id | jq -r '.data.secret_id'
-)
-
-export VAULT_ADMIN_TOKEN=$(curl --silent \
-  --request POST \
-  --data "{
-    \"role_id\": \"$VAULT_ADMIN_ROLE_ID\",
-    \"secret_id\": \"$VAULT_ADMIN_SECRET_ID\"
-}" \
-  http://$VAULT_ADDRESS/v1/auth/approle/login | jq -r '.auth.client_token'
-)
-```
-
-### Step 7: Create a `dgraph` policy to access the secrets
-
-```bash
-## convert policies to json format
-cat <<EOF > ./vault/policy_dgraph.json
-{
-  "policy": "$(sed -e ':a;N;$!ba;s/\n/\\n/g' \
-    -e 's/"/\\"/g' vault/policy_dgraph.hcl)"
-}
-EOF
-
-## create the dgraph policy
-curl --silent \
-  --header "X-Vault-Token: $VAULT_ADMIN_TOKEN" \
-  --request PUT --data @./vault/policy_dgraph.json \
-  http://$VAULT_ADDRESS/v1/sys/policies/acl/dgraph
-
-## verify the policy
-curl --silent \
-  --header "X-Vault-Token: $VAULT_ADMIN_TOKEN" \
-  --request GET \
-  http://$VAULT_ADDRESS/v1/sys/policies/acl/dgraph | jq
-```
-
-### Step 8: Create a `dgraph` role with the attached policy
-
-```bash
-## create the dgraph role with an attached policy
-curl --silent \
-  --header "X-Vault-Token: $VAULT_ADMIN_TOKEN" \
-  --request POST \
-  --data '{
-    "token_policies": "dgraph",
-    "token_ttl": "1h",
-    "token_max_ttl": "4h"
-}' \
-  http://$VAULT_ADDRESS/v1/auth/approle/role/dgraph
-
-## verify the role
-curl --silent \
-  --header "X-Vault-Token: $VAULT_ADMIN_TOKEN" --request GET \
-  http://$VAULT_ADDRESS/v1/auth/approle/role/dgraph | jq
-```
-
-### Step 9: Save secrets using the admin persona
-
-This will save secrets for both
-[Encryption at Rest](https://dgraph.io/docs/enterprise-features/encryption-at-rest/) and
-[Access Control Lists](https://dgraph.io/docs/enterprise-features/access-control-lists/).
-
-```bash
-curl --silent \
-  --header "X-Vault-Token: $VAULT_ADMIN_TOKEN" \
-  --request POST \
-  --data @./vault/payload_alpha_secrets.json \
-  http://$VAULT_ADDRESS/v1/secret/data/dgraph/alpha | jq
-```
-
-**NOTE**: When updating K/V Version 2 secrets, be sure to increment the `options.cas` value to
-increase the version. For example, if updating the `enc_key` value to a 32-byte key, you would
-update `./vault/payload_alpha_secrets.json` to look like the following:
-
-```json
-{
-  "options": {
-    "cas": 1
-  },
-  "data": {
-    "enc_key": "12345678901234567890123456789012",
-    "hmac_secret": "12345678901234567890123456789012"
-  }
-}
-```
-
-### Step 10: Retrieve the dgraph token and save credentials
-
-```bash
-VAULT_DGRAPH_ROLE_ID=$(curl --silent \
-  --header "X-Vault-Token: $VAULT_ADMIN_TOKEN" \
-  http://$VAULT_ADDRESS/v1/auth/approle/role/dgraph/role-id | jq -r '.data.role_id'
-)
-
-VAULT_DGRAPH_SECRET_ID=$(curl --silent \
-  --header "X-Vault-Token: $VAULT_ADMIN_TOKEN" \
-  --request POST \
-  http://$VAULT_ADDRESS/v1/auth/approle/role/dgraph/secret-id | jq -r '.data.secret_id'
-)
-
-export VAULT_DGRAPH_TOKEN=$(curl --silent \
-  --request POST \
-  --data "{
-    \"role_id\": \"$VAULT_DGRAPH_ROLE_ID\",
-    \"secret_id\": \"$VAULT_DGRAPH_SECRET_ID\"
-}" \
-  http://$VAULT_ADDRESS/v1/auth/approle/login | jq -r '.auth.client_token'
-)
-```
-
-Also, we want to save the role-id and secret-id for the Dgraph Alpha server.
-
-```bash
-echo $VAULT_DGRAPH_ROLE_ID > ./vault/role_id
-echo $VAULT_DGRAPH_SECRET_ID > ./vault/secret_id
-```
-
-### Step 11: Verify secrets access using the app persona
-
-```bash
-curl --silent \
-  --header "X-Vault-Token: $VAULT_DGRAPH_TOKEN" \
-  --request GET \
-  http://$VAULT_ADDRESS/v1/secret/data/dgraph/alpha | jq
-```
-
-### Step 12: Launch Dgraph
-
-```bash
-export DGRAPH_VERSION="" # default 'latest'
-docker-compose up --detach
-```
-
-You can verify encryption features are enabled with:
-
-```bash
-curl localhost:8080/health | jq -r '.[].ee_features | .[]' | sed 's/^/* /'
-```
-
-## Using HashiCorp Vault CIDR List for Authentication
-
-As an alternative, you can restrict access to a limited range of IP addresses and disable the
-requirement for a `secret-id`. In this scenario, we will set `bind_secret_id` to `false` and
-supply a list of IP address ranges for the `bound_cidr_list` key.
-
-Only two steps need to change; the rest stay the same:
-
-### Step 8: Create a `dgraph` role using `bound_cidr_list`
-
-```bash
-## create the dgraph role with an attached policy
-curl --silent \
-  --header "X-Vault-Token: $VAULT_ADMIN_TOKEN" \
-  --request POST \
-  --data '{
-    "token_policies": "dgraph",
-    "token_ttl": "1h",
-    "token_max_ttl": "4h",
-    "bind_secret_id": false,
-    "bound_cidr_list": [
-      "10.0.0.0/8",
-      "172.0.0.0/8",
-      "192.168.0.0/16",
-      "127.0.0.1/32"
-    ]
-}' \
-  http://$VAULT_ADDRESS/v1/auth/approle/role/dgraph
-
-## verify the role
-curl --silent \
-  --header "X-Vault-Token: $VAULT_ADMIN_TOKEN" --request GET \
-  http://$VAULT_ADDRESS/v1/auth/approle/role/dgraph | jq
-```
-
-### Step 10: Retrieve the dgraph token using only the `role-id`
-
-```bash
-VAULT_DGRAPH_ROLE_ID=$(curl --silent \
-  --header "X-Vault-Token: $VAULT_ADMIN_TOKEN" \
-  http://$VAULT_ADDRESS/v1/auth/approle/role/dgraph/role-id | jq -r '.data.role_id'
-)
-
-export VAULT_DGRAPH_TOKEN=$(curl --silent \
-  --request POST \
-  --data "{ \"role_id\": \"$VAULT_DGRAPH_ROLE_ID\" }" \
-  http://$VAULT_ADDRESS/v1/auth/approle/login | jq -r '.auth.client_token'
-)
-```
-
-Also, we want to save only the `role-id` for the Dgraph Alpha server.
-
-```bash
-echo $VAULT_DGRAPH_ROLE_ID > ./vault/role_id
-```
diff --git a/contrib/config/vault/docker/dgraph_alpha_config.yaml b/contrib/config/vault/docker/dgraph_alpha_config.yaml
deleted file mode 100644
index 4cdd42038cf..00000000000
--- a/contrib/config/vault/docker/dgraph_alpha_config.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
-vault:
-  addr: http://vault:8200
-  acl_field: hmac_secret
-  acl_format: raw
-  enc_field: enc_key
-  enc_format: raw
-  path: secret/data/dgraph/alpha
-  role_id_file: /dgraph/vault/role_id
-  secret_id_file: /dgraph/vault/secret_id
-security:
-  whitelist: 10.0.0.0/8,172.0.0.0/8,192.168.0.0/16
diff --git a/contrib/config/vault/docker/docker-compose.yaml b/contrib/config/vault/docker/docker-compose.yaml
deleted file mode 100644
index 4c28dd66403..00000000000
--- a/contrib/config/vault/docker/docker-compose.yaml
+++ /dev/null
@@ -1,36 +0,0 @@
-version: "3.5"
-services:
-  zero1:
-    image: dgraph/dgraph:${DGRAPH_VERSION}
-    command: dgraph zero --my=zero1:5080 --replicas 1 --raft idx=1
-    ports:
-      - 6080:6080
-    container_name: zero1
-
-  alpha1:
-    image: dgraph/dgraph:${DGRAPH_VERSION}
-    ports:
-      - 8080:8080
-      - 9080:9080
-    environment:
-      DGRAPH_ALPHA_CONFIG: /dgraph/config/config.yaml
-    volumes:
-      - ./dgraph_alpha_config.yaml:/dgraph/config/config.yaml
-      - ./vault/secret_id:/dgraph/vault/secret_id
-      - ./vault/role_id:/dgraph/vault/role_id
-    command: dgraph alpha --my=alpha1:7080 --zero=zero1:5080
-    container_name: alpha1
-
-  vault:
-    image: vault:${VAULT_VERSION}
-    container_name: vault
-    ports:
-      - 8200:8200
-    volumes:
-      - ./vault/config.hcl:/vault/config/config.hcl
-      - ./vault/data:/vault/data
-    environment:
-      VAULT_ADDR: http://127.0.0.1:8200
-    entrypoint: vault server -config=/vault/config/config.hcl
-    cap_add:
-      - IPC_LOCK
diff --git a/contrib/config/vault/docker/vault/.gitkeep b/contrib/config/vault/docker/vault/.gitkeep
deleted file mode 100644
index e69de29bb2d..00000000000
diff --git a/contrib/config/vault/docker/vault/config.hcl b/contrib/config/vault/docker/vault/config.hcl
deleted file mode 100644
index 302f99e801a..00000000000
--- a/contrib/config/vault/docker/vault/config.hcl
+++ /dev/null
@@ -1,14 +0,0 @@
-storage "raft" {
-  path    = "/vault/data"
-  node_id = "vault1"
-}
-
-listener "tcp" {
-  address     = "0.0.0.0:8200"
-
tls_disable = "true" -} - -api_addr = "http://127.0.0.1:8200" -cluster_addr = "http://127.0.0.1:8201" -ui = true -disable_mlock = true diff --git a/contrib/config/vault/docker/vault/payload_alpha_secrets.json b/contrib/config/vault/docker/vault/payload_alpha_secrets.json deleted file mode 100644 index b579520c9bc..00000000000 --- a/contrib/config/vault/docker/vault/payload_alpha_secrets.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "options": { - "cas": 0 - }, - "data": { - "enc_key": "1234567890123456", - "hmac_secret": "12345678901234567890123456789012" - } -} diff --git a/contrib/config/vault/docker/vault/policy_admin.hcl b/contrib/config/vault/docker/vault/policy_admin.hcl deleted file mode 100644 index 5bac8082dde..00000000000 --- a/contrib/config/vault/docker/vault/policy_admin.hcl +++ /dev/null @@ -1,22 +0,0 @@ -path "secret/data/dgraph/*" { - capabilities = [ "create", "read", "update", "delete", "list" ] -} - -path "sys/auth/approle" { - capabilities = [ "create", "read", "update", "delete", "sudo" ] -} - -# Configure the AppRole auth method -path "sys/auth/approle/*" { - capabilities = [ "create", "read", "update", "delete" ] -} - -# Create and manage roles -path "auth/approle/*" { - capabilities = [ "create", "read", "update", "delete", "list" ] -} - -# Write ACL policies -path "sys/policies/acl/*" { - capabilities = [ "create", "read", "update", "delete", "list" ] -} diff --git a/contrib/config/vault/docker/vault/policy_dgraph.hcl b/contrib/config/vault/docker/vault/policy_dgraph.hcl deleted file mode 100644 index 7e426cd6da3..00000000000 --- a/contrib/config/vault/docker/vault/policy_dgraph.hcl +++ /dev/null @@ -1,3 +0,0 @@ -path "secret/data/dgraph/*" { - capabilities = [ "read", "update" ] -} diff --git a/contrib/docker-build/Makefile b/contrib/docker-build/Makefile deleted file mode 100644 index c47a6e5a5d1..00000000000 --- a/contrib/docker-build/Makefile +++ /dev/null @@ -1,4 +0,0 @@ -install: - @docker-compose up - @sudo chown $(USER) ../../dgraph/dgraph - @mv ../../dgraph/dgraph $(GOPATH)/bin diff --git a/contrib/docker-build/README.md b/contrib/docker-build/README.md deleted file mode 100644 index 8374cae96e1..00000000000 --- a/contrib/docker-build/README.md +++ /dev/null @@ -1,12 +0,0 @@ -# Docker build script - -This directory contains a Makefile that can be used to build Dgraph inside the official Dgraph -Docker container. This is useful for situations when the host system cannot be used to build a -binary that will work with the container (for example, if the host system has a different version of -glibc). - -## Usage - -Run `make install` in this directory. The script will ask you for your password in order to change -ownership of the compiled binary. By default, files written by Docker will be owned by root. This -script also takes care of moving the binary to $GOPATH/bin. 
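As a minimal sketch of the workflow this Makefile automates (assuming `GOPATH` is set and Docker plus docker-compose are installed on the host):

```sh
cd contrib/docker-build      # this directory
make install                 # builds dgraph inside the container, chowns the binary, moves it to $GOPATH/bin
ls -l "$GOPATH/bin/dgraph"   # the binary is now owned by $USER rather than root
```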
diff --git a/contrib/docker-build/build.sh b/contrib/docker-build/build.sh deleted file mode 100755 index 2baf0054eeb..00000000000 --- a/contrib/docker-build/build.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -export DEBIAN_FRONTEND=noninteractive -apt-get update -apt-get install -y build-essential git golang -cd /dgraph/dgraph || exit -make diff --git a/contrib/docker-build/docker-compose.yml b/contrib/docker-build/docker-compose.yml deleted file mode 100644 index da1339b06a8..00000000000 --- a/contrib/docker-build/docker-compose.yml +++ /dev/null @@ -1,14 +0,0 @@ -version: "3.5" -services: - build: - image: dgraph/dgraph:latest - container_name: build - working_dir: /data/build - labels: - cluster: test - service: build - volumes: - - type: bind - source: ../../ - target: /dgraph - command: /dgraph/contrib/docker-build/build.sh diff --git a/contrib/integration/acctupsert/.gitignore b/contrib/integration/acctupsert/.gitignore deleted file mode 100644 index 912270a50e6..00000000000 --- a/contrib/integration/acctupsert/.gitignore +++ /dev/null @@ -1 +0,0 @@ -acctupsert diff --git a/contrib/integration/acctupsert/main.go b/contrib/integration/acctupsert/main.go deleted file mode 100644 index f7bd9ef7dc1..00000000000 --- a/contrib/integration/acctupsert/main.go +++ /dev/null @@ -1,238 +0,0 @@ -/* - * SPDX-FileCopyrightText: © Hypermode Inc. - * SPDX-License-Identifier: Apache-2.0 - */ - -package main - -import ( - "context" - "encoding/json" - "flag" - "fmt" - "math/rand" - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/golang/glog" - - "github.com/dgraph-io/dgo/v240" - "github.com/dgraph-io/dgo/v240/protos/api" - "github.com/hypermodeinc/dgraph/v24/testutil" - "github.com/hypermodeinc/dgraph/v24/x" -) - -var ( - alpha = flag.String("alpha", "localhost:9180", "dgraph alpha address") - concurr = flag.Int("c", 3, "number of concurrent upserts per account") -) - -var ( - firsts = []string{"Paul", "Eric", "Jack", "John", "Martin"} - lasts = []string{"Brown", "Smith", "Robinson", "Waters", "Taylor"} - ages = []int{20, 25, 30, 35} - types = []string{"CEO", "COO", "CTO", "CFO"} -) - -type account struct { - first string - last string - age int -} - -var accounts []account - -func init() { - for _, first := range firsts { - for _, last := range lasts { - for _, age := range ages { - accounts = append(accounts, account{ - first: first, - last: last, - age: age, - }) - } - } - } -} - -func main() { - flag.Parse() - c, err := testutil.DgraphClientWithGroot(*alpha) - x.Check(err) - setup(c) - fmt.Println("Doing upserts") - doUpserts(c) - fmt.Println("Checking integrity") - checkIntegrity(c) -} - -func setup(c *dgo.Dgraph) { - ctx := context.Background() - x.Check(c.Alter(ctx, &api.Operation{ - DropAll: true, - })) - op := &api.Operation{ - Schema: ` - first: string @index(term) @upsert . - last: string @index(hash) @upsert . - age: int @index(int) @upsert . - when: int . 
-	`,
-	}
-	x.Check(c.Alter(ctx, op))
-}
-
-func doUpserts(c *dgo.Dgraph) {
-	var wg sync.WaitGroup
-	wg.Add(len(accounts) * *concurr)
-	for _, acct := range accounts {
-		for range *concurr {
-			go func(acct account) {
-				upsert(c, acct)
-				wg.Done()
-			}(acct)
-		}
-	}
-	wg.Wait()
-}
-
-var (
-	successCount uint64
-	retryCount   uint64
-	lastStatus   time.Time
-)
-
-func upsert(c *dgo.Dgraph, acc account) {
-	for {
-		if time.Since(lastStatus) > 100*time.Millisecond {
-			fmt.Printf("[%s] Success: %d Retries: %d\n", time.Now().Format(time.Stamp),
-				atomic.LoadUint64(&successCount), atomic.LoadUint64(&retryCount))
-			lastStatus = time.Now()
-		}
-		err := tryUpsert(c, acc)
-		switch err {
-		case nil:
-			atomic.AddUint64(&successCount, 1)
-			return
-		case dgo.ErrAborted:
-			// pass
-		default:
-			fmt.Printf("ERROR: %v\n", err)
-		}
-		atomic.AddUint64(&retryCount, 1)
-	}
-}
-
-func tryUpsert(c *dgo.Dgraph, acc account) error {
-	ctx := context.Background()
-
-	txn := c.NewTxn()
-	defer func() {
-		if err := txn.Discard(ctx); err != nil {
-			glog.Warningf("error in discarding txn: %v", err)
-		}
-	}()
-
-	q := fmt.Sprintf(`
-		{
-			get(func: eq(first, %q)) @filter(eq(last, %q) AND eq(age, %d)) {
-				uid
-				expand(_all_) {uid}
-			}
-		}
-	`, acc.first, acc.last, acc.age)
-	resp, err := txn.Query(ctx, q)
-	if err != nil &&
-		(strings.Contains(err.Error(), "Transaction is too old") ||
-			strings.Contains(err.Error(), "less than minTs")) {
-		return err
-	}
-	x.Check(err)
-	decode := struct {
-		Get []struct {
-			Uid *string
-		}
-	}{}
-	x.Check(json.Unmarshal(resp.GetJson(), &decode))
-
-	x.AssertTrue(len(decode.Get) <= 1)
-	t := rand.Intn(len(types))
-
-	var uid string
-	if len(decode.Get) == 1 {
-		x.AssertTrue(decode.Get[0].Uid != nil)
-		uid = *decode.Get[0].Uid
-	} else {
-		// Predicate names match the schema set up in setup().
-		nqs := fmt.Sprintf(`
-			_:acct <first> %q .
-			_:acct <last>  %q .
-			_:acct <age>   "%d"^^<xs:int> .
-			_:acct <%s>    "" .
-		`,
-			acc.first, acc.last, acc.age, types[t],
-		)
-		mu := &api.Mutation{SetNquads: []byte(nqs)}
-		assigned, err := txn.Mutate(ctx, mu)
-		if err != nil {
-			return err
-		}
-		uid = assigned.GetUids()["acct"]
-		x.AssertTrue(uid != "")
-	}
-
-	nq := fmt.Sprintf(`
-		<%s> <when> "%d"^^<xs:int> .
-	`,
-		uid, time.Now().Nanosecond(),
-	)
-	mu := &api.Mutation{SetNquads: []byte(nq)}
-	if _, err = txn.Mutate(ctx, mu); err != nil {
-		return err
-	}
-
-	return txn.Commit(ctx)
-}
-
-func checkIntegrity(c *dgo.Dgraph) {
-	ctx := context.Background()
-
-	q := fmt.Sprintf(`
-		{
-			all(func: anyofterms(first, %q)) {
-				first
-				last
-				age
-			}
-		}
-	`, strings.Join(firsts, " "))
-	resp, err := c.NewTxn().Query(ctx, q)
-	x.Check(err)
-
-	decode := struct {
-		All []struct {
-			First *string
-			Last  *string
-			Age   *int
-		}
-	}{}
-	x.Check(json.Unmarshal(resp.GetJson(), &decode))
-
-	// Make sure there is exactly one of each account.
- accountSet := make(map[string]struct{}) - for _, record := range decode.All { - x.AssertTrue(record.First != nil) - x.AssertTrue(record.Last != nil) - x.AssertTrue(record.Age != nil) - str := fmt.Sprintf("%s_%s_%d", *record.First, *record.Last, *record.Age) - accountSet[str] = struct{}{} - } - x.AssertTrue(len(accountSet) == len(accounts)) - for _, acct := range accounts { - str := fmt.Sprintf("%s_%s_%d", acct.first, acct.last, acct.age) - _, ok := accountSet[str] - x.AssertTrue(ok) - } -} diff --git a/contrib/integration/bank/.gitignore b/contrib/integration/bank/.gitignore deleted file mode 100644 index d282090a0bd..00000000000 --- a/contrib/integration/bank/.gitignore +++ /dev/null @@ -1 +0,0 @@ -/bank diff --git a/contrib/integration/bank/Dockerfile b/contrib/integration/bank/Dockerfile deleted file mode 100644 index dae443ce78d..00000000000 --- a/contrib/integration/bank/Dockerfile +++ /dev/null @@ -1,3 +0,0 @@ -FROM gcr.io/distroless/base -COPY ./bank / -CMD ["/bank"] diff --git a/contrib/integration/bank/Makefile b/contrib/integration/bank/Makefile deleted file mode 100644 index db2b7d589e3..00000000000 --- a/contrib/integration/bank/Makefile +++ /dev/null @@ -1,7 +0,0 @@ -.PHONY: build - -DOCKER_REPO ?= dgraph - -build: - go build -o bank - docker build -t $(DOCKER_REPO)/bank:latest . diff --git a/contrib/integration/bank/main.go b/contrib/integration/bank/main.go deleted file mode 100644 index 2e3a7cf846b..00000000000 --- a/contrib/integration/bank/main.go +++ /dev/null @@ -1,379 +0,0 @@ -/* - * SPDX-FileCopyrightText: © Hypermode Inc. - * SPDX-License-Identifier: Apache-2.0 - */ - -package main - -import ( - "bufio" - "bytes" - "context" - "crypto/tls" - "crypto/x509" - "encoding/json" - "flag" - "fmt" - "log" - "math/rand" - "net/http" - _ "net/http/pprof" // http profiler - "sort" - "strings" - "sync" - "sync/atomic" - "time" - - "google.golang.org/grpc" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/credentials/insecure" - - "github.com/dgraph-io/dgo/v240" - "github.com/dgraph-io/dgo/v240/protos/api" - "github.com/hypermodeinc/dgraph/v24/x" -) - -var ( - users = flag.Int("users", 100, "Number of accounts.") - conc = flag.Int("txns", 3, "Number of concurrent transactions per client.") - queryCheck = flag.Int("check_every", 5, "Check total accounts and balances after every N mutations.") - dur = flag.String("dur", "1m", "How long to run the transactions.") - alpha = flag.String("alpha", "localhost:9080", "Address of Dgraph alpha.") - verbose = flag.Bool("verbose", true, "Output all logs in verbose mode.") - login = flag.Bool("login", true, "Login as groot. Used for ACL-enabled cluster.") - slashToken = flag.String("slash-token", "", "Slash GraphQL API token") - debugHttp = flag.String("http", "localhost:6060", - "Address to serve http (pprof).") -) - -var startBal = 10 - -type account struct { - Uid string `json:"uid"` - Key int `json:"key,omitempty"` - Bal int `json:"bal,omitempty"` - Typ string `json:"typ"` -} - -type state struct { - aborts int32 - runs int32 -} - -func (s *state) createAccounts(dg *dgo.Dgraph) { - op := api.Operation{DropAll: true} - x.Check(dg.Alter(context.Background(), &op)) - - op.DropAll = false - op.Schema = ` - key: int @index(int) @upsert . - bal: int . - typ: string @index(exact) @upsert . 
- ` - x.Check(dg.Alter(context.Background(), &op)) - - var all []account - for i := 1; i <= *users; i++ { - a := account{ - Key: i, - Bal: startBal, - Typ: "ba", - } - all = append(all, a) - } - data, err := json.Marshal(all) - x.Check(err) - - txn := dg.NewTxn() - defer func() { - if err := txn.Discard(context.Background()); err != nil { - log.Fatalf("Discarding transaction failed: %+v\n", err) - } - }() - - var mu api.Mutation - mu.SetJson = data - resp, err := txn.Mutate(context.Background(), &mu) - if *verbose { - if resp.Txn == nil { - log.Printf("[resp.Txn: %+v] Mutation: %s\n", resp.Txn, mu.SetJson) - } else { - log.Printf("[StartTs: %v] Mutation: %s\n", resp.Txn.StartTs, mu.SetJson) - } - } - x.Check(err) - x.Check(txn.Commit(context.Background())) -} - -func (s *state) runTotal(dg *dgo.Dgraph) error { - query := ` - { - q(func: eq(typ, "ba")) { - uid - key - bal - } - } - ` - txn := dg.NewReadOnlyTxn() - defer func() { - if err := txn.Discard(context.Background()); err != nil { - log.Fatalf("Discarding transaction failed: %+v\n", err) - } - }() - - resp, err := txn.Query(context.Background(), query) - if err != nil { - return err - } - - m := make(map[string][]account) - if err := json.Unmarshal(resp.Json, &m); err != nil { - return err - } - accounts := m["q"] - sort.Slice(accounts, func(i, j int) bool { - return accounts[i].Key < accounts[j].Key - }) - var total int - for _, a := range accounts { - total += a.Bal - } - if *verbose { - log.Printf("[StartTs: %v] Read: %v. Total: %d\n", resp.Txn.StartTs, accounts, total) - } - if len(accounts) > *users { - log.Fatalf("len(accounts) = %d", len(accounts)) - } - if total != *users*startBal { - log.Fatalf("Total = %d", total) - } - return nil -} - -func (s *state) findAccount(txn *dgo.Txn, key int) (account, error) { - query := fmt.Sprintf(`{ q(func: eq(key, %d)) { key, uid, bal, typ }}`, key) - resp, err := txn.Query(context.Background(), query) - if err != nil { - return account{}, err - } - m := make(map[string][]account) - if err := json.Unmarshal(resp.Json, &m); err != nil { - log.Fatal(err) - } - accounts := m["q"] - if len(accounts) > 1 { - log.Printf("[StartTs: %v] Query: %s. Response: %s\n", resp.Txn.StartTs, query, resp.Json) - log.Fatal("Found multiple accounts") - } - if len(accounts) == 0 { - if *verbose { - log.Printf("[StartTs: %v] Unable to find account for K_%02d. JSON: %s\n", resp.Txn.StartTs, key, resp.Json) - } - return account{Key: key, Typ: "ba"}, nil - } - return accounts[0], nil -} - -func (s *state) runTransaction(dg *dgo.Dgraph, buf *bytes.Buffer) error { - w := bufio.NewWriter(buf) - fmt.Fprintf(w, "==>\n") - defer func() { - fmt.Fprintf(w, "---\n") - _ = w.Flush() - }() - - ctx := context.Background() - txn := dg.NewTxn() - defer func() { - if err := txn.Discard(context.Background()); err != nil { - log.Fatalf("Discarding transaction failed: %+v\n", err) - } - }() - - var sk, sd int - for { - sk = rand.Intn(*users + 1) - sd = rand.Intn(*users + 1) - if sk == 0 || sd == 0 { // Don't touch zero. - continue - } - if sk != sd { - break - } - } - - src, err := s.findAccount(txn, sk) - if err != nil { - return err - } - dst, err := s.findAccount(txn, sd) - if err != nil { - return err - } - if src.Key == dst.Key { - return nil - } - - amount := rand.Intn(10) - if src.Bal-amount <= 0 { - amount = src.Bal - } - fmt.Fprintf(w, "Moving [$%d, K_%02d -> K_%02d]. Src:%+v. 
Dst: %+v\n", - amount, src.Key, dst.Key, src, dst) - src.Bal -= amount - dst.Bal += amount - var mu api.Mutation - if len(src.Uid) > 0 { - // If there was no src.Uid, then don't run any mutation. - if src.Bal == 0 { - pb, err := json.Marshal(src) - x.Check(err) - mu.DeleteJson = pb - fmt.Fprintf(w, "Deleting K_%02d: %s\n", src.Key, mu.DeleteJson) - } else { - data, err := json.Marshal(src) - x.Check(err) - mu.SetJson = data - } - _, err := txn.Mutate(ctx, &mu) - if err != nil { - fmt.Fprintf(w, "Error while mutate: %v", err) - return err - } - } - - mu = api.Mutation{} - data, err := json.Marshal(dst) - x.Check(err) - mu.SetJson = data - assigned, err := txn.Mutate(ctx, &mu) - if err != nil { - fmt.Fprintf(w, "Error while mutate: %v", err) - return err - } - - if err := txn.Commit(ctx); err != nil { - return err - } - if len(assigned.GetUids()) > 0 { - fmt.Fprintf(w, "[StartTs: %v] CREATED K_%02d: %+v for %+v\n", assigned.Txn.StartTs, - dst.Key, assigned.GetUids(), dst) - for _, uid := range assigned.GetUids() { - dst.Uid = uid - } - } - fmt.Fprintf(w, "[StartTs: %v] MOVED [$%d, K_%02d -> K_%02d]. Src:%+v. Dst: %+v\n", - assigned.Txn.StartTs, amount, src.Key, dst.Key, src, dst) - return nil -} - -func (s *state) loop(dg *dgo.Dgraph, wg *sync.WaitGroup) { - defer wg.Done() - dur, err := time.ParseDuration(*dur) - if err != nil { - log.Fatal(err) - } - end := time.Now().Add(dur) - - var buf bytes.Buffer - for i := 0; ; i++ { - if i%*queryCheck == 0 { - if err := s.runTotal(dg); err != nil { - log.Printf("Error while runTotal: %v", err) - } - } - - buf.Reset() - err := s.runTransaction(dg, &buf) - if *verbose { - log.Printf("Final error: %v. %s", err, buf.String()) - } - if err != nil { - atomic.AddInt32(&s.aborts, 1) - } else { - r := atomic.AddInt32(&s.runs, 1) - if r%100 == 0 { - a := atomic.LoadInt32(&s.aborts) - fmt.Printf("Runs: %d. 
Aborts: %d\n", r, a) - } - if time.Now().After(end) { - return - } - } - } -} - -type authorizationCredentials struct { - token string -} - -func (a *authorizationCredentials) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { - return map[string]string{"Authorization": a.token}, nil -} - -func (a *authorizationCredentials) RequireTransportSecurity() bool { - return true -} - -func grpcConnection(one string) (*grpc.ClientConn, error) { - if slashToken == nil || *slashToken == "" { - return grpc.Dial(one, grpc.WithTransportCredentials(insecure.NewCredentials())) - } - pool, err := x509.SystemCertPool() - if err != nil { - return nil, err - } - return grpc.Dial( - one, - grpc.WithTransportCredentials(credentials.NewTLS(&tls.Config{ - RootCAs: pool, - ServerName: strings.Split(one, ":")[0], - })), - grpc.WithPerRPCCredentials(&authorizationCredentials{*slashToken}), - ) -} - -func main() { - flag.Parse() - go func() { - log.Printf("Listening for /debug HTTP requests at address: %v\n", *debugHttp) - log.Fatal(http.ListenAndServe(*debugHttp, nil)) - }() - - all := strings.Split(*alpha, ",") - x.AssertTrue(len(all) > 0) - - var clients []*dgo.Dgraph - for _, one := range all { - conn, err := grpcConnection(one) - if err != nil { - log.Fatal(err) - } - dc := api.NewDgraphClient(conn) - dg := dgo.NewDgraphClient(dc) - if *login { - // login as groot to perform the DropAll operation later - x.Check(dg.Login(context.Background(), "groot", "password")) - } - clients = append(clients, dg) - } - - s := state{} - s.createAccounts(clients[0]) - - var wg sync.WaitGroup - for range *conc { - for _, dg := range clients { - wg.Add(1) - go s.loop(dg, &wg) - } - } - wg.Wait() - fmt.Println() - fmt.Println("Total aborts", s.aborts) - fmt.Println("Total success", s.runs) - if err := s.runTotal(clients[0]); err != nil { - log.Fatal(err) - } -} diff --git a/contrib/integration/bigdata/main.go b/contrib/integration/bigdata/main.go deleted file mode 100644 index 751361f6fed..00000000000 --- a/contrib/integration/bigdata/main.go +++ /dev/null @@ -1,243 +0,0 @@ -/* - * SPDX-FileCopyrightText: © Hypermode Inc. - * SPDX-License-Identifier: Apache-2.0 - */ - -package main - -import ( - "context" - "encoding/json" - "flag" - "fmt" - "math/rand" - "net/url" - "os" - "strings" - "sync/atomic" - "time" - - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" - - "github.com/dgraph-io/dgo/v240" - "github.com/dgraph-io/dgo/v240/protos/api" - "github.com/hypermodeinc/dgraph/v24/x" -) - -var addrs = flag.String("addrs", "", "comma separated dgraph addresses") -var mode = flag.String("mode", "", "mode to run in ('mutate' or 'query')") -var conc = flag.Int("j", 1, "number of operations to run in parallel") - -func init() { - rand.Seed(time.Now().Unix()) -} - -var ctx = context.Background() - -func main() { - flag.Parse() - c := makeClient() - - // Install the schema automatically on the first run. This allows the same - // command to be used when running this program for the first and - // subsequent times. We guess if it's the first run based on the number of - // schema items. - resp, err := c.NewTxn().Query(ctx, "schema {}") - x.Check(err) - if len(resp.Json) < 5 { - // Run each schema alter separately so that there is an even - // distribution among all groups. 
-		for _, s := range schema() {
-			x.Check(c.Alter(ctx, &api.Operation{
-				Schema: s,
-			}))
-		}
-		x.Check2(c.NewTxn().Mutate(ctx, &api.Mutation{
-			CommitNow: true,
-			SetNquads: []byte(initialData()),
-		}))
-	}
-
-	switch *mode {
-	case "mutate":
-		var errCount int64
-		var mutateCount int64
-		for range *conc {
-			go func() {
-				for {
-					err := mutate(c)
-					if err == nil {
-						atomic.AddInt64(&mutateCount, 1)
-					} else {
-						atomic.AddInt64(&errCount, 1)
-					}
-				}
-			}()
-		}
-		for {
-			time.Sleep(time.Second)
-			fmt.Printf("Status: success_mutations=%d errors=%d\n",
-				atomic.LoadInt64(&mutateCount), atomic.LoadInt64(&errCount))
-		}
-	case "query":
-		var errCount int64
-		var queryCount int64
-		for range *conc {
-			go func() {
-				for {
-					err := showNode(c)
-					if err == nil {
-						atomic.AddInt64(&queryCount, 1)
-					} else {
-						atomic.AddInt64(&errCount, 1)
-					}
-				}
-			}()
-		}
-		for {
-			time.Sleep(time.Second)
-			fmt.Printf("Status: success_queries=%d errors=%d\n",
-				atomic.LoadInt64(&queryCount), atomic.LoadInt64(&errCount))
-		}
-	default:
-		fmt.Printf("unknown mode: %q\n", *mode)
-		os.Exit(1)
-	}
-}
-
-func schema() []string {
-	s := []string{"xid: string @index(exact) .\n"}
-	for char := 'a'; char <= 'z'; char++ {
-		s = append(s, fmt.Sprintf("count_%c: int .\n", char))
-	}
-	for char := 'a'; char <= 'z'; char++ {
-		s = append(s, fmt.Sprintf("attr_%c: string .\n", char))
-	}
-	return s
-}
-
-func initialData() string {
-	rdfs := "_:root <xid> \"root\" .\n"
-	for char := 'a'; char <= 'z'; char++ {
-		rdfs += fmt.Sprintf("_:root <count_%c> \"0\" .\n", char)
-	}
-	return rdfs
-}
-
-func makeClient() *dgo.Dgraph {
-	var dgcs []api.DgraphClient
-	for _, addr := range strings.Split(*addrs, ",") {
-		c, err := grpc.Dial(addr, grpc.WithTransportCredentials(insecure.NewCredentials()))
-		x.Check(err)
-		dgcs = append(dgcs, api.NewDgraphClient(c))
-	}
-	return dgo.NewDgraphClient(dgcs...)
-}
-
-type runner struct {
-	txn *dgo.Txn
-}
-
-func mutate(c *dgo.Dgraph) error {
-	r := &runner{
-		txn: c.NewTxn(),
-	}
-	defer func() { _ = r.txn.Discard(ctx) }()
-
-	char := 'a' + rune(rand.Intn(26))
-
-	var result struct {
-		Q []struct {
-			Uid   *string
-			Count *int
-		}
-	}
-	if err := r.query(&result, `
-		{
-			q(func: eq(xid, "root")) {
-				uid
-				count: count_%c
-			}
-		}
-	`, char); err != nil {
-		return err
-	}
-
-	x.AssertTrue(len(result.Q) > 0 && result.Q[0].Count != nil && result.Q[0].Uid != nil)
-
-	if _, err := r.txn.Mutate(ctx, &api.Mutation{
-		SetNquads: []byte(fmt.Sprintf("<%s> <count_%c> \"%d\" .\n",
-			*result.Q[0].Uid, char, *result.Q[0].Count+1)),
-	}); err != nil {
-		return err
-	}
-
-	rdfs := fmt.Sprintf("_:node <xid> \"%c_%d\" .\n", char, *result.Q[0].Count)
-	for char := 'a'; char <= 'z'; char++ {
-		if rand.Float64() < 0.9 {
-			continue
-		}
-		payload := make([]byte, 16+rand.Intn(16))
-		if _, err := rand.Read(payload); err != nil {
-			return err
-		}
-		rdfs += fmt.Sprintf("_:node <attr_%c> \"%s\" .\n", char, url.QueryEscape(string(payload)))
-	}
-	if _, err := r.txn.Mutate(ctx, &api.Mutation{
-		SetNquads: []byte(rdfs),
-	}); err != nil {
-		return err
-	}
-
-	return r.txn.Commit(ctx)
-}
-
-func showNode(c *dgo.Dgraph) error {
-	r := &runner{
-		txn: c.NewTxn(),
-	}
-	defer func() { _ = r.txn.Discard(ctx) }()
-
-	char := 'a' + rune(rand.Intn(26))
-	var result struct {
-		Q []struct {
-			Count *int
-		}
-	}
-
-	q := fmt.Sprintf(`
-		{
-			q(func: eq(xid, "root")) {
-				uid
-				count: count_%c
-			}
-		}
-	`, char)
-	resp, err := r.txn.Query(ctx, q)
-	if err != nil {
-		return err
-	}
-	if err := json.Unmarshal(resp.Json, &result); err != nil {
-		return err
-	}
-	x.AssertTruef(len(result.Q) > 0 && result.Q[0].Count != nil, "%v %+v", string(resp.Json), result)
-
-	var m map[string]interface{}
-	return r.query(&m, `
-		{
-			q(func: eq(xid, "%c_%d")) {
-				expand(_all_)
-			}
-		}
-	`, char, rand.Intn(*result.Q[0].Count))
-}
-
-func (r *runner) query(out interface{}, q string, args ...interface{}) error {
-	q = fmt.Sprintf(q, args...)
-	resp, err := r.txn.Query(ctx, q)
-	if err != nil {
-		return err
-	}
-	return json.Unmarshal(resp.Json, out)
-}
diff --git a/contrib/integration/mutates/.gitignore b/contrib/integration/mutates/.gitignore
deleted file mode 100644
index 9b86bcbdc28..00000000000
--- a/contrib/integration/mutates/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-/mutates
diff --git a/contrib/integration/mutates/main.go b/contrib/integration/mutates/main.go
deleted file mode 100644
index c529833b7d3..00000000000
--- a/contrib/integration/mutates/main.go
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * SPDX-FileCopyrightText: © Hypermode Inc.
- * SPDX-License-Identifier: Apache-2.0
- */
-
-package main
-
-import (
-	"bytes"
-	"context"
-	"flag"
-	"fmt"
-	"log"
-
-	"google.golang.org/grpc"
-	"google.golang.org/grpc/credentials/insecure"
-
-	"github.com/dgraph-io/dgo/v240"
-	"github.com/dgraph-io/dgo/v240/protos/api"
-	"github.com/hypermodeinc/dgraph/v24/x"
-)
-
-var alpha = flag.String("alpha", "localhost:9080", "Dgraph alpha addr")
-var insert = flag.Bool("add", false, "Insert")
-
-func main() {
-	flag.Parse()
-
-	// Setup dgraph client
-	ctx := context.Background()
-	conn, err := grpc.Dial(*alpha, grpc.WithTransportCredentials(insecure.NewCredentials()))
-	if err != nil {
-		log.Fatal(err)
-	}
-	pc := api.NewDgraphClient(conn)
-	c := dgo.NewDgraphClient(pc)
-	err = c.Login(ctx, "groot", "password")
-	x.Check(err)
-
-	// Ingest
-	if *insert {
-		testInsert3Quads(ctx, c)
-	} else {
-		testQuery3Quads(ctx, c)
-	}
-}
-
-func testInsert3Quads(ctx context.Context, c *dgo.Dgraph) {
-	// Set schema
-	op := &api.Operation{}
-	op.Schema = `name: string @index(fulltext) .`
-	x.Check(c.Alter(ctx, op))
-
-	txn := c.NewTxn()
-
-	mu := &api.Mutation{}
-	quad := &api.NQuad{
-		Subject:     "200",
-		Predicate:   "name",
-		ObjectValue: &api.Value{Val: &api.Value_StrVal{StrVal: "ok 200"}},
-	}
-	mu.Set = []*api.NQuad{quad}
-	_, err := txn.Mutate(ctx, mu)
-	if err != nil {
-		log.Fatalf("Error while running mutation: %v\n", err)
-	}
-
-	mu = &api.Mutation{}
-	quad = &api.NQuad{
-		Subject:     "300",
-		Predicate:   "name",
-		ObjectValue: &api.Value{Val: &api.Value_StrVal{StrVal: "ok 300"}},
-	}
-	mu.Set = []*api.NQuad{quad}
-	// mu.SetNquads = []byte(`<300> <name> "ok 300" .`)
-	_, err = txn.Mutate(ctx, mu)
-	if err != nil {
-		log.Fatalf("Error while running mutation: %v\n", err)
-	}
-
-	mu = &api.Mutation{}
-	quad = &api.NQuad{
-		Subject:     "400",
-		Predicate:   "name",
-		ObjectValue: &api.Value{Val: &api.Value_StrVal{StrVal: "ok 400"}},
-	}
-	mu.Set = []*api.NQuad{quad}
-	// mu.SetNquads = []byte(`<400> <name> "ok 400" .`)
-	_, err = txn.Mutate(ctx, mu)
-	if err != nil {
-		log.Fatalf("Error while running mutation: %v\n", err)
-	}
-
-	x.Check(txn.Commit(ctx))
-	fmt.Println("Commit OK")
-}
-
-func testQuery3Quads(ctx context.Context, c *dgo.Dgraph) {
-	txn := c.NewTxn()
-	q := `{ me(func: uid(200, 300, 400)) { name }}`
-	resp, err := txn.Query(ctx, q)
-	if err != nil {
-		log.Fatalf("Error while running query: %v\n", err)
-	}
-	fmt.Printf("Response JSON: %q\n", resp.Json)
-	x.AssertTrue(bytes.Equal(resp.Json, []byte(
-		"{\"me\":[{\"name\":\"ok 200\"},{\"name\":\"ok 300\"},{\"name\":\"ok 400\"}]}")))
-	x.AssertTrue(resp.Txn.StartTs > 0)
-	x.Check(txn.Commit(ctx))
-}
diff --git a/contrib/integration/swap/.gitignore b/contrib/integration/swap/.gitignore
deleted file mode 100644
index b7a2088ff42..00000000000
--- a/contrib/integration/swap/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-/swap
diff --git a/contrib/integration/swap/main.go b/contrib/integration/swap/main.go
deleted file mode 100644
index 1469e2dbf76..00000000000
--- a/contrib/integration/swap/main.go
+++ /dev/null
@@ -1,380 +0,0 @@
-/*
- * SPDX-FileCopyrightText: © Hypermode Inc.
- * SPDX-License-Identifier: Apache-2.0
- */
-
-package main
-
-import (
-	"context"
-	"encoding/json"
-	"flag"
-	"fmt"
-	"math/rand"
-	"reflect"
-	"sort"
-	"strings"
-	"sync/atomic"
-	"time"
-
-	"github.com/pkg/errors"
-
-	"github.com/dgraph-io/dgo/v240"
-	"github.com/dgraph-io/dgo/v240/protos/api"
-	"github.com/hypermodeinc/dgraph/v24/testutil"
-	"github.com/hypermodeinc/dgraph/v24/x"
-)
-
-var (
-	alpha     = flag.String("alpha", "localhost:9180", "Dgraph alpha address")
-	timeout   = flag.Int("timeout", 60, "query/mutation timeout")
-	numSents  = flag.Int("sentences", 100, "number of sentences")
-	numSwaps  = flag.Int("swaps", 1000, "number of swaps to attempt")
-	concurr   = flag.Int("concurrency", 10, "number of swaps to run concurrently")
-	invPerSec = flag.Int("inv", 10, "number of times to check invariants per second")
-)
-
-var (
-	successCount uint64
-	failCount    uint64
-	invChecks    uint64
-)
-
-func main() {
-	flag.Parse()
-
-	sents := createSentences(*numSents)
-	sort.Strings(sents)
-	wordCount := make(map[string]int)
-	for _, s := range sents {
-		words := strings.Split(s, " ")
-		for _, w := range words {
-			wordCount[w]++
-		}
-	}
-	type wc struct {
-		word  string
-		count int
-	}
-	var wcs []wc
-	for w, c := range wordCount {
-		wcs = append(wcs, wc{w, c})
-	}
-	sort.Slice(wcs, func(i, j int) bool {
-		wi := wcs[i]
-		wj := wcs[j]
-		return wi.word < wj.word
-	})
-	for _, w := range wcs {
-		fmt.Printf("%15s: %3d\n", w.word, w.count)
-	}
-
-	c, err := testutil.DgraphClientWithGroot(*alpha)
-	x.Check(err)
-	uids := setup(c, sents)
-
-	// Check invariants before doing any mutations as a sanity check.
-	x.Check(checkInvariants(c, uids, sents))
-
-	go func() {
-		ticker := time.NewTicker(time.Second / time.Duration(*invPerSec))
-		for range ticker.C {
-			for {
-				if err := checkInvariants(c, uids, sents); err == nil {
-					break
-				} else {
-					fmt.Printf("Error while running inv: %v\n", err)
-				}
-			}
-			atomic.AddUint64(&invChecks, 1)
-		}
-	}()
-
-	done := make(chan struct{})
-	go func() {
-		pending := make(chan struct{}, *concurr)
-		for range *numSwaps {
-			pending <- struct{}{}
-			go func() {
-				swapSentences(c,
-					uids[rand.Intn(len(uids))],
-					uids[rand.Intn(len(uids))],
-				)
-				<-pending
-			}()
-		}
-		for range *concurr {
-			pending <- struct{}{}
-		}
-		close(done)
-	}()
-
-	for {
-		select {
-		case <-time.After(time.Second):
-			fmt.Printf("Success:%d Fail:%d Check:%d\n",
-				atomic.LoadUint64(&successCount),
-				atomic.LoadUint64(&failCount),
-				atomic.LoadUint64(&invChecks),
-			)
-		case <-done:
-			// One final check for invariants.
-			x.Check(checkInvariants(c, uids, sents))
-			return
-		}
-	}
-
-}
-
-func getNextWord(index int) string {
-	// check if index is in the range of words
-	if index < 0 || index >= len(words) {
-		x.Fatalf("invalid index for getting next word: %d", index)
-	}
-	return words[index]
-}
-
-func createSentences(n int) []string {
-	var wordIndex int
-	sents := make([]string, n)
-	for i := range sents {
-		sents[i] = getNextWord(wordIndex)
-		wordIndex++
-	}
-
-	// add trailing words -- some will be common between sentences
-	same := 2
-	for {
-		var w string
-		var count int
-		for i := range sents {
-			if i%same == 0 {
-				w = getNextWord(wordIndex)
-				wordIndex++
-				count++
-			}
-			sents[i] += " " + w
-		}
-		if count == 1 {
-			// Every sentence got the same trailing word, no point going any further. Sort the
-			// words within each sentence.
-			for i, one := range sents {
-				splits := strings.Split(one, " ")
-				sort.Strings(splits)
-				sents[i] = strings.Join(splits, " ")
-			}
-			return sents
-		}
-		same *= 2
-	}
-}
-
-func setup(c *dgo.Dgraph, sentences []string) []string {
-	ctx, cancel := context.WithTimeout(context.Background(), time.Duration(*timeout)*time.Second)
-	defer cancel()
-	x.Check(c.Alter(ctx, &api.Operation{
-		DropAll: true,
-	}))
-	x.Check(c.Alter(ctx, &api.Operation{
-		Schema: `sentence: string @index(term) .`,
-	}))
-
-	rdfs := ""
-	for i, s := range sentences {
-		rdfs += fmt.Sprintf("_:s%d <sentence> %q .\n", i, s)
-	}
-	txn := c.NewTxn()
-	defer func() {
-		if err := txn.Discard(ctx); err != nil {
-			fmt.Printf("Discarding transaction failed: %+v\n", err)
-		}
-	}()
-
-	assigned, err := txn.Mutate(ctx, &api.Mutation{
-		SetNquads: []byte(rdfs),
-	})
-	x.Check(err)
-	x.Check(txn.Commit(ctx))
-
-	var uids []string
-	for _, uid := range assigned.GetUids() {
-		uids = append(uids, uid)
-	}
-	return uids
-}
-
-func swapSentences(c *dgo.Dgraph, node1, node2 string) {
-	ctx, cancel := context.WithTimeout(context.Background(), time.Duration(*timeout)*time.Second)
-	defer cancel()
-
-	txn := c.NewTxn()
-	defer func() {
-		if err := txn.Discard(ctx); err != nil {
-			fmt.Printf("Discarding transaction failed: %+v\n", err)
-		}
-	}()
-
-	resp, err := txn.Query(ctx, fmt.Sprintf(`
-		{
-			node1(func: uid(%s)) {
-				sentence
-			}
-			node2(func: uid(%s)) {
-				sentence
-			}
-		}
-	`, node1, node2))
-	x.Check(err)
-
-	decode := struct {
-		Node1 []struct {
-			Sentence *string
-		}
-		Node2 []struct {
-			Sentence *string
-		}
-	}{}
-	err = json.Unmarshal(resp.GetJson(), &decode)
-	x.Check(err)
-	x.AssertTrue(len(decode.Node1) == 1)
-	x.AssertTrue(len(decode.Node2) == 1)
-	x.AssertTrue(decode.Node1[0].Sentence != nil)
-	x.AssertTrue(decode.Node2[0].Sentence != nil)
-
-	// Delete sentences as an intermediate step.
-	delRDFs := fmt.Sprintf(`
-		<%s> <sentence> %q .
-		<%s> <sentence> %q .
-	`,
-		node1, *decode.Node1[0].Sentence,
-		node2, *decode.Node2[0].Sentence,
-	)
-	if _, err := txn.Mutate(ctx, &api.Mutation{
-		DelNquads: []byte(delRDFs),
-	}); err != nil {
-		atomic.AddUint64(&failCount, 1)
-		return
-	}
-
-	// Add garbage data as an intermediate step.
-	garbageRDFs := fmt.Sprintf(`
-		<%s> <sentence> "...garbage..." .
-		<%s> <sentence> "...garbage..." .
-	`,
-		node1, node2,
-	)
-	if _, err := txn.Mutate(ctx, &api.Mutation{
-		SetNquads: []byte(garbageRDFs),
-	}); err != nil {
-		atomic.AddUint64(&failCount, 1)
-		return
-	}
-
-	// Perform swap.
-	rdfs := fmt.Sprintf(`
-		<%s> <sentence> %q .
-		<%s> <sentence> %q .
-	`,
-		node1, *decode.Node2[0].Sentence,
-		node2, *decode.Node1[0].Sentence,
-	)
-	if _, err := txn.Mutate(ctx, &api.Mutation{
-		SetNquads: []byte(rdfs),
-	}); err != nil {
-		atomic.AddUint64(&failCount, 1)
-		return
-	}
-	if err := txn.Commit(ctx); err != nil {
-		atomic.AddUint64(&failCount, 1)
-		return
-	}
-	atomic.AddUint64(&successCount, 1)
-}
-
-func checkInvariants(c *dgo.Dgraph, uids []string, sentences []string) error {
-	ctx, cancel := context.WithTimeout(context.Background(), time.Duration(*timeout)*time.Second)
-	defer cancel()
-
-	// Get the sentence for each node. Then build (in memory) a term index.
-	// Then we can query dgraph for each term, and make sure the posting list
-	// is the same.
- - txn := c.NewTxn() - uidList := strings.Join(uids, ",") - resp, err := txn.Query(ctx, fmt.Sprintf(` - { - q(func: uid(%s)) { - sentence - uid - } - } - `, uidList)) - if err != nil { - return err - } - decode := struct { - Q []struct { - Sentence *string - Uid *string - } - }{} - x.Check(json.Unmarshal(resp.GetJson(), &decode)) - x.AssertTrue(len(decode.Q) == len(sentences)) - - index := map[string][]string{} // term to uid list - var gotSentences []string - for _, node := range decode.Q { - x.AssertTrue(node.Sentence != nil) - x.AssertTrue(node.Uid != nil) - gotSentences = append(gotSentences, *node.Sentence) - for _, word := range strings.Split(*node.Sentence, " ") { - index[word] = append(index[word], *node.Uid) - } - } - sort.Strings(gotSentences) - for i := range sentences { - if sentences[i] != gotSentences[i] { - fmt.Printf("Sentence doesn't match. Wanted: %q. Got: %q\n", sentences[i], gotSentences[i]) - fmt.Printf("All sentences: %v\n", sentences) - fmt.Printf("Got sentences: %v\n", gotSentences) - x.AssertTrue(false) - } - } - - for word, uids := range index { - q := fmt.Sprintf(` - { - q(func: anyofterms(sentence, %q)) { - uid - } - } - `, word) - - resp, err := txn.Query(ctx, q) - if err != nil { - return err - } - decode := struct { - Q []struct { - Uid *string - } - }{} - x.Check(json.Unmarshal(resp.GetJson(), &decode)) - var gotUids []string - for _, node := range decode.Q { - x.AssertTrue(node.Uid != nil) - gotUids = append(gotUids, *node.Uid) - } - - sort.Strings(gotUids) - sort.Strings(uids) - if !reflect.DeepEqual(gotUids, uids) { - x.Panic(errors.Errorf(`query: %s\n - Uids in index for %q didn't match - calculated: %v. Len: %d - got: %v - `, q, word, uids, len(uids), gotUids)) - } - } - return nil -} diff --git a/contrib/integration/swap/words.go b/contrib/integration/swap/words.go deleted file mode 100644 index 5cacde9c7a2..00000000000 --- a/contrib/integration/swap/words.go +++ /dev/null @@ -1,2252 +0,0 @@ -/* - * SPDX-FileCopyrightText: © Hypermode Inc. 
- * SPDX-License-Identifier: Apache-2.0 - */ - -package main - -var words = []string{ - "information", - "available", - "copyright", - "university", - "management", - "international", - "development", - "education", - "community", - "technology", - "following", - "resources", - "including", - "directory", - "government", - "department", - "description", - "insurance", - "different", - "categories", - "conditions", - "accessories", - "september", - "questions", - "application", - "financial", - "equipment", - "performance", - "experience", - "important", - "activities", - "additional", - "something", - "professional", - "committee", - "washington", - "california", - "reference", - "companies", - "computers", - "president", - "australia", - "discussion", - "entertainment", - "agreement", - "marketing", - "association", - "collection", - "solutions", - "electronics", - "technical", - "microsoft", - "conference", - "environment", - "statement", - "downloads", - "applications", - "requirements", - "individual", - "subscribe", - "everything", - "production", - "commercial", - "advertising", - "treatment", - "newsletter", - "knowledge", - "currently", - "construction", - "registered", - "protection", - "engineering", - "published", - "corporate", - "customers", - "materials", - "countries", - "standards", - "political", - "advertise", - "environmental", - "availability", - "employment", - "commission", - "administration", - "institute", - "sponsored", - "electronic", - "condition", - "effective", - "organization", - "selection", - "corporation", - "executive", - "necessary", - "according", - "particular", - "facilities", - "opportunities", - "appropriate", - "statistics", - "investment", - "christmas", - "registration", - "furniture", - "wednesday", - "structure", - "distribution", - "industrial", - "potential", - "responsible", - "communications", - "associated", - "foundation", - "documents", - "communication", - "independent", - "operating", - "developed", - "telephone", - "population", - "navigation", - "operations", - "therefore", - "christian", - "understand", - "publications", - "worldwide", - "connection", - "publisher", - "introduction", - "properties", - "accommodation", - "excellent", - "opportunity", - "assessment", - "especially", - "interface", - "operation", - "restaurants", - "beautiful", - "locations", - "significant", - "technologies", - "manufacturer", - "providing", - "authority", - "considered", - "programme", - "enterprise", - "educational", - "employees", - "alternative", - "processing", - "responsibility", - "resolution", - "publication", - "relations", - "photography", - "components", - "assistance", - "completed", - "organizations", - "otherwise", - "transportation", - "disclaimer", - "membership", - "recommended", - "background", - "character", - "maintenance", - "functions", - "trademarks", - "phentermine", - "submitted", - "television", - "interested", - "throughout", - "established", - "programming", - "regarding", - "instructions", - "increased", - "understanding", - "beginning", - "associates", - "instruments", - "businesses", - "specified", - "restaurant", - "procedures", - "relationship", - "traditional", - "sometimes", - "themselves", - "transport", - "interesting", - "evaluation", - "implementation", - "galleries", - "references", - "presented", - "literature", - "respective", - "definition", - "secretary", - "networking", - "australian", - "magazines", - "francisco", - "individuals", - "guidelines", - "installation", - "described", - "attention", - 
"difference", - "regulations", - "certificate", - "directions", - "documentation", - "automotive", - "successful", - "communities", - "situation", - "publishing", - "emergency", - "developing", - "determine", - "temperature", - "announcements", - "historical", - "ringtones", - "difficult", - "scientific", - "satellite", - "particularly", - "functional", - "monitoring", - "architecture", - "recommend", - "dictionary", - "accounting", - "manufacturing", - "professor", - "generally", - "continued", - "techniques", - "permission", - "generation", - "component", - "guarantee", - "processes", - "interests", - "paperback", - "classifieds", - "supported", - "competition", - "providers", - "characters", - "thousands", - "apartments", - "generated", - "administrative", - "practices", - "reporting", - "essential", - "affiliate", - "immediately", - "designated", - "integrated", - "configuration", - "comprehensive", - "universal", - "presentation", - "languages", - "compliance", - "improvement", - "pennsylvania", - "challenge", - "acceptance", - "strategies", - "affiliates", - "multimedia", - "certified", - "computing", - "interactive", - "procedure", - "leadership", - "religious", - "breakfast", - "developer", - "approximately", - "recommendations", - "comparison", - "automatically", - "minnesota", - "adventure", - "institutions", - "assistant", - "advertisement", - "headlines", - "yesterday", - "determined", - "wholesale", - "extension", - "statements", - "completely", - "electrical", - "applicable", - "manufacturers", - "classical", - "dedicated", - "direction", - "basketball", - "wisconsin", - "personnel", - "identified", - "professionals", - "advantage", - "newsletters", - "estimated", - "anonymous", - "miscellaneous", - "integration", - "interview", - "framework", - "installed", - "massachusetts", - "associate", - "frequently", - "discussions", - "laboratory", - "destination", - "intelligence", - "specifications", - "tripadvisor", - "residential", - "decisions", - "industries", - "partnership", - "editorial", - "expression", - "provisions", - "principles", - "suggestions", - "replacement", - "strategic", - "economics", - "compatible", - "apartment", - "netherlands", - "consulting", - "recreation", - "participants", - "favorites", - "translation", - "estimates", - "protected", - "philadelphia", - "officials", - "contained", - "legislation", - "parameters", - "relationships", - "tennessee", - "representative", - "frequency", - "introduced", - "departments", - "residents", - "displayed", - "performed", - "administrator", - "addresses", - "permanent", - "agriculture", - "constitutes", - "portfolio", - "practical", - "delivered", - "collectibles", - "infrastructure", - "exclusive", - "originally", - "utilities", - "philosophy", - "regulation", - "reduction", - "nutrition", - "recording", - "secondary", - "wonderful", - "announced", - "prevention", - "mentioned", - "automatic", - "healthcare", - "maintained", - "increasing", - "connected", - "directors", - "participation", - "containing", - "combination", - "amendment", - "guaranteed", - "libraries", - "distributed", - "singapore", - "enterprises", - "convention", - "principal", - "certification", - "previously", - "buildings", - "household", - "batteries", - "positions", - "subscription", - "contemporary", - "panasonic", - "permalink", - "signature", - "provision", - "certainly", - "newspaper", - "liability", - "trademark", - "trackback", - "americans", - "promotion", - "conversion", - "reasonable", - "broadband", - "influence", - "importance", - 
"webmaster", - "prescription", - "specifically", - "represent", - "conservation", - "louisiana", - "javascript", - "marketplace", - "evolution", - "certificates", - "objectives", - "suggested", - "concerned", - "structures", - "encyclopedia", - "continuing", - "interracial", - "competitive", - "suppliers", - "preparation", - "receiving", - "accordance", - "discussed", - "elizabeth", - "reservations", - "playstation", - "instruction", - "annotation", - "differences", - "establish", - "expressed", - "paragraph", - "mathematics", - "compensation", - "conducted", - "percentage", - "mississippi", - "requested", - "connecticut", - "personals", - "immediate", - "agricultural", - "supporting", - "collections", - "participate", - "specialist", - "experienced", - "investigation", - "institution", - "searching", - "proceedings", - "transmission", - "characteristics", - "experiences", - "extremely", - "verzeichnis", - "contracts", - "concerning", - "developers", - "equivalent", - "chemistry", - "neighborhood", - "variables", - "continues", - "curriculum", - "psychology", - "responses", - "circumstances", - "identification", - "appliances", - "elementary", - "unlimited", - "printable", - "enforcement", - "hardcover", - "celebrity", - "chocolate", - "hampshire", - "bluetooth", - "controlled", - "requirement", - "authorities", - "representatives", - "pregnancy", - "biography", - "attractions", - "transactions", - "authorized", - "retirement", - "financing", - "efficiency", - "efficient", - "commitment", - "specialty", - "interviews", - "qualified", - "discovery", - "classified", - "confidence", - "lifestyle", - "consistent", - "clearance", - "connections", - "inventory", - "converter", - "organisation", - "objective", - "indicated", - "securities", - "volunteer", - "democratic", - "switzerland", - "parameter", - "processor", - "dimensions", - "contribute", - "challenges", - "recognition", - "submission", - "encourage", - "regulatory", - "inspection", - "consumers", - "territory", - "transaction", - "manchester", - "contributions", - "continuous", - "resulting", - "cambridge", - "initiative", - "execution", - "disability", - "increases", - "contractor", - "examination", - "indicates", - "committed", - "extensive", - "affordable", - "candidate", - "databases", - "outstanding", - "perspective", - "messenger", - "tournament", - "consideration", - "discounts", - "catalogue", - "publishers", - "caribbean", - "reservation", - "remaining", - "depending", - "expansion", - "purchased", - "performing", - "collected", - "absolutely", - "featuring", - "implement", - "scheduled", - "calculator", - "significantly", - "temporary", - "sufficient", - "awareness", - "vancouver", - "contribution", - "measurement", - "constitution", - "packaging", - "consultation", - "northwest", - "classroom", - "democracy", - "wallpaper", - "merchandise", - "resistance", - "baltimore", - "candidates", - "charlotte", - "biological", - "transition", - "preferences", - "instrument", - "classification", - "physician", - "hollywood", - "wikipedia", - "spiritual", - "photographs", - "relatively", - "satisfaction", - "represents", - "pittsburgh", - "preferred", - "intellectual", - "comfortable", - "interaction", - "listening", - "effectively", - "experimental", - "revolution", - "consolidation", - "landscape", - "dependent", - "mechanical", - "consultants", - "applicant", - "cooperation", - "acquisition", - "implemented", - "directories", - "recognized", - "notification", - "licensing", - "textbooks", - "diversity", - "cleveland", - 
"investments", - "accessibility", - "sensitive", - "templates", - "completion", - "universities", - "technique", - "contractors", - "subscriptions", - "calculate", - "alexander", - "broadcast", - "converted", - "anniversary", - "improvements", - "specification", - "accessible", - "accessory", - "typically", - "representation", - "arrangements", - "conferences", - "uniprotkb", - "consumption", - "birmingham", - "afternoon", - "consultant", - "controller", - "ownership", - "committees", - "legislative", - "researchers", - "unsubscribe", - "molecular", - "residence", - "attorneys", - "operators", - "sustainable", - "philippines", - "statistical", - "innovation", - "employers", - "definitions", - "elections", - "stainless", - "newspapers", - "hospitals", - "exception", - "successfully", - "indonesia", - "primarily", - "capabilities", - "recommendation", - "recruitment", - "organized", - "improving", - "expensive", - "organisations", - "explained", - "programmes", - "expertise", - "mechanism", - "jewellery", - "eventually", - "agreements", - "considering", - "innovative", - "conclusion", - "disorders", - "collaboration", - "detection", - "formation", - "engineers", - "proposals", - "moderator", - "tutorials", - "settlement", - "collectables", - "fantastic", - "governments", - "purchasing", - "appointed", - "operational", - "corresponding", - "descriptions", - "determination", - "animation", - "productions", - "telecommunications", - "instructor", - "approaches", - "highlights", - "designers", - "melbourne", - "scientists", - "blackjack", - "argentina", - "possibility", - "commissioner", - "dangerous", - "reliability", - "unfortunately", - "respectively", - "volunteers", - "attachment", - "appointment", - "workshops", - "hurricane", - "represented", - "mortgages", - "responsibilities", - "carefully", - "productivity", - "investors", - "underground", - "diagnosis", - "principle", - "vacations", - "calculated", - "appearance", - "incorporated", - "notebooks", - "algorithm", - "valentine", - "involving", - "investing", - "christopher", - "admission", - "terrorism", - "parliament", - "situations", - "allocated", - "corrections", - "structural", - "municipal", - "describes", - "disabilities", - "substance", - "prohibited", - "addressed", - "simulation", - "initiatives", - "concentration", - "interpretation", - "bankruptcy", - "optimization", - "substances", - "discovered", - "restrictions", - "participating", - "exhibition", - "composition", - "nationwide", - "definitely", - "existence", - "commentary", - "limousines", - "developments", - "immigration", - "destinations", - "necessarily", - "attribute", - "apparently", - "surrounding", - "mountains", - "popularity", - "postposted", - "coordinator", - "obviously", - "fundamental", - "substantial", - "progressive", - "championship", - "sacramento", - "impossible", - "depression", - "testimonials", - "memorabilia", - "cartridge", - "explanation", - "cincinnati", - "subsection", - "electricity", - "permitted", - "workplace", - "confirmed", - "wallpapers", - "infection", - "eligibility", - "involvement", - "placement", - "observations", - "vbulletin", - "subsequent", - "motorcycle", - "disclosure", - "establishment", - "presentations", - "undergraduate", - "occupation", - "donations", - "associations", - "citysearch", - "radiation", - "seriously", - "elsewhere", - "pollution", - "conservative", - "guestbook", - "effectiveness", - "demonstrate", - "atmosphere", - "experiment", - "purchases", - "federation", - "assignment", - "chemicals", - "everybody", - 
"nashville", - "counseling", - "acceptable", - "satisfied", - "measurements", - "milwaukee", - "medication", - "warehouse", - "shareware", - "violation", - "configure", - "stability", - "southwest", - "institutional", - "expectations", - "independence", - "metabolism", - "personally", - "excellence", - "somewhere", - "attributes", - "recognize", - "screening", - "thumbnail", - "forgotten", - "intelligent", - "edinburgh", - "obligation", - "regardless", - "restricted", - "republican", - "merchants", - "attendance", - "arguments", - "amsterdam", - "adventures", - "announcement", - "appreciate", - "regularly", - "mechanisms", - "customize", - "tradition", - "indicators", - "emissions", - "physicians", - "complaint", - "experiments", - "afghanistan", - "scholarship", - "governance", - "supplements", - "camcorder", - "implementing", - "ourselves", - "conversation", - "capability", - "producing", - "precision", - "contributed", - "reproduction", - "ingredients", - "franchise", - "complaints", - "promotions", - "rehabilitation", - "maintaining", - "environments", - "reception", - "correctly", - "consequences", - "geography", - "appearing", - "integrity", - "discrimination", - "processed", - "implications", - "functionality", - "intermediate", - "emotional", - "platforms", - "overnight", - "geographic", - "preliminary", - "districts", - "introduce", - "promotional", - "chevrolet", - "specialists", - "generator", - "suspension", - "correction", - "authentication", - "communicate", - "supplement", - "showtimes", - "promoting", - "machinery", - "bandwidth", - "probability", - "dimension", - "schedules", - "admissions", - "quarterly", - "illustrated", - "continental", - "alternate", - "achievement", - "limitations", - "automated", - "passenger", - "convenient", - "orientation", - "childhood", - "flexibility", - "jurisdiction", - "displaying", - "encouraged", - "cartridges", - "declaration", - "automation", - "advantages", - "preparing", - "recipient", - "extensions", - "athletics", - "southeast", - "alternatives", - "determining", - "personalized", - "conditioning", - "partnerships", - "destruction", - "increasingly", - "migration", - "basically", - "conventional", - "applicants", - "occupational", - "adjustment", - "treatments", - "camcorders", - "difficulty", - "collective", - "coalition", - "enrollment", - "producers", - "collector", - "interfaces", - "advertisers", - "representing", - "observation", - "restoration", - "convenience", - "returning", - "opposition", - "container", - "defendant", - "confirmation", - "supervisor", - "peripherals", - "bestsellers", - "departure", - "minneapolis", - "interactions", - "intervention", - "attraction", - "modification", - "customized", - "understood", - "assurance", - "happening", - "amendments", - "metropolitan", - "compilation", - "verification", - "attractive", - "recordings", - "jefferson", - "gardening", - "obligations", - "orchestra", - "polyphonic", - "outsourcing", - "adjustable", - "allocation", - "discipline", - "demonstrated", - "identifying", - "alphabetical", - "dispatched", - "installing", - "voluntary", - "photographer", - "messaging", - "constructed", - "additions", - "requiring", - "engagement", - "refinance", - "calendars", - "arrangement", - "conclusions", - "bibliography", - "compatibility", - "furthermore", - "cooperative", - "measuring", - "jacksonville", - "headquarters", - "transfers", - "transformation", - "attachments", - "administrators", - "personality", - "facilitate", - "subscriber", - "priorities", - "bookstore", - "parenting", 
- "incredible", - "commonwealth", - "pharmaceutical", - "manhattan", - "workforce", - "organizational", - "portuguese", - "everywhere", - "discharge", - "halloween", - "hazardous", - "methodology", - "housewares", - "reputation", - "resistant", - "democrats", - "recycling", - "qualifications", - "slideshow", - "variation", - "transferred", - "photograph", - "distributor", - "underlying", - "wrestling", - "photoshop", - "gathering", - "projection", - "mathematical", - "specialized", - "diagnostic", - "indianapolis", - "corporations", - "criticism", - "automobile", - "confidential", - "statutory", - "accommodations", - "northeast", - "downloaded", - "paintings", - "injection", - "yorkshire", - "populations", - "protective", - "initially", - "indicator", - "eliminate", - "sunglasses", - "preference", - "threshold", - "venezuela", - "exploration", - "sequences", - "astronomy", - "translate", - "announces", - "compression", - "establishing", - "constitutional", - "perfectly", - "instantly", - "litigation", - "submissions", - "broadcasting", - "horizontal", - "terrorist", - "informational", - "ecommerce", - "suffering", - "prospective", - "ultimately", - "artificial", - "spectacular", - "coordination", - "connector", - "affiliated", - "activation", - "naturally", - "subscribers", - "mitsubishi", - "underwear", - "potentially", - "constraints", - "inclusive", - "dimensional", - "considerable", - "selecting", - "processors", - "pantyhose", - "difficulties", - "complexity", - "constantly", - "barcelona", - "presidential", - "documentary", - "territories", - "palestinian", - "legislature", - "hospitality", - "procurement", - "theoretical", - "exercises", - "surveillance", - "protocols", - "highlight", - "substitute", - "inclusion", - "hopefully", - "brilliant", - "evaluated", - "assignments", - "termination", - "households", - "authentic", - "montgomery", - "architectural", - "louisville", - "macintosh", - "movements", - "amenities", - "virtually", - "authorization", - "projector", - "comparative", - "psychological", - "surprised", - "genealogy", - "expenditure", - "liverpool", - "connectivity", - "algorithms", - "similarly", - "collaborative", - "excluding", - "commander", - "suggestion", - "spotlight", - "investigate", - "connecting", - "logistics", - "proportion", - "significance", - "symposium", - "essentials", - "protecting", - "transmitted", - "screenshots", - "intensive", - "switching", - "correspondence", - "supervision", - "expenditures", - "separation", - "testimony", - "celebrities", - "mandatory", - "boundaries", - "syndication", - "celebration", - "filtering", - "luxembourg", - "offensive", - "deployment", - "colleagues", - "separated", - "directive", - "governing", - "retailers", - "occasionally", - "attending", - "recruiting", - "instructional", - "traveling", - "permissions", - "biotechnology", - "prescribed", - "catherine", - "reproduced", - "calculation", - "consolidated", - "occasions", - "equations", - "exceptional", - "respondents", - "considerations", - "queensland", - "musicians", - "composite", - "unavailable", - "essentially", - "designing", - "assessments", - "brunswick", - "sensitivity", - "preservation", - "streaming", - "intensity", - "technological", - "syndicate", - "antivirus", - "addressing", - "discounted", - "bangladesh", - "constitute", - "concluded", - "desperate", - "demonstration", - "governmental", - "manufactured", - "graduation", - "variations", - "addiction", - "springfield", - "synthesis", - "undefined", - "unemployment", - "enhancement", - "newcastle", - 
"performances", - "societies", - "brazilian", - "identical", - "petroleum", - "norwegian", - "retention", - "exchanges", - "soundtrack", - "wondering", - "profession", - "separately", - "physiology", - "collecting", - "participant", - "scholarships", - "recreational", - "dominican", - "friendship", - "expanding", - "provincial", - "investigations", - "medications", - "rochester", - "advertiser", - "encryption", - "downloadable", - "sophisticated", - "possession", - "laboratories", - "vegetables", - "thumbnails", - "stockings", - "respondent", - "destroyed", - "manufacture", - "wordpress", - "vulnerability", - "accountability", - "celebrate", - "accredited", - "appliance", - "compressed", - "scheduling", - "perspectives", - "mortality", - "christians", - "therapeutic", - "impressive", - "accordingly", - "architect", - "challenging", - "microwave", - "accidents", - "relocation", - "contributors", - "violations", - "temperatures", - "competitions", - "discretion", - "cosmetics", - "repository", - "concentrations", - "christianity", - "negotiations", - "realistic", - "generating", - "christina", - "congressional", - "photographic", - "modifications", - "millennium", - "achieving", - "fisheries", - "exceptions", - "reactions", - "macromedia", - "companion", - "divisions", - "additionally", - "fellowship", - "victorian", - "copyrights", - "lithuania", - "mastercard", - "chronicles", - "obtaining", - "distribute", - "decorative", - "enlargement", - "campaigns", - "conjunction", - "instances", - "indigenous", - "validation", - "corruption", - "incentives", - "cholesterol", - "differential", - "scientist", - "starsmerchant", - "arthritis", - "nevertheless", - "practitioners", - "transcript", - "inflation", - "compounds", - "contracting", - "structured", - "reasonably", - "graduates", - "recommends", - "controlling", - "distributors", - "arlington", - "particles", - "extraordinary", - "indicating", - "coordinate", - "exclusively", - "limitation", - "widescreen", - "illustration", - "construct", - "inquiries", - "inspiration", - "affecting", - "downloading", - "aggregate", - "forecasts", - "complicated", - "shopzilla", - "decorating", - "expressions", - "shakespeare", - "connectors", - "conflicts", - "travelers", - "offerings", - "incorrect", - "furnishings", - "guatemala", - "perception", - "renaissance", - "pathology", - "ordinance", - "photographers", - "infections", - "configured", - "festivals", - "possibilities", - "contributing", - "analytical", - "circulation", - "assumption", - "jerusalem", - "transexuales", - "invention", - "technician", - "executives", - "enquiries", - "cognitive", - "exploring", - "registrar", - "supporters", - "withdrawal", - "predicted", - "saskatchewan", - "cancellation", - "ministers", - "veterinary", - "prostores", - "relevance", - "incentive", - "butterfly", - "mechanics", - "numerical", - "reflection", - "accompanied", - "invitation", - "princeton", - "spirituality", - "meanwhile", - "proprietary", - "childrens", - "thumbzilla", - "porcelain", - "pichunter", - "translated", - "columnists", - "consensus", - "delivering", - "journalism", - "intention", - "undertaken", - "statewide", - "semiconductor", - "illustrations", - "happiness", - "substantially", - "identifier", - "calculations", - "conducting", - "accomplished", - "calculators", - "impression", - "correlation", - "fragrance", - "neighbors", - "transparent", - "charleston", - "champions", - "selections", - "projectors", - "inappropriate", - "comparing", - "vocational", - "pharmacies", - "introducing", - 
"appreciated", - "albuquerque", - "distinguished", - "projected", - "assumptions", - "shareholders", - "developmental", - "regulated", - "anticipated", - "completing", - "comparable", - "confusion", - "copyrighted", - "warranties", - "documented", - "paperbacks", - "keyboards", - "vulnerable", - "reflected", - "respiratory", - "notifications", - "transexual", - "mainstream", - "evaluating", - "subcommittee", - "maternity", - "journalists", - "foundations", - "volleyball", - "liabilities", - "decreased", - "tolerance", - "creativity", - "describing", - "lightning", - "quotations", - "inspector", - "bookmarks", - "behavioral", - "riverside", - "bathrooms", - "abilities", - "initiated", - "nonprofit", - "lancaster", - "suspended", - "containers", - "attitudes", - "simultaneously", - "integrate", - "sociology", - "screenshot", - "exhibitions", - "confident", - "retrieved", - "officially", - "consortium", - "recipients", - "delicious", - "traditions", - "periodically", - "hungarian", - "referring", - "transform", - "educators", - "vegetable", - "humanities", - "independently", - "alignment", - "masturbating", - "henderson", - "britannica", - "competitors", - "visibility", - "consciousness", - "encounter", - "resolutions", - "accessing", - "attempted", - "witnesses", - "administered", - "strengthen", - "frederick", - "aggressive", - "advertisements", - "sublimedirectory", - "disturbed", - "determines", - "sculpture", - "motivation", - "pharmacology", - "passengers", - "quantities", - "petersburg", - "consistently", - "powerpoint", - "obituaries", - "punishment", - "appreciation", - "subsequently", - "providence", - "restriction", - "incorporate", - "backgrounds", - "treasurer", - "lightweight", - "transcription", - "complications", - "scripting", - "remembered", - "synthetic", - "testament", - "specifics", - "partially", - "wilderness", - "generations", - "tournaments", - "sponsorship", - "headphones", - "proceeding", - "volkswagen", - "uncertainty", - "breakdown", - "reconstruction", - "subsidiary", - "strengths", - "encouraging", - "furnished", - "terrorists", - "comparisons", - "beneficial", - "distributions", - "viewpicture", - "threatened", - "republicans", - "discusses", - "responded", - "abstracts", - "prediction", - "pharmaceuticals", - "thesaurus", - "individually", - "battlefield", - "literally", - "ecological", - "appraisal", - "consisting", - "submitting", - "citations", - "geographical", - "mozambique", - "disclaimers", - "championships", - "sheffield", - "finishing", - "wellington", - "prospects", - "bulgarian", - "aboriginal", - "remarkable", - "preventing", - "productive", - "boulevard", - "compliant", - "penalties", - "imagination", - "refurbished", - "activated", - "conferencing", - "armstrong", - "politicians", - "trackbacks", - "accommodate", - "christine", - "accepting", - "precipitation", - "isolation", - "sustained", - "approximate", - "programmer", - "greetings", - "inherited", - "incomplete", - "chronicle", - "legitimate", - "biographies", - "investigator", - "plaintiff", - "prisoners", - "mediterranean", - "nightlife", - "architects", - "entrepreneur", - "freelance", - "excessive", - "screensaver", - "valuation", - "unexpected", - "cigarette", - "characteristic", - "metallica", - "consequently", - "appointments", - "narrative", - "academics", - "quantitative", - "screensavers", - "subdivision", - "distinction", - "livestock", - "exemption", - "sustainability", - "formatting", - "nutritional", - "nicaragua", - "affiliation", - "relatives", - "satisfactory", - 
"revolutionary", - "bracelets", - "telephony", - "breathing", - "thickness", - "adjustments", - "graphical", - "discussing", - "aerospace", - "meaningful", - "maintains", - "shortcuts", - "voyeurweb", - "extending", - "specifies", - "accreditation", - "blackberry", - "meditation", - "microphone", - "macedonia", - "combining", - "instrumental", - "organizing", - "moderators", - "kazakhstan", - "standings", - "partition", - "invisible", - "translations", - "commodity", - "kilometers", - "thanksgiving", - "guarantees", - "indication", - "congratulations", - "cigarettes", - "controllers", - "consultancy", - "conventions", - "coordinates", - "responding", - "physically", - "stakeholders", - "hydrocodone", - "consecutive", - "attempting", - "representations", - "competing", - "peninsula", - "accurately", - "considers", - "ministries", - "vacancies", - "parliamentary", - "acknowledge", - "thoroughly", - "nottingham", - "identifies", - "questionnaire", - "qualification", - "modelling", - "miniature", - "interstate", - "consequence", - "systematic", - "perceived", - "madagascar", - "presenting", - "troubleshooting", - "uzbekistan", - "centuries", - "magnitude", - "richardson", - "fragrances", - "vocabulary", - "earthquake", - "fundraising", - "geological", - "assessing", - "introduces", - "webmasters", - "computational", - "acdbentity", - "participated", - "handhelds", - "answering", - "impressed", - "conspiracy", - "organizer", - "combinations", - "preceding", - "cumulative", - "amplifier", - "arbitrary", - "prominent", - "lexington", - "contacted", - "recorders", - "occasional", - "innovations", - "postcards", - "reviewing", - "explicitly", - "transsexual", - "citizenship", - "informative", - "girlfriend", - "bloomberg", - "hierarchy", - "influenced", - "abandoned", - "complement", - "mauritius", - "checklist", - "requesting", - "lauderdale", - "scenarios", - "extraction", - "elevation", - "utilization", - "beverages", - "calibration", - "efficiently", - "entertaining", - "prerequisite", - "hypothesis", - "medicines", - "regression", - "enhancements", - "renewable", - "intersection", - "passwords", - "consistency", - "collectors", - "azerbaijan", - "astrology", - "occurring", - "supplemental", - "travelling", - "induction", - "precisely", - "spreading", - "provinces", - "widespread", - "incidence", - "incidents", - "enhancing", - "interference", - "palestine", - "listprice", - "atmospheric", - "knowledgestorm", - "referenced", - "publicity", - "proposition", - "allowance", - "designation", - "duplicate", - "criterion", - "civilization", - "vietnamese", - "tremendous", - "corrected", - "encountered", - "internationally", - "surrounded", - "creatures", - "commented", - "accomplish", - "vegetarian", - "newfoundland", - "investigated", - "ambassador", - "stephanie", - "contacting", - "vegetation", - "findarticles", - "specially", - "infectious", - "continuity", - "phenomenon", - "conscious", - "referrals", - "differently", - "integrating", - "revisions", - "reasoning", - "charitable", - "annotated", - "convinced", - "burlington", - "replacing", - "researcher", - "watershed", - "occupations", - "acknowledged", - "equilibrium", - "characterized", - "privilege", - "qualifying", - "estimation", - "pediatric", - "techrepublic", - "institutes", - "brochures", - "traveller", - "appropriations", - "suspected", - "benchmark", - "beginners", - "instructors", - "highlighted", - "stationery", - "unauthorized", - "competent", - "contributor", - "demonstrates", - "gradually", - "desirable", - "journalist", - 
"afterwards", - "religions", - "explosion", - "signatures", - "disciplines", - "daughters", - "conversations", - "simplified", - "motherboard", - "bibliographic", - "champagne", - "deviation", - "superintendent", - "housewives", - "influences", - "inspections", - "irrigation", - "hydraulic", - "robertson", - "penetration", - "conviction", - "omissions", - "retrieval", - "qualities", - "prototype", - "importantly", - "apparatus", - "explaining", - "nomination", - "empirical", - "dependence", - "sexuality", - "polyester", - "commitments", - "suggesting", - "remainder", - "privileges", - "televisions", - "specializing", - "commodities", - "motorcycles", - "concentrate", - "reproductive", - "molecules", - "refrigerator", - "intervals", - "sentences", - "exclusion", - "workstation", - "holocaust", - "receivers", - "disposition", - "navigator", - "investigators", - "marijuana", - "cathedral", - "fairfield", - "fascinating", - "landscapes", - "lafayette", - "computation", - "cardiovascular", - "salvation", - "predictions", - "accompanying", - "selective", - "arbitration", - "configuring", - "editorials", - "sacrifice", - "removable", - "convergence", - "gibraltar", - "anthropology", - "malpractice", - "reporters", - "necessity", - "rendering", - "hepatitis", - "nationally", - "waterproof", - "specialties", - "humanitarian", - "invitations", - "functioning", - "economies", - "alexandria", - "bacterial", - "undertake", - "continuously", - "achievements", - "convertible", - "secretariat", - "paragraphs", - "adolescent", - "nominations", - "cancelled", - "introductory", - "reservoir", - "occurrence", - "worcester", - "demographic", - "disciplinary", - "respected", - "portraits", - "interpreted", - "evaluations", - "elimination", - "hypothetical", - "immigrants", - "complimentary", - "helicopter", - "performer", - "commissions", - "powerseller", - "graduated", - "surprising", - "unnecessary", - "dramatically", - "yugoslavia", - "characterization", - "likelihood", - "fundamentals", - "contamination", - "endangered", - "compromise", - "masturbation", - "expiration", - "namespace", - "peripheral", - "negotiation", - "opponents", - "nominated", - "confidentiality", - "electoral", - "changelog", - "alternatively", - "greensboro", - "controversial", - "recovered", - "upgrading", - "frontpage", - "demanding", - "defensive", - "forbidden", - "programmers", - "monitored", - "installations", - "deutschland", - "practitioner", - "motivated", - "smithsonian", - "examining", - "revelation", - "delegation", - "dictionaries", - "greenhouse", - "transparency", - "currencies", - "survivors", - "positioning", - "descending", - "temporarily", - "frequencies", - "reflections", - "municipality", - "detective", - "experiencing", - "fireplace", - "endorsement", - "psychiatry", - "persistent", - "summaries", - "looksmart", - "magnificent", - "colleague", - "adaptation", - "paintball", - "enclosure", - "supervisors", - "westminster", - "distances", - "absorption", - "treasures", - "transcripts", - "disappointed", - "continually", - "communist", - "collectible", - "entrepreneurs", - "creations", - "acquisitions", - "biodiversity", - "excitement", - "presently", - "mysterious", - "librarian", - "subsidiaries", - "stockholm", - "indonesian", - "therapist", - "promising", - "relaxation", - "thereafter", - "commissioners", - "forwarding", - "nightmare", - "reductions", - "southampton", - "organisms", - "telescope", - "portsmouth", - "advancement", - "harassment", - "generators", - "generates", - "replication", - "inexpensive", - 
"receptors", - "interventions", - "huntington", - "internship", - "aluminium", - "snowboard", - "beastality", - "evanescence", - "coordinated", - "shipments", - "antarctica", - "chancellor", - "controversy", - "legendary", - "beautifully", - "antibodies", - "examinations", - "immunology", - "departmental", - "terminology", - "gentleman", - "reproduce", - "convicted", - "roommates", - "threatening", - "spokesman", - "activists", - "frankfurt", - "encourages", - "assembled", - "restructuring", - "terminals", - "simulations", - "sufficiently", - "conditional", - "crossword", - "conceptual", - "liechtenstein", - "translator", - "automobiles", - "continent", - "longitude", - "challenged", - "telecharger", - "insertion", - "instrumentation", - "constraint", - "groundwater", - "strengthening", - "insulation", - "infringement", - "subjective", - "swaziland", - "varieties", - "mediawiki", - "configurations", -} diff --git a/contrib/integration/testtxn/.gitignore b/contrib/integration/testtxn/.gitignore deleted file mode 100644 index 5a12fa844e0..00000000000 --- a/contrib/integration/testtxn/.gitignore +++ /dev/null @@ -1 +0,0 @@ -/testtxn diff --git a/contrib/local-test/Makefile b/contrib/local-test/Makefile deleted file mode 100644 index f76d50cf843..00000000000 --- a/contrib/local-test/Makefile +++ /dev/null @@ -1,76 +0,0 @@ -DGRAPH_VERSION ?= local - -current_dir = $(shell pwd) - -up: ## Start the zero and alpha containers - DGRAPH_VERSION=$(DGRAPH_VERSION) docker compose up -d && docker compose logs --follow alpha zero keepalive - -up-with-lambda: ## Start the zero and alpha containers and a lambda container - DGRAPH_VERSION=$(DGRAPH_VERSION) docker compose -f docker-compose.yml -f docker-compose-lambda.yml up -d && docker compose logs --follow alpha zero lambda keepalive - -down: ## Stop the containers - DGRAPH_VERSION=$(DGRAPH_VERSION) docker compose stop - -down-with-lambda: ## Stop the containers (including the lambda container) - DGRAPH_VERSION=$(DGRAPH_VERSION) docker compose -f docker-compose.yml -f docker-compose-lambda.yml stop - -refresh: ## Kick containers if new dgragh image found - DGRAPH_VERSION=$(DGRAPH_VERSION) docker compose up --detach --build - -schema-dql: ## Load/update a DQL schema -ifneq (,$(wildcard ./schema.dql)) - curl --data-binary '@./schema.dql' --header 'content-type: application/octet-stream' http://localhost:8080/alter -else - @echo "No schema.graphql found" -endif - -schema-gql: ## Load/update a GraphQL schema -ifneq (,$(wildcard ./schema.graphql)) - curl --data-binary '@./schema.graphql' --header 'content-type: application/octet-stream' http://localhost:8080/admin/schema -else - @echo "No schema.graphql found" -endif - -drop-data: ## Drops all data (but not the schema) - curl -X POST localhost:8080/alter -d '{"drop_op": "DATA"}' - -drop-all: ## Drops data and schema - curl -X POST localhost:8080/alter -d '{"drop_all": true}' - -load-data-gql: ## Loads data from the gql-data.json file -ifneq (,$(wildcard ./gql-data.json)) - docker run -it -v $(current_dir):/export dgraph/dgraph:$(DGRAPH_VERSION) dgraph live -a host.docker.internal:9080 -z host.docker.internal:5080 -f /export/gql-data.json -else - @echo "No gql-data.json file found" -endif - -load-data-dql-json: ## Loads data from the dql-data.json file -ifneq (,$(wildcard ./dql-data.json)) - curl --data-binary '@./dql-data.json' --header 'content-type: application/json' http://localhost:8080/mutate?commitNow=true -else - @echo "No dql-data.json file found" -endif - -load-data-dql-rdf: ## Loads data from the 
dql-data.rdf file -ifneq (,$(wildcard ./dql-data.rdf)) - curl --data-binary '@./dql-data.rdf' --header 'content-type: application/rdf' http://localhost:8080/mutate?commitNow=true -else - @echo "No dql-data.rdf file found" -endif - -query-dql: ## Runs the query present in query.dql -ifneq (,$(wildcard ./query.dql)) - @curl --data-binary '@./query.dql' -H "Content-Type: application/dql" -X POST localhost:8080/query -else - @echo "No query.dql file found" -endif - -query-gql: ## Runs the query present in query.gql and variables in variables.json (requires gql) -ifeq (, $(shell which gql)) - @echo "No gql in $(PATH), download from https://github.com/matthewmcneely/gql/tree/feature/add-query-and-variables-from-file/builds" -else - @gql file --query-file query.gql --variables-file variables.json --endpoint http://localhost:8080/graphql -endif - -help: ## Print target help - @grep -E '^[0-9a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' \ No newline at end of file diff --git a/contrib/local-test/README.md b/contrib/local-test/README.md deleted file mode 100644 index 5fa4c096c61..00000000000 --- a/contrib/local-test/README.md +++ /dev/null @@ -1,347 +0,0 @@ -# Local Test - -A collection of make commands that enable: - -- hot reloading of built local images in a docker compose environment -- creating/updating dql/graphql schemas -- loading data in RDF and JSON encoding -- running DQL/GraphQL queries/mutations - -Requirements: - -- Docker -- make -- curl -- [jq](https://stedolan.github.io/jq/download/) (optional, for formatting JSON results) -- [gql](https://github.com/matthewmcneely/gql/tree/feature/add-query-and-variables-from-file/builds) - (for running graphql queries) - -One final requirement is to build a local image of dgraph from the source currently on your machine. - -```bash -cd .. && make image-local -``` - -This will build a `dgraph/dgraph:local` image in your local Docker registry. - -## Make targets - -### `make help` - -Lists all available make targets and a short description. - -### `make up` - -Brings up a simple alpha and zero node using docker compose in your local Docker environment. The -target then tails the log output from both containers. This target also launches a -_[watchtower](https://containrrr.dev/watchtower/)_ container that will automatically restart alpha -and zero when it detects a new dgraph image (built via `cd .. && make image-local`). - -The hot-reloading development process involves `make up`, modifying source on your -machine, then `make image-local`. The changes in your source will show up in the locally deployed -dgraph containers when watchtower restarts them. - -Note that this deployment is completely insecure; it's meant for local testing only. - -### `make up-with-lambda` - -Brings up the alpha and zero containers along with the dgraph lambda container. Note that this lambda -container is based on `dgraph/dgraph-lambda:latest`. If you're trying to debug the lambda container, -you'll need to reference your local image in the docker compose file. - -### `make down` and `make down-with-lambda` - -Stops the containers. - -### `make refresh` - -Restarts the containers if a new `dgraph/dgraph:local` image is available. This shouldn't be needed -if the _watchtower_ container is running correctly. - -### `make schema-dql` - -Updates dgraph with the schema defined in `schema.dql`.
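- -After loading a schema, you can read it back over the same HTTP API to confirm what the cluster actually stored. A minimal sketch (not itself part of the Makefile; it assumes the default ports from `make up` and the optional `jq` requirement): - -```bash -# Fetch the current DQL schema from the local alpha -curl -s -H 'Content-Type: application/dql' -d 'schema {}' http://localhost:8080/query | jq '.data.schema' -```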
- -Example schema.dql: - -```dql -type Person { - name - boss_of - works_for -} - -type Company { - name - industry - work_here -} - -industry: string @index(term) . -name: string @index(exact, term) . -work_here: [uid] . -boss_of: [uid] @reverse . -works_for: [uid] @reverse . -``` - -### `make schema-gql` - -Updates dgraph with the schema defined in `schema.graphql`. - -Example schema.graphql: - -```graphql -type Post { - id: ID! - title: String! - text: String - datePublished: DateTime - author: Author! -} - -type Author { - id: ID! - name: String! - posts: [Post!] @hasInverse(field: author) -} -``` - -### `make drop-data` - -Drops all data from the cluster, but not the schema. - -### `make drop-all` - -Drops all data and the schema from the cluster. - -### `make load-data-gql` - -Loads JSON data defined in `gql-data.json`. This target is useful for loading data into schemas -defined with GraphQL SDL. - -Example gql-data.json: - -```json -[ - { - "uid": "_:katie_howgate", - "dgraph.type": "Author", - "Author.name": "Katie Howgate", - "Author.posts": [ - { - "uid": "_:katie_howgate_1" - }, - { - "uid": "_:katie_howgate_2" - } - ] - }, - { - "uid": "_:timo_denk", - "dgraph.type": "Author", - "Author.name": "Timo Denk", - "Author.posts": [ - { - "uid": "_:timo_denk_1" - }, - { - "uid": "_:timo_denk_2" - } - ] - }, - { - "uid": "_:katie_howgate_1", - "dgraph.type": "Post", - "Post.title": "Graph Theory 101", - "Post.text": "https://www.lancaster.ac.uk/stor-i-student-sites/katie-howgate/2021/04/27/graph-theory-101/", - "Post.datePublished": "2021-04-27", - "Post.author": { - "uid": "_:katie_howgate" - } - }, - { - "uid": "_:katie_howgate_2", - "dgraph.type": "Post", - "Post.title": "Hypergraphs – not just a cool name!", - "Post.text": "https://www.lancaster.ac.uk/stor-i-student-sites/katie-howgate/2021/04/29/hypergraphs-not-just-a-cool-name/", - "Post.datePublished": "2021-04-29", - "Post.author": { - "uid": "_:katie_howgate" - } - }, - { - "uid": "_:timo_denk_1", - "dgraph.type": "Post", - "Post.title": "Polynomial-time Approximation Schemes", - "Post.text": "https://timodenk.com/blog/ptas/", - "Post.datePublished": "2019-04-12", - "Post.author": { - "uid": "_:timo_denk" - } - }, - { - "uid": "_:timo_denk_2", - "dgraph.type": "Post", - "Post.title": "Graph Theory Overview", - "Post.text": "https://timodenk.com/blog/graph-theory-overview/", - "Post.datePublished": "2017-08-03", - "Post.author": { - "uid": "_:timo_denk" - } - } -] -``` - -### `make load-data-dql-json` - -Loads JSON data defined in `dql-data.json`. This target is useful for loading data into schemas -defined with base dgraph types.
- -Example dql-data.json: - -```json -{ - "set": [ - { - "uid": "_:company1", - "industry": "Machinery", - "dgraph.type": "Company", - "name": "CompanyABC" - }, - { - "uid": "_:company2", - "industry": "High Tech", - "dgraph.type": "Company", - "name": "The other company" - }, - { - "uid": "_:jack", - "works_for": { "uid": "_:company1" }, - "dgraph.type": "Person", - "name": "Jack" - }, - { - "uid": "_:ivy", - "works_for": { "uid": "_:company1" }, - "boss_of": { "uid": "_:jack" }, - "dgraph.type": "Person", - "name": "Ivy" - }, - { - "uid": "_:zoe", - "works_for": { "uid": "_:company1" }, - "dgraph.type": "Person", - "name": "Zoe" - }, - { - "uid": "_:jose", - "works_for": { "uid": "_:company2" }, - "dgraph.type": "Person", - "name": "Jose" - }, - { - "uid": "_:alexei", - "works_for": { "uid": "_:company2" }, - "boss_of": { "uid": "_:jose" }, - "dgraph.type": "Person", - "name": "Alexei" - } - ] -} -``` - -### `make load-data-dql-rdf` - -Loads RDF data defined in `dql-data.rdf`. This target is useful for loading data into schemas -defined with base dgraph types. - -Example dql-data.rdf: - -```rdf -{ - set { - _:company1 <name> "CompanyABC" . - _:company1 <dgraph.type> "Company" . - _:company2 <name> "The other company" . - _:company2 <dgraph.type> "Company" . - - _:company1 <industry> "Machinery" . - - _:company2 <industry> "High Tech" . - - _:jack <works_for> _:company1 . - _:jack <dgraph.type> "Person" . - - _:ivy <works_for> _:company1 . - _:ivy <dgraph.type> "Person" . - - _:zoe <works_for> _:company1 . - _:zoe <dgraph.type> "Person" . - - _:jack <name> "Jack" . - _:ivy <name> "Ivy" . - _:zoe <name> "Zoe" . - _:jose <name> "Jose" . - _:alexei <name> "Alexei" . - - _:jose <works_for> _:company2 . - _:jose <dgraph.type> "Person" . - _:alexei <works_for> _:company2 . - _:alexei <dgraph.type> "Person" . - - _:ivy <boss_of> _:jack . - - _:alexei <boss_of> _:jose . - } -} -``` - -### `make query-dql` - -Runs the query defined in query.dql. - -Example query.dql: - -```dql -{ - q(func: eq(name, "CompanyABC")) { - name - works_here : ~works_for { - uid - name - } - } -} -``` - -### `make query-gql` - -Runs the query defined in query.gql and optional variables defined in variables.json.
- -Example query.gql: - -```graphql -query QueryAuthor($order: PostOrder) { - queryAuthor { - id - name - posts(order: $order) { - id - datePublished - title - text - } - } -} -``` - -Example variables.json: - -```json -{ - "order": { - "desc": "datePublished" - } -} -``` diff --git a/contrib/local-test/docker-compose-lambda.yml b/contrib/local-test/docker-compose-lambda.yml deleted file mode 100644 index ab2d373a145..00000000000 --- a/contrib/local-test/docker-compose-lambda.yml +++ /dev/null @@ -1,25 +0,0 @@ -version: "3.8" - -# -# A simple compose file for running a single zero and alpha -# -services: - - alpha: - # override the root command with the lambda arguments - command: > - dgraph alpha --my=alpha:7080 --zero=zero:5080 - --security whitelist=0.0.0.0/0 - --logtostderr -v=2 - --graphql lambda-url=http://host.docker.internal:8686/graphql-worker - --telemetry sentry=false - lambda: - image: dgraph/dgraph-lambda:latest - volumes: - - ./scripts/script.js:/app/script/script.js:ro - environment: - - DGRAPH_URL=http://host.docker.internal:8080 - - MAX_MEMORY_LIMIT=256M - ports: - - 8686:8686 - diff --git a/contrib/local-test/docker-compose.yml b/contrib/local-test/docker-compose.yml deleted file mode 100644 index e3492e59635..00000000000 --- a/contrib/local-test/docker-compose.yml +++ /dev/null @@ -1,51 +0,0 @@ -version: "3.8" - -# -# A simple compose file for running a single zero and alpha -# -services: - - # Dgraph Zero controls the cluster - zero: - image: dgraph/dgraph:$DGRAPH_VERSION - container_name: local_dgraph_zero - volumes: - - ~/local-dgraph-data:/dgraph - ports: - - 5080:5080 - - 6080:6080 - command: dgraph zero --my=zero:5080 --logtostderr -v=2 --telemetry sentry=false - restart: unless-stopped - # Dgraph Alpha hosts the graph and indexes - alpha: - image: dgraph/dgraph:$DGRAPH_VERSION - container_name: local_dgraph_alpha - volumes: - - ~/local-dgraph-data:/dgraph - ports: - - 8080:8080 - - 9080:9080 - command: > - dgraph alpha --my=alpha:7080 --zero=zero:5080 - --security whitelist=0.0.0.0/0 - --logtostderr -v=2 - --telemetry sentry=false - restart: unless-stopped - - # Watchtower is a third party tool that will restart a container if an updated image is found - watchtower: - image: containrrr/watchtower - container_name: watchtower - volumes: - - /var/run/docker.sock:/var/run/docker.sock - environment: - - WATCHTOWER_POLL_INTERVAL=5 - - WATCHTOWER_WARN_ON_HEAD_FAILURE=never - - WATCHTOWER_NO_PULL=true - - WATCHTOWER_NOTIFICATIONS_LEVEL=error - # Runs busybox so that the hook to the dgraph container logs doesn't close - keepalive: - image: busybox - container_name: keepalive - command: > - sh -c "trap : TERM INT; sleep infinity & wait" diff --git a/contrib/local-test/scripts/script.js b/contrib/local-test/scripts/script.js deleted file mode 100644 index ccc933d5374..00000000000 --- a/contrib/local-test/scripts/script.js +++ /dev/null @@ -1 +0,0 @@ -// Add lambda scripts here diff --git a/contrib/manual_tests/.gitignore b/contrib/manual_tests/.gitignore deleted file mode 100644 index d722bf25a4f..00000000000 --- a/contrib/manual_tests/.gitignore +++ /dev/null @@ -1 +0,0 @@ -_tmp/ diff --git a/contrib/manual_tests/README.md b/contrib/manual_tests/README.md deleted file mode 100644 index 19c1c565275..00000000000 --- a/contrib/manual_tests/README.md +++ /dev/null @@ -1,18 +0,0 @@ -# manual_tests - -To run manual tests: - -- Set `$DGRAPH_BIN` to the path of the Dgraph binary you want to test.
- Set `$EXIT_ON_FAILURE` to `1` to stop testing immediately after a test fails, leaving Dgraph - running and the test directory intact. - Execute `./test.sh`. - -For long-running tests: - -- These tests have been grouped under `testx::`, so they do not run by default. - Execute `./test.sh testx::` - -To add a new test: - -- Create a function with the `test::` prefix. - Return `0` on success, return `1` on failure. diff --git a/contrib/manual_tests/log.sh b/contrib/manual_tests/log.sh deleted file mode 100755 index 098a1c4623f..00000000000 --- a/contrib/manual_tests/log.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/usr/bin/env bash - -function _log_date() { - date '+%Y-%m-%d %H:%M:%S' -} - -function log::debug() { - printf '%b\n' "\e[32m[DEBUG] $(_log_date) $*\e[0m" -} - -function log::info() { - printf '%b\n' "\e[34m[ INFO] $(_log_date) $*\e[0m" -} - -function log::warn() { - printf '%b\n' "\e[33m[ WARN] $(_log_date) $*\e[0m" -} - -function log::error() { - printf '%b\n' "\e[31m[ERROR] $(_log_date) $*\e[0m" -} diff --git a/contrib/manual_tests/test.sh b/contrib/manual_tests/test.sh deleted file mode 100755 index bbd8dbb5556..00000000000 --- a/contrib/manual_tests/test.sh +++ /dev/null @@ -1,600 +0,0 @@ -#!/usr/bin/env bash - -set -euo pipefail - -"${DGRAPH_BIN}" version - -readonly TEST_PATH="${PWD}/_tmp" - -readonly DATA_PATH="${TEST_PATH}/data" -readonly LOGS_PATH="${TEST_PATH}/logs" -readonly DGRAPH_PATH="${TEST_PATH}/dgraph" - -readonly ENCRYPTION_KEY_PATH="${DGRAPH_PATH}/encryption_key_file" -readonly ACL_SECRET_PATH="${DGRAPH_PATH}/acl_secret_file" -readonly TLS_PATH="${DGRAPH_PATH}/tls" - -readonly DATASET_1MILLION_FILE_URL='https://github.com/hypermodeinc/dgraph-benchmarks/blob/main/data/1million.rdf.gz?raw=true' -readonly DATASET_1MILLION_FILE_PATH="${DATA_PATH}/1million.rdf.gz" - -readonly DATASET_1MILLION_SCHEMA_URL='https://github.com/hypermodeinc/dgraph-benchmarks/blob/main/data/1million.schema?raw=true' -readonly DATASET_1MILLION_SCHEMA_PATH="${DATA_PATH}/1million.schema" - -source "log.sh" - -function dataset::1million::download() { - if ! [[ -f ${DATASET_1MILLION_FILE_PATH} ]]; then - log::debug "Downloading from ${DATASET_1MILLION_FILE_URL}." - curl -L "${DATASET_1MILLION_FILE_URL}" --output "${DATASET_1MILLION_FILE_PATH}" - fi - - if ! [[ -f ${DATASET_1MILLION_SCHEMA_PATH} ]]; then - log::debug "Downloading from ${DATASET_1MILLION_SCHEMA_URL}." - curl -L "${DATASET_1MILLION_SCHEMA_URL}" --output "${DATASET_1MILLION_SCHEMA_PATH}" - fi -} - -function dataset::1million::verify() { - local count_names_exp=197408 - count_names_got=$( - curl \ - -SsX POST \ - -H 'Content-Type: application/json' \ - -d '{ "query": "query { test(func: has(name@.)) { count(uid) } }" }' \ - 'localhost:8081/query' | jq '.data.test[0].count' - ) - - if [[ ${count_names_got} -ne ${count_names_exp} ]]; then - log::error "Could not verify 1million, expected: ${count_names_exp}, got: ${count_names_got}" - return 1 - fi -} - -function portkill() { - local pids - if pids="$(lsof -nti ":$1")"; then - echo "${pids}" | xargs kill -9 - fi -} - -function dgraph::killall() { - while pkill -x 'dgraph'; do - log::debug 'Killing running Dgraph instances.' - sleep 1 - done -} - -function dgraph::start_zero() { - local -r i="$1" - log::debug "Starting Zero ${i}."
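- # Ports derive from the instance index: gRPC on 5080+i, HTTP on 6080+i (kept in sync with --port_offset below).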
- - local grpc_port=$((5080 + i)) - local http_port=$((6080 + i)) - - for port in "${grpc_port}" "${http_port}"; do - portkill "${port}" - done - - local zero_args_default=(--cwd "${DGRAPH_PATH}/zero${i}" --raft="idx=${i}" --port_offset "${i}") - - if [[ ${i} -ne 1 ]]; then - zero_args_default+=(--peer 'localhost:5081') - fi - - "${DGRAPH_BIN}" zero "${zero_args_default[@]}" "${@:2}" &>"${LOGS_PATH}/zero${i}" & - sleep 1 -} - -function dgraph::start_zeros() { - local -r n="$1" - for i in $(seq "${n}"); do - dgraph::start_zero "${i}" "${@:2}" - done -} - -function dgraph::start_alpha() { - local -r i="$1" - log::debug "Starting Alpha ${i}." - - local internal_port=$((7080 + i)) - local http_port=$((8080 + i)) - local grpc_port=$((9080 + i)) - - for port in "${internal_port}" "${http_port}" "${grpc_port}"; do - portkill "${port}" - done - - "${DGRAPH_BIN}" \ - alpha \ - --cwd "${DGRAPH_PATH}/alpha${i}" \ - --port_offset "${i}" \ - --zero 'localhost:5081' \ - "${@:2}" &>"${LOGS_PATH}/alpha${i}" & - sleep 1 -} - -function dgraph::start_alphas() { - local -r n="$1" - for i in $(seq "${n}"); do - dgraph::start_alpha "${i}" "${@:2}" - done -} - -function dgraph::generate_encryption_key() { - dd if=/dev/random bs=1 count=32 of="${ENCRYPTION_KEY_PATH}" -} - -function dgraph::generate_acl_secret() { - dd if=/dev/random bs=1 count=256 of="${ACL_SECRET_PATH}" -} - -function dgraph::generate_tls() { - "${DGRAPH_BIN}" cert --cwd "${DGRAPH_PATH}" --nodes 'localhost' -} - -function dgraph::healthcheck_zero() { - local -r i="$1" - local -r http_port=$((6080 + i)) - local response - - while true; do - response="$(curl -Ss "localhost:${http_port}/health")" - if [[ ${response} == "Please retry again, server is not ready to accept requests" ]]; then - log::warn "Zero ${i} is not ready, retrying in 1s." - sleep 1 - else - break - fi - done - - if [[ ${response} != "OK" ]]; then - log::error "Zero ${i} is not healthy." - echo "${response}" - return 1 - fi - - log::debug "Zero ${i} is healthy." -} - -function dgraph::healthcheck_alpha() { - local -r i="$1" - local -r http_port=$((8080 + i)) - local response - - while true; do - response="$(curl -Ss "localhost:${http_port}/health")" - if [[ ${response} == "Please retry again, server is not ready to accept requests" ]]; then - log::warn "Alpha ${i} is not ready, retrying in 1s." - sleep 1 - else - break - fi - done - - if [[ "$(echo "${response}" | jq '.[0].status')" != '"healthy"' ]]; then - log::error "Alpha ${i} is not healthy." - echo "${response}" | jq || echo "${response}" - return 1 - fi - - log::debug "Alpha ${i} is healthy." -} - -function dgraph::healthcheck_alpha_tls() { - local -r i="$1" - local -r http_port=$((8080 + i)) - local response - - while true; do - response="$(curl --insecure -Ss "https://localhost:${http_port}/health")" - if [[ ${response} == "Please retry again, server is not ready to accept requests" ]]; then - log::warn "Alpha ${i} is not ready, retrying in 1s." - sleep 1 - else - break - fi - done - - if [[ "$(echo "${response}" | jq '.[0].status')" != '"healthy"' ]]; then - log::error "Alpha ${i} is not healthy." - echo "${response}" | jq || echo "${response}" - return 1 - fi - - log::debug "Alpha ${i} is healthy." -} - -function dgraph::increment() { - local -r i="$1" - local -r grpc_port=$((9080 + i)) - "${DGRAPH_BIN}" increment --alpha "localhost:${grpc_port}" "${@:2}" | - grep -oP 'Counter VAL: \K\d+' | - tail -1 -} - -function setup() { - dgraph::killall - - log::debug 'Removing old test files.' 
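- # Recreate the logs and dgraph working directories from scratch; DATA_PATH is only created, never removed, so downloaded datasets are reused across runs.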
- - rm -rf "${LOGS_PATH}" - mkdir -p "${LOGS_PATH}" - - rm -rf "${DGRAPH_PATH}" - mkdir -p "${DGRAPH_PATH}" - - mkdir -p "${DATA_PATH}" -} - -function cleanup() { - dgraph::killall - - log::debug 'Removing old test files.' - rm -rf "${TEST_PATH}" -} - -function test::manual_start() { - local -r n_zeros=3 - local -r n_alphas=3 - - dgraph::start_zeros "${n_zeros}" - dgraph::start_alphas "${n_alphas}" - - for i in $(seq "${n_zeros}"); do - dgraph::healthcheck_zero "${i}" - done - - sleep 5 - - for i in $(seq "${n_alphas}"); do - dgraph::healthcheck_alpha "${i}" - done - - local count - for i in $(seq "${n_alphas}"); do - count="$(dgraph::increment "${i}")" - if [[ ${i} -ne ${count} ]]; then - log::error "Expected increment: ${i} but got: ${count}" - return 1 - fi - done -} - -function test::manual_start_encryption() { - dgraph::generate_encryption_key - - local -r n_zeros=3 - local -r n_alphas=3 - - dgraph::start_zeros "${n_zeros}" - dgraph::start_alphas "${n_alphas}" --encryption "key-file=${ENCRYPTION_KEY_PATH};" - - for i in $(seq "${n_zeros}"); do - dgraph::healthcheck_zero "${i}" - done - - sleep 5 - - for i in $(seq "${n_alphas}"); do - dgraph::healthcheck_alpha "${i}" - done - - local count - for i in $(seq "${n_alphas}"); do - count="$(dgraph::increment "${i}")" - if [[ ${i} -ne ${count} ]]; then - log::error "Expected increment: ${i} but got: ${count}" - return 1 - fi - done -} - -function test::manual_start_acl() { - dgraph::generate_acl_secret - - local -r n_zeros=3 - local -r n_alphas=3 - - dgraph::start_zeros "${n_zeros}" - dgraph::start_alphas "${n_alphas}" --acl "secret-file=${ACL_SECRET_PATH};" - - for i in $(seq "${n_zeros}"); do - dgraph::healthcheck_zero "${i}" - done - - sleep 5 - - for i in $(seq "${n_alphas}"); do - dgraph::healthcheck_alpha "${i}" - done - - local count - for i in $(seq "${n_alphas}"); do - count="$(dgraph::increment "${i}" --user groot --password password)" - if [[ ${i} -ne ${count} ]]; then - log::error "Expected increment: ${i} but got: ${count}" - return 1 - fi - done -} - -# Test manual start with external TLS enabled. -function test::manual_start_tls() { - dgraph::generate_tls - - local -r n_zeros=3 - local -r n_alphas=3 - - dgraph::start_zeros "${n_zeros}" - dgraph::start_alphas "${n_alphas}" --tls "ca-cert=${TLS_PATH}/ca.crt; server-cert=${TLS_PATH}/node.crt; server-key=${TLS_PATH}/node.key;" - - for i in $(seq "${n_zeros}"); do - dgraph::healthcheck_zero "${i}" - done - - sleep 5 - - for i in $(seq "${n_alphas}"); do - dgraph::healthcheck_alpha_tls "${i}" - done - - local count - for i in $(seq "${n_alphas}"); do - count="$(dgraph::increment "${i}" --tls "ca-cert=${TLS_PATH}/ca.crt;")" - if [[ ${i} -ne ${count} ]]; then - log::error "Expected increment: ${i} but got: ${count}" - return 1 - fi - done -} - -# Test manual start with both internal and external TLS enabled. 
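- # Unlike test::manual_start_tls, every Zero and Alpha here also presents its own client certificate on the node-to-node (internal-port) traffic.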
-function test::manual_start_tls2() { - dgraph::generate_tls - - local -r n_zeros=3 - local -r n_alphas=3 - - for i in $(seq "${n_zeros}"); do - "${DGRAPH_BIN}" cert --client "zero${i}" --cwd "${DGRAPH_PATH}" - dgraph::start_zero "${i}" \ - --tls "ca-cert=${TLS_PATH}/ca.crt; internal-port=true; client-cert=${TLS_PATH}/client.zero${i}.crt; client-key=${TLS_PATH}/client.zero${i}.key; server-cert=${TLS_PATH}/node.crt; server-key=${TLS_PATH}/node.key;" - done - - for i in $(seq "${n_alphas}"); do - "${DGRAPH_BIN}" cert --client "alpha${i}" --cwd "${DGRAPH_PATH}" - dgraph::start_alpha "${i}" \ - --tls "ca-cert=${TLS_PATH}/ca.crt; internal-port=true; client-cert=${TLS_PATH}/client.alpha${i}.crt; client-key=${TLS_PATH}/client.alpha${i}.key; server-cert=${TLS_PATH}/node.crt; server-key=${TLS_PATH}/node.key;" - done - - for i in $(seq "${n_zeros}"); do - dgraph::healthcheck_zero "${i}" - done - - sleep 5 - - for i in $(seq "${n_alphas}"); do - dgraph::healthcheck_alpha_tls "${i}" - done - - local count - for i in $(seq "${n_alphas}"); do - count="$(dgraph::increment "${i}" --tls "ca-cert=${TLS_PATH}/ca.crt;")" - if [[ ${i} -ne ${count} ]]; then - log::error "Expected increment: ${i} but got: ${count}" - return 1 - fi - done -} - -function test::manual_start_encryption_acl_tls() { - dgraph::generate_encryption_key - dgraph::generate_acl_secret - dgraph::generate_tls - - local -r n_zeros=3 - local -r n_alphas=3 - - dgraph::start_zeros "${n_zeros}" - dgraph::start_alphas "${n_alphas}" \ - --acl "secret-file=${ACL_SECRET_PATH};" \ - --encryption "key-file=${ENCRYPTION_KEY_PATH}" \ - --tls "ca-cert=${TLS_PATH}/ca.crt; server-cert=${TLS_PATH}/node.crt; server-key=${TLS_PATH}/node.key;" - - for i in $(seq "${n_zeros}"); do - dgraph::healthcheck_zero "${i}" - done - - sleep 5 - - for i in $(seq "${n_alphas}"); do - dgraph::healthcheck_alpha_tls "${i}" - done - - local count - for i in $(seq "${n_alphas}"); do - count="$(dgraph::increment "${i}" --tls "ca-cert=${TLS_PATH}/ca.crt;" --user groot --password password)" - if [[ ${i} -ne ${count} ]]; then - log::error "Expected increment: ${i} but got: ${count}" - return 1 - fi - done -} - -function test::live_loader() { - dataset::1million::download - - dgraph::start_zeros 1 - dgraph::start_alphas 2 - - sleep 5 - - log::debug 'Running live loader.' - "${DGRAPH_BIN}" \ - live \ - --alpha 'localhost:9081' \ - --cwd "${DGRAPH_PATH}/live" \ - --files "${DATASET_1MILLION_FILE_PATH}" \ - --schema "${DATASET_1MILLION_SCHEMA_PATH}" \ - --zero 'localhost:5081' &>"${LOGS_PATH}/live" - - dataset::1million::verify -} - -function test::bulk_loader() { - dataset::1million::download - - dgraph::start_zeros 1 - - sleep 5 - - log::debug 'Running bulk loader.' - "${DGRAPH_BIN}" \ - bulk \ - --cwd "${DGRAPH_PATH}/bulk" \ - --files "${DATASET_1MILLION_FILE_PATH}" \ - --schema "${DATASET_1MILLION_SCHEMA_PATH}" \ - --map_shards 1 \ - --reduce_shards 1 \ - --zero 'localhost:5081' &>"${LOGS_PATH}/bulk" - - mkdir -p "${DGRAPH_PATH}/alpha1" - cp -r "${DGRAPH_PATH}/bulk/out/0/p" "${DGRAPH_PATH}/alpha1" - - dgraph::start_alphas 1 - sleep 5 - - dataset::1million::verify - log::info "Bulk load succeeded." - - log::debug "Exporting data." - - local export_result - export_result=$(curl -Ss 'localhost:8081/admin/export') - - if [[ "$(echo "${export_result}" | jq '.code')" != '"Success"' ]]; then - log::error 'Export failed.' - echo "${export_result}" | jq || echo "${export_result}" - return 1 - else - log::info "Export succeeded." - fi - - log::debug "Backing up data." 
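- # Take a binary backup through the /admin GraphQL endpoint, then wipe the cluster and restore from that backup further below.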
- - local -r backup_path="${TEST_PATH}/backup" - rm -rf "${backup_path}" - mkdir -p "${backup_path}" - - local backup_result - backup_result=$(curl -SsX POST -H 'Content-Type: application/json' -d " - { - \"query\": \"mutation { backup(input: {destination: \\\"${backup_path}\\\"}) { response { message code } } }\" - }" 'http://localhost:8081/admin') - - if [[ "$(echo "${backup_result}" | jq '.data.backup.response.code')" != '"Success"' ]]; then - log::error 'Backup failed.' - echo "${backup_result}" | jq || echo "${backup_result}" - return 1 - else - log::info "Backup succeeded." - fi - - setup - - dgraph::start_zeros 1 - - sleep 5 - - log::info "Restoring data." - "${DGRAPH_BIN}" \ - restore \ - --cwd "${DGRAPH_PATH}/restore" \ - --location "${backup_path}" \ - --postings "${DGRAPH_PATH}" \ - --zero 'localhost:5081' &>"${LOGS_PATH}/restore" - - mkdir -p "${DGRAPH_PATH}/alpha1" - mv "${DGRAPH_PATH}/p1" "${DGRAPH_PATH}/alpha1/p" - - dgraph::start_alphas 1 - sleep 5 - - dataset::1million::verify - log::info "Restore succeeded." -} - -# Run `dgraph increment` in a loop with 1, 2, and 3 groups respectively and verify the result. -function testx::increment() { - local -r increment_factor=100 - - # Set replicas to 1 so that each Alpha forms its own group. - dgraph::start_zeros 1 --replicas 1 - local alphas=() - - dgraph::start_alpha 1 - alphas+=("localhost:9081") - - for i in {1..20000}; do - if [[ ${i} -eq 5000 ]]; then - dgraph::start_alpha 2 - alphas+=("localhost:9082") - elif [[ ${i} -eq 10000 ]]; then - dgraph::start_alpha 3 - alphas+=("localhost:9083") - fi - - # Pick an Alpha in a round-robin manner and run the increment tool on it. - count="$( - "${DGRAPH_BIN}" increment --alpha "${alphas[$((i % ${#alphas[@]}))]}" --num "${increment_factor}" | - grep -oP 'Counter VAL: \K\d+' | - tail -1 - )" - if [[ ${count} -ne $((i * increment_factor)) ]]; then - log::error "Increment error: expected: $((i * increment_factor)), got: ${count}" - return 1 - fi - log::debug "Increment: ${count}" - done -} - -function dgraph::run_tests() { - local passed=0 - local failed=0 - - for test in $(compgen -A function "${1:-test::}"); do - log::info "${test} starting." - - setup - if "${test}"; then - log::info "${test} succeeded." - ((passed += 1)) - else - log::error "${test} failed." - ((failed += 1)) - - if [[ ${EXIT_ON_FAILURE:-0} -eq 1 ]]; then - return 1 - fi - fi - done - - local -r summary="${passed} tests passed, ${failed} failed." - if [[ ${failed} -ne 0 ]]; then - log::error "${summary}" - return 1 - else - log::info "${summary}" - return 0 - fi -} - -function main() { - cleanup - dgraph::run_tests "$@" - local status="$?" - cleanup - return "${status}" -} - -main "$@" diff --git a/contrib/scripts/README.txt b/contrib/scripts/README.txt deleted file mode 100644 index 488cfd7921e..00000000000 --- a/contrib/scripts/README.txt +++ /dev/null @@ -1,2 +0,0 @@ -A good way to figure out which scripts are run is to have a look at the travis.yml -file; it shows what gets run. diff --git a/contrib/scripts/cover.sh b/contrib/scripts/cover.sh deleted file mode 100755 index bf7d58b8545..00000000000 --- a/contrib/scripts/cover.sh +++ /dev/null @@ -1,34 +0,0 @@ -#!/bin/bash - -SRC="$(cd -P "$(dirname "${BASH_SOURCE[0]}")" && pwd)/.." -TMP=$(mktemp /tmp/dgraph-coverage-XXXXX.txt) - -BUILD=$1 -# If the build variable is empty, set it to the default build directory.
-if [[ -z $1 ]]; then - BUILD=${SRC}/build -fi - -OUT=$2 -if [[ -z ${OUT} ]]; then - OUT=${SRC}/coverage.out -fi -rm -f "${OUT}" - -set -e - -# create coverage output -echo 'mode: atomic' >"${OUT}" -for PKG in $(go list ./... | grep -v -E 'vendor|contrib|wiki|customtok'); do - if [[ ${TRAVIS} == true ]]; then - go test -v -timeout 25m -covermode=atomic -coverprofile="${TMP}" "${PKG}" - else - go test -v -race -timeout 25m -covermode=atomic -coverprofile="${TMP}" "${PKG}" | go-test-teamcity - fi - tail -n +2 "${TMP}" >>"${OUT}" -done - -# open in browser if not in a build environment -if [[ -n ${DISPLAY} ]]; then - go tool cover -html="${OUT}" -fi diff --git a/contrib/scripts/functions.sh b/contrib/scripts/functions.sh deleted file mode 100755 index 36cb68887c8..00000000000 --- a/contrib/scripts/functions.sh +++ /dev/null @@ -1,59 +0,0 @@ -#!/bin/bash -# Containers MUST be labeled with "cluster:test" to be restarted and stopped -# by these functions. - -set -e - -# May be called with an argument which is a docker compose file -# to use *instead of* the default docker-compose.yml. -function restartCluster { - if [[ -z $1 ]]; then - compose_file="docker-compose.yml" - else - compose_file="$(readlink -f "$1")" - fi - - basedir=$(dirname "${BASH_SOURCE[0]}")/../.. - pushd "${basedir}"/dgraph >/dev/null - echo "Rebuilding dgraph ..." - - docker_compose_gopath="${GOPATH:-$(go env GOPATH)}" - make install - - if [[ ${OSTYPE} == "darwin"* ]]; then - if !(AVAILABLE_RAM=$(cat ~/Library/Group\ Containers/group.com.docker/settings.json | grep memoryMiB | grep -oe "[0-9]\+") && test "${AVAILABLE_RAM}" -ge 6144); then - echo -e "\e[33mWarning: You may not have allocated enough memory for Docker on Mac. Please increase the allocated RAM to at least 6GB with a 4GB swap. See https://docs.docker.com/docker-for-mac/#resources \e[0m" - fi - docker_compose_gopath=$(pwd)/../osx-docker-gopath - - # FIXME: read the go version from a constant - docker run --rm \ - -v dgraph_gopath:/go \ - -v dgraph_gocache:/root/.cache/go-build \ - -v $(pwd)/..:/app \ - -w /app/dgraph \ - golang:1.19.5 \ - go build -o /app/osx-docker-gopath/bin/dgraph - fi - - docker ps -a --filter label="cluster=test" --format "{{.Names}}" | xargs -r docker rm -f - GOPATH=${docker_compose_gopath} docker-compose -p dgraph -f "${compose_file}" up --force-recreate --build --remove-orphans -d || exit 1 - popd >/dev/null - - "${basedir}"/contrib/wait-for-it.sh -t 60 localhost:6180 || exit 1 - "${basedir}"/contrib/wait-for-it.sh -t 60 localhost:9180 || exit 1 - sleep 10 || exit 1 -} - -function stopCluster { - docker ps --filter label="cluster=test" --format "{{.Names}}" | - xargs -r docker stop | sed 's/^/Stopped /' - docker ps -a --filter label="cluster=test" --format "{{.Names}}" | - xargs -r docker rm | sed 's/^/Removed /' -} - -function loginWithGroot() { - curl -s -XPOST localhost:8180/login -d '{"userid": "groot","password": "password"}' | - python3 -c \ - "import json; resp = input(); data = json.loads(resp); print(data['data']['accessJWT'])" -} diff --git a/contrib/scripts/goldendata-queries.sh b/contrib/scripts/goldendata-queries.sh deleted file mode 100755 index 9fecca533ed..00000000000 --- a/contrib/scripts/goldendata-queries.sh +++ /dev/null @@ -1,55 +0,0 @@ -#!/bin/bash - -basedir=$(dirname "${BASH_SOURCE[0]}")/../.. 
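- # functions.sh provides loginWithGroot, used below to obtain an ACL access token for each query.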
-source "${basedir}"/contrib/scripts/functions.sh -pushd $(dirname "${BASH_SOURCE[0]}")/queries &>/dev/null - -function run_index_test { - local max_attempts=${ATTEMPTS-5} - local timeout=${TIMEOUT-1} - local attempt=0 - local exitCode=0 - - X=$1 - GREPFOR=$2 - ANS=$3 - echo "Running test: ${X}" - while ((attempt < max_attempts)); do - set +e - accessToken=$(loginWithGroot) - N=$(curl -s -H 'Content-Type: application/dql' localhost:8180/query -XPOST -d @"${X}".in -H "X-Dgraph-AccessToken: ${accessToken}") - exitCode=$? - - set -e - - if [[ ${exitCode} == 0 ]]; then - break - fi - - echo "Failure! Retrying in ${timeout}.." 1>&2 - sleep "${timeout}" - attempt=$((attempt + 1)) - timeout=$((timeout * 2)) - done - - NUM=$(echo "${N}" | python3 -m json.tool | grep "${GREPFOR}" | wc -l) - if [[ ! ${NUM} -eq ${ANS} ]]; then - echo "Index test failed: ${X} Expected: ${ANS} Got: ${NUM}" - exit 1 - else - echo -e "Index test passed: ${X}\n" - fi -} - -echo -e "Running some queries and checking count of results returned." -run_index_test basic name 138677 -run_index_test allof_the name 25432 -run_index_test allof_the_a name 368 -run_index_test allof_the_first name 4384 -run_index_test releasedate release_date 137859 -run_index_test releasedate_sort release_date 137859 -run_index_test releasedate_sort_first_offset release_date 2316 -run_index_test releasedate_geq release_date 60992 -run_index_test gen_anyof_good_bad name 1104 - -popd &>/dev/null diff --git a/contrib/scripts/install-dependencies.sh b/contrib/scripts/install-dependencies.sh deleted file mode 100755 index 4bcd9be184c..00000000000 --- a/contrib/scripts/install-dependencies.sh +++ /dev/null @@ -1,13 +0,0 @@ -# Use this script if make install does not work because of dependency issues. -# Make sure to run the script from the dgraph repository root. - -# Vendor opencensus. -rm -rf vendor/go.opencensus.io/ -govendor fetch go.opencensus.io/...@v0.19.2 -# Vendor prometheus. -rm -rf vendor/github.com/prometheus/ -govendor fetch github.com/prometheus/client_golang/prometheus/...@v0.9.2 -# Vendor gRPC. -govendor fetch google.golang.org/grpc/...@v1.13.0 -# Vendor dgo (latest version before API changes). -govendor fetch github.com/dgraph-io/dgo...@v1.0.0 diff --git a/contrib/scripts/load-test.sh b/contrib/scripts/load-test.sh deleted file mode 100755 index 68fcf124eee..00000000000 --- a/contrib/scripts/load-test.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash - -ONE_GB=$((1024 ** 3)) -REQUIRED_MEM=$((20 * ONE_GB)) - -set -e - -total_mem_kb=$(cat /proc/meminfo | awk '/MemTotal:/ {print $2}') -if [[ ${total_mem_kb} -lt $((REQUIRED_MEM / 1024)) ]]; then - printf >&2 "Load test requires system with at least %dGB of memory\n" \ - $((REQUIRED_MEM / ONE_GB)) - exit 1 -fi - -bash contrib/scripts/loader.sh "$1" -bash contrib/scripts/transactions.sh "$1" diff --git a/contrib/scripts/loader.sh b/contrib/scripts/loader.sh deleted file mode 100755 index 0987b580028..00000000000 --- a/contrib/scripts/loader.sh +++ /dev/null @@ -1,42 +0,0 @@ -#!/bin/bash - -basedir=$(dirname "${BASH_SOURCE[0]}")/../.. -goldendata=$(pwd)/${basedir}/systest/data/goldendata.rdf.gz -set -e - -source "${basedir}"/contrib/scripts/functions.sh -restartCluster - -# Create a temporary directory to use for running live loader. -tmpdir=$(mktemp --tmpdir -d loader.tmp-XXXXXX) -trap "rm -rf ${tmpdir}" EXIT -pushd "${tmpdir}" -echo "Inside $(pwd)" - -# log file size. -ls -laH "${goldendata}" - -echo "Setting schema." 
-while true; do - accessJWT=$(loginWithGroot) - curl -s -XPOST --output alter.txt -d ' - name: string @index(term) @lang . - initial_release_date: datetime @index(year) . - ' "http://localhost:8180/alter" -H "X-Dgraph-AccessToken: ${accessJWT}" - cat alter.txt - echo - cat alter.txt | grep -iq "success" && break - echo "Retrying..." - sleep 3 -done -rm -f alter.txt - -echo -e "\nRunning dgraph live." -dgraph live -f "${goldendata}" -a "127.0.0.1:9180" -z "127.0.0.1:5180" -c 10 -u groot -p password -popd -rm -rf "${tmpdir}" - -echo "Running queries" -"${basedir}"/contrib/scripts/goldendata-queries.sh - -stopCluster diff --git a/contrib/scripts/queries/allof_the.in b/contrib/scripts/queries/allof_the.in deleted file mode 100644 index cf7efe1a5e6..00000000000 --- a/contrib/scripts/queries/allof_the.in +++ /dev/null @@ -1,12 +0,0 @@ -{ - me(func:eq(name@en,"Chi Chi LaRue")) { - director.film { - directed_by { - director.film @filter(allofterms(name@en, "the")) { - uid - name@en - } - } - } - } -} diff --git a/contrib/scripts/queries/allof_the_a.in b/contrib/scripts/queries/allof_the_a.in deleted file mode 100644 index 07739388fd7..00000000000 --- a/contrib/scripts/queries/allof_the_a.in +++ /dev/null @@ -1,12 +0,0 @@ -{ - me(func:eq(name@en,"Chi Chi LaRue")) { - director.film { - directed_by { - director.film @filter(allofterms(name@en, "the") and allofterms(name@en, "a")) { - uid - name@en - } - } - } - } -} diff --git a/contrib/scripts/queries/allof_the_first.in b/contrib/scripts/queries/allof_the_first.in deleted file mode 100644 index a26cc845025..00000000000 --- a/contrib/scripts/queries/allof_the_first.in +++ /dev/null @@ -1,12 +0,0 @@ -{ - me(func:eq(name@en,"Chi Chi LaRue")) { - director.film { - directed_by { - director.film(first: 10) @filter(allofterms(name@en, "the")) { - uid - name@en - } - } - } - } -} diff --git a/contrib/scripts/queries/basic.in b/contrib/scripts/queries/basic.in deleted file mode 100644 index 8c2fc14ecbb..00000000000 --- a/contrib/scripts/queries/basic.in +++ /dev/null @@ -1,12 +0,0 @@ -{ - me(func:eq(name@en,"Chi Chi LaRue")) { - director.film { - directed_by { - director.film { - uid - name@en - } - } - } - } -} diff --git a/contrib/scripts/queries/gen_anyof_good_bad.in b/contrib/scripts/queries/gen_anyof_good_bad.in deleted file mode 100644 index 93f07545257..00000000000 --- a/contrib/scripts/queries/gen_anyof_good_bad.in +++ /dev/null @@ -1,6 +0,0 @@ -{ - me(func:anyofterms(name@en, "good bad")) { - uid - name@en - } -} diff --git a/contrib/scripts/queries/releasedate.in b/contrib/scripts/queries/releasedate.in deleted file mode 100644 index ac89de84ec0..00000000000 --- a/contrib/scripts/queries/releasedate.in +++ /dev/null @@ -1,12 +0,0 @@ -{ - me(func:eq(name@en,"Chi Chi LaRue")) { - director.film { - directed_by { - director.film { - uid - initial_release_date - } - } - } - } -} diff --git a/contrib/scripts/queries/releasedate_geq.in b/contrib/scripts/queries/releasedate_geq.in deleted file mode 100644 index d88c4fea271..00000000000 --- a/contrib/scripts/queries/releasedate_geq.in +++ /dev/null @@ -1,12 +0,0 @@ -{ - me(func:eq(name@en,"Chi Chi LaRue")) { - director.film { - directed_by { - director.film @filter(ge(initial_release_date, "2000-01-05")) { - uid - initial_release_date - } - } - } - } -} diff --git a/contrib/scripts/queries/releasedate_sort.in b/contrib/scripts/queries/releasedate_sort.in deleted file mode 100644 index cc21d3ec690..00000000000 --- a/contrib/scripts/queries/releasedate_sort.in +++ /dev/null @@ -1,12 +0,0 @@ -{ - 
me(func:eq(name@en,"Chi Chi LaRue")) { - director.film { - directed_by { - director.film(orderasc: initial_release_date) { - uid - initial_release_date - } - } - } - } -} diff --git a/contrib/scripts/queries/releasedate_sort_first_offset.in b/contrib/scripts/queries/releasedate_sort_first_offset.in deleted file mode 100644 index 0cfac7dc0a3..00000000000 --- a/contrib/scripts/queries/releasedate_sort_first_offset.in +++ /dev/null @@ -1,12 +0,0 @@ -{ - me(func:eq(name@en,"Chi Chi LaRue")) { - director.film { - directed_by { - director.film(orderasc: initial_release_date, first: 5, offset: 10) { - uid - initial_release_date - } - } - } - } -} diff --git a/contrib/scripts/transactions.sh b/contrib/scripts/transactions.sh deleted file mode 100755 index 19c40ad0f11..00000000000 --- a/contrib/scripts/transactions.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/bash - -basedir=$(dirname "${BASH_SOURCE[0]}")/../.. -contrib=${basedir}/contrib -set -e - -# go test -v $contrib/integration/testtxn/main_test.go - -source "${contrib}"/scripts/functions.sh -restartCluster - -echo "* Running transaction tests." - -echo "* Running bank tests" -go run "${contrib}"/integration/bank/main.go --alpha=localhost:9180,localhost:9182,localhost:9183 --verbose=false - -echo "* Running account upsert tests" -go run "${contrib}"/integration/acctupsert/main.go --alpha=localhost:9180 - -echo "* Running sentence swap tests" -pushd "${contrib}"/integration/swap -go build . && ./swap --alpha=localhost:9180 -popd - -echo "* Running mutate from #1750." -pushd "${contrib}"/integration/mutates -go build . && ./mutates --add --alpha=localhost:9180 -./mutates --alpha=localhost:9180 -popd - -stopCluster diff --git a/contrib/systemd/centos/README.md b/contrib/systemd/centos/README.md deleted file mode 100644 index 0d2263db543..00000000000 --- a/contrib/systemd/centos/README.md +++ /dev/null @@ -1,82 +0,0 @@ -# systemd Integration for CentOS - -The following document describes how to manage `dgraph` with `systemd`. - -First, you need to install Dgraph: - -```Bash -curl https://get.dgraph.io -sSf | bash -``` - -Then create a system account for `dgraph` service: - -> **NOTE** You must run these operations as root. - -```Bash -groupadd --system dgraph -useradd --system -d /var/lib/dgraph -s /bin/false -g dgraph dgraph -mkdir -p /var/log/dgraph -mkdir -p /var/lib/dgraph/{p,w,zw} -chown -R dgraph:dgraph /var/{lib,log}/dgraph -``` - -Next, copy the `systemd` unit files, i.e. `dgraph-alpha.service`, `dgraph-zero.service`, and -`dgraph-ui.service`, in this directory to `/etc/systemd/system/`. - -> **NOTE** These unit files expect that Dgraph is installed as `/usr/local/bin/dgraph`. - -```Bash -cp dgraph-alpha.service /etc/systemd/system/ -cp dgraph-zero.service /etc/systemd/system/ -cp dgraph-ui.service /etc/systemd/system/ -``` - -Next, enable and start the `dgraph-alpha` service. Systemd will also automatically start the -`dgraph-zero` service as a prerequisite. - -```Bash -systemctl enable dgraph-alpha -systemctl start dgraph-alpha -``` - -The `dgraph-ui` service is optional and, unlike `dgraph-zero`, will not be started automatically. 
-
-```Bash
-systemctl enable dgraph-ui
-systemctl start dgraph-ui
-```
-
-If necessary, create `iptables` rules to allow traffic to the `dgraph-ui` service:
-
-```Bash
-iptables -I INPUT 4 -p tcp -m state --state NEW --dport 8000 -j ACCEPT
-iptables -I INPUT 4 -p tcp -m state --state NEW --dport 8080 -j ACCEPT
-```
-
-Check the status of the services:
-
-```Bash
-systemctl status dgraph-alpha
-systemctl status dgraph-zero
-systemctl status dgraph-ui
-```
-
-The logs are available via `journald`:
-
-```Bash
-journalctl -u dgraph-zero.service --since today
-journalctl -u dgraph-zero.service -r
-journalctl -u dgraph-alpha.service -r
-journalctl -u dgraph-ui.service -r
-```
-
-You can also follow the logs using `journalctl -f`:
-
-```Bash
-journalctl -f -u dgraph-zero.service
-journalctl -f -u dgraph-alpha.service
-journalctl -f -u dgraph-ui.service
-```
-
-> **NOTE** When `dgraph` exits with an error, `systemctl status` may not show the entire error
-> output. In that case it may be necessary to use `journald`.
diff --git a/contrib/systemd/centos/add_dgraph_account.sh b/contrib/systemd/centos/add_dgraph_account.sh
deleted file mode 100755
index 80d02f21999..00000000000
--- a/contrib/systemd/centos/add_dgraph_account.sh
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/usr/bin/env bash
-sudo_cmd=""
-if hash sudo 2>/dev/null; then
-	sudo_cmd="sudo"
-	echo "Requires sudo permission to install Dgraph with systemd."
-	if ! ${sudo_cmd} -v; then
-		echo "Need sudo privileges to complete installation." >&2
-		exit 1
-	fi
-fi
-
-${sudo_cmd} groupadd --system dgraph
-${sudo_cmd} useradd --system -d /var/lib/dgraph -s /bin/false -g dgraph dgraph
-${sudo_cmd} mkdir -p /var/log/dgraph
-${sudo_cmd} mkdir -p /var/lib/dgraph/{p,w,zw}
-${sudo_cmd} chown -R dgraph:dgraph /var/{lib,log}/dgraph
diff --git a/contrib/systemd/centos/dgraph-alpha.service b/contrib/systemd/centos/dgraph-alpha.service
deleted file mode 100644
index dcac7002e31..00000000000
--- a/contrib/systemd/centos/dgraph-alpha.service
+++ /dev/null
@@ -1,18 +0,0 @@
-[Unit]
-Description=dgraph.io Alpha instance
-Wants=network.target
-After=network.target dgraph-zero.service
-Requires=dgraph-zero.service
-
-[Service]
-Type=simple
-WorkingDirectory=/var/lib/dgraph
-ExecStart=/usr/bin/bash -c 'dgraph alpha -p /var/lib/dgraph/p -w /var/lib/dgraph/w'
-Restart=on-failure
-StandardOutput=journal
-StandardError=journal
-User=dgraph
-Group=dgraph
-
-[Install]
-WantedBy=multi-user.target
diff --git a/contrib/systemd/centos/dgraph-ui.service b/contrib/systemd/centos/dgraph-ui.service
deleted file mode 100644
index 00a6688ef91..00000000000
--- a/contrib/systemd/centos/dgraph-ui.service
+++ /dev/null
@@ -1,16 +0,0 @@
-[Unit]
-Description=dgraph.io Web UI
-Wants=network.target
-After=network.target
-
-[Service]
-Type=simple
-ExecStart=/usr/bin/bash -c 'dgraph-ratel'
-Restart=on-failure
-StandardOutput=journal
-StandardError=journal
-User=dgraph
-Group=dgraph
-
-[Install]
-WantedBy=multi-user.target
diff --git a/contrib/systemd/centos/dgraph-zero.service b/contrib/systemd/centos/dgraph-zero.service
deleted file mode 100644
index e82670b5717..00000000000
--- a/contrib/systemd/centos/dgraph-zero.service
+++ /dev/null
@@ -1,18 +0,0 @@
-[Unit]
-Description=dgraph.io Zero instance
-Wants=network.target
-After=network.target
-
-[Service]
-Type=simple
-WorkingDirectory=/var/lib/dgraph
-ExecStart=/usr/bin/bash -c 'dgraph zero --wal /var/lib/dgraph/zw'
-Restart=on-failure
-StandardOutput=journal
-StandardError=journal
-User=dgraph
-Group=dgraph
-
-[Install]
-WantedBy=multi-user.target
-RequiredBy=dgraph-alpha.service
diff --git a/contrib/systemd/ha_cluster/README.md b/contrib/systemd/ha_cluster/README.md
deleted file mode 100644
index e40a5dd8a91..00000000000
--- a/contrib/systemd/ha_cluster/README.md
+++ /dev/null
@@ -1,193 +0,0 @@
-# Systemd Configuration for an HA Dgraph Cluster
-
-The following document describes how to configure several nodes that are managed through
-[systemd](https://systemd.io/).
-
-## Overview
-
-You will configure the following types of Dgraph nodes:
-
-- zero nodes
-  - zero leader node - an initial leader node configured at the start of the cluster, e.g. `zero-0`
-  - zero peer nodes - peer nodes, e.g. `zero-1`, `zero-2`, that point to the zero leader
-- alpha nodes - configured similarly, e.g. `alpha-0`, `alpha-1`, `alpha-2`, that point to a list of
-  all zero nodes
-
-> **NOTE** These commands are run as root using the bash shell.
-
-## All Nodes (Zero and Alpha)
-
-On all systems that will run a Dgraph service, create the `dgraph` group and user.
-
-```bash
-groupadd --system dgraph
-useradd --system --home-dir /var/lib/dgraph --shell /bin/false --gid dgraph dgraph
-```
-
-## All Zero Nodes (Leader and Peers)
-
-On all Zero nodes, create these directory paths, owned by the `dgraph` user:
-
-```bash
-mkdir --parents /var/{log/dgraph,lib/dgraph/zw}
-chown --recursive dgraph:dgraph /var/{lib,log}/dgraph
-```
-
-### Configure First Zero Node
-
-Edit the file [dgraph-zero-0.service](dgraph-zero-0.service) as necessary. There are three
-parameters to set, including the hostname:
-
-- `--replicas` - total number of zeros
-- `--idx` - the initial zero node will be `1`, and each zero node added afterward will have the `idx`
-  increased by `1`
-
-Copy the file to `/etc/systemd/system/dgraph-zero.service` and run the following:
-
-```bash
-systemctl enable dgraph-zero
-systemctl start dgraph-zero
-```
-
-### Configure Second Zero Node
-
-This process is similar to the previous step. Edit the file
-[dgraph-zero-1.service](dgraph-zero-1.service) as required. Replace the string `{{ zero-0 }}` with
-the hostname of the zero leader, such as `zero-0`. The `idx` will be set to `2`.
-
-Copy the file to `/etc/systemd/system/dgraph-zero.service` and run the following:
-
-```bash
-systemctl enable dgraph-zero
-systemctl start dgraph-zero
-```
-
-### Configure Third Zero Node
-
-The third zero node, [dgraph-zero-2.service](dgraph-zero-2.service), is configured in the
-same manner as the second zero node, with the `idx` set to `3`.
-
-Copy the file to `/etc/systemd/system/dgraph-zero.service` and run the following:
-
-```bash
-systemctl enable dgraph-zero
-systemctl start dgraph-zero
-```
-
-### Configure Firewall for Zero Ports
-
-For zero you will want to open up port `5080` (GRPC). The port `6080` (HTTP) is an optional admin
-port that is not required by clients. For further information, see
-https://dgraph.io/docs/deploy/ports-usage/. This process will vary depending on the firewall you are
-using. Some examples below:
-
-On **Ubuntu 18.04**:
-
-```bash
-# enable internal port
-ufw allow from any to any port 5080 proto tcp
-# admin port (not required by clients)
-ufw allow from any to any port 6080 proto tcp
-```
-
-On **CentOS 8**:
-
-```bash
-# NOTE: public zone is the default and includes NIC used to access service
-# enable internal port
-firewall-cmd --zone=public --permanent --add-port=5080/tcp
-# admin port (not required by clients)
-firewall-cmd --zone=public --permanent --add-port=6080/tcp
-firewall-cmd --reload
-```
-
-## Configure Alpha Nodes
-
-On all Alpha nodes, create these directory paths, owned by the `dgraph` user:
-
-```bash
-mkdir --parents /var/{log/dgraph,lib/dgraph/{w,p}}
-chown --recursive dgraph:dgraph /var/{lib,log}/dgraph
-```
-
-Edit the file [dgraph-alpha.service](dgraph-alpha.service) as required. For the `--zero` parameter,
-you want to create a list that matches all the zeros in your cluster, so that when `{{ zero-0 }}`,
-`{{ zero-1 }}`, and `{{ zero-2 }}` are replaced, you will have a string something like this
-(adjusted to your organization's domain):
-
-```bash
---zero zero-0:5080,zero-1:5080,zero-2:5080
-```
-
-Copy the edited file to `/etc/systemd/system/dgraph-alpha.service` and run the following:
-
-```bash
-systemctl enable dgraph-alpha
-systemctl start dgraph-alpha
-```
-
-### Configure Firewall for Alpha Ports
-
-For alpha you will want to open up ports `7080` (GRPC), `8080` (HTTP/S), and `9080` (GRPC). For
-further information, see: https://dgraph.io/docs/deploy/ports-usage/. This process will vary
-depending on the firewall you are using. Some examples below:
-
-On **Ubuntu 18.04**:
-
-```bash
-# enable internal ports
-ufw allow from any to any port 7080 proto tcp
-# enable external ports
-ufw allow from any to any port 8080 proto tcp
-ufw allow from any to any port 9080 proto tcp
-```
-
-On **CentOS 8**:
-
-```bash
-# NOTE: public zone is the default and includes NIC used to access service
-# enable internal port
-firewall-cmd --zone=public --permanent --add-port=7080/tcp
-# enable external ports
-firewall-cmd --zone=public --permanent --add-port=8080/tcp
-firewall-cmd --zone=public --permanent --add-port=9080/tcp
-firewall-cmd --reload
-```
-
-## Verifying Services
-
-Below are examples of checking the health of the nodes and cluster.
-
-> **NOTE** Replace hostnames with your domain or use the IP address.
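-
-To sweep every node in one pass, a small loop over the hostnames works. The sketch below is
-illustrative only; it assumes the example hostnames used in this document and the default ports:
-
-```bash
-#!/usr/bin/env bash
-# Query each node's health endpoint once; adjust hostnames to your domain.
-for node in zero-0 zero-1 zero-2; do
-  curl --silent --max-time 5 "http://${node}:6080/health" || echo "${node}: no response"
-done
-for node in alpha-0 alpha-1 alpha-2; do
-  curl --silent --max-time 5 "http://${node}:8080/health" || echo "${node}: no response"
-done
-```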
- -### Zero Nodes - -You can check the health and state endpoints of the service: - -```bash -curl zero-0:6080/health -curl zero-0:6080/state -``` - -On the system itself, you can check the service status and logs: - -```bash -systemctl status dgraph-zero -journalctl -u dgraph-zero -``` - -### Alpha Nodes - -You can check the health and state endpoints of the service: - -```bash -curl alpha-0:8080/health -curl alpha-0:8080/state -``` - -On the system itself, you can check the service status and logs: - -```bash -systemctl status dgraph-alpha -journalctl -u dgraph-alpha -``` diff --git a/contrib/systemd/ha_cluster/dgraph-alpha.service b/contrib/systemd/ha_cluster/dgraph-alpha.service deleted file mode 100644 index 9a31844a5ba..00000000000 --- a/contrib/systemd/ha_cluster/dgraph-alpha.service +++ /dev/null @@ -1,17 +0,0 @@ -[Unit] -Description=dgraph alpha server -Wants=network.target -After=network.target - -[Service] -Type=simple -WorkingDirectory=/var/lib/dgraph -Restart=on-failure -ExecStart=/bin/bash -c '/usr/local/bin/dgraph alpha --my={{ myhostname }}:7080 --zero {{ zero-0 }}:5080,{{ zero-1 }}:5080,{{ zero-2 }}:5080 --postings /var/lib/dgraph/p --wal /var/lib/dgraph/w' -StandardOutput=journal -StandardError=journal -User=dgraph -Group=dgraph - -[Install] -WantedBy=multi-user.target diff --git a/contrib/systemd/ha_cluster/dgraph-zero-0.service b/contrib/systemd/ha_cluster/dgraph-zero-0.service deleted file mode 100644 index fad0f9f93b9..00000000000 --- a/contrib/systemd/ha_cluster/dgraph-zero-0.service +++ /dev/null @@ -1,17 +0,0 @@ -[Unit] -Description=dgraph zero server -Wants=network.target -After=network.target - -[Service] -Type=simple -WorkingDirectory=/var/lib/dgraph -Restart=on-failure -ExecStart=/bin/bash -c '/usr/local/bin/dgraph zero --my={{ myhostname }}:5080 --wal /var/lib/dgraph/zw --raft="idx=1" --replicas 3' -StandardOutput=journal -StandardError=journal -User=dgraph -Group=dgraph - -[Install] -WantedBy=multi-user.target diff --git a/contrib/systemd/ha_cluster/dgraph-zero-1.service b/contrib/systemd/ha_cluster/dgraph-zero-1.service deleted file mode 100644 index 3e639ad59a0..00000000000 --- a/contrib/systemd/ha_cluster/dgraph-zero-1.service +++ /dev/null @@ -1,17 +0,0 @@ -[Unit] -Description=dgraph zero server -Wants=network.target -After=network.target - -[Service] -Type=simple -WorkingDirectory=/var/lib/dgraph -Restart=on-failure -ExecStart=/bin/bash -c '/usr/local/bin/dgraph zero --my={{ myhostname }}:5080 --peer {{ zero-0 }}:5080 --wal /var/lib/dgraph/zw --raft="idx=2" --replicas 3' -StandardOutput=journal -StandardError=journal -User=dgraph -Group=dgraph - -[Install] -WantedBy=multi-user.target diff --git a/contrib/systemd/ha_cluster/dgraph-zero-2.service b/contrib/systemd/ha_cluster/dgraph-zero-2.service deleted file mode 100644 index 92d6b9a2a96..00000000000 --- a/contrib/systemd/ha_cluster/dgraph-zero-2.service +++ /dev/null @@ -1,17 +0,0 @@ -[Unit] -Description=dgraph zero server -Wants=network.target -After=network.target - -[Service] -Type=simple -WorkingDirectory=/var/lib/dgraph -Restart=on-failure -ExecStart=/bin/bash -c '/usr/local/bin/dgraph zero --my={{ myhostname }}:5080 --peer {{ zero-0 }}:5080 --wal /var/lib/dgraph/zw --raft="idx=3" --replicas 3' -StandardOutput=journal -StandardError=journal -User=dgraph -Group=dgraph - -[Install] -WantedBy=multi-user.target diff --git a/contrib/systemd/ha_cluster/tests/.gitignore b/contrib/systemd/ha_cluster/tests/.gitignore deleted file mode 100644 index 8000dd9db47..00000000000 --- 
a/contrib/systemd/ha_cluster/tests/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-.vagrant
diff --git a/contrib/systemd/ha_cluster/tests/README.md b/contrib/systemd/ha_cluster/tests/README.md
deleted file mode 100644
index 885ed51ba70..00000000000
--- a/contrib/systemd/ha_cluster/tests/README.md
+++ /dev/null
@@ -1,147 +0,0 @@
-# Systemd Tests
-
-These tests both demonstrate and exercise the functionality of the systemd units used to manage
-Dgraph.
-
-## Requirements
-
-- HashiCorp [Vagrant](https://www.vagrantup.com/) - automation to manage virtual machine systems and
-  provision them.
-
-## Instructions
-
-### Create VM Guests and Provision
-
-Either `cd centos8` or `cd ubuntu1804` and run:
-
-```bash
-vagrant up
-```
-
-#### Using Hyper/V Provider
-
-On Windows 10 Pro with Hyper/V enabled, you can run this in PowerShell:
-
-```powershell
-$Env:VAGRANT_DEFAULT_PROVIDER = "hyperv"
-vagrant up
-```
-
-#### Using libvirt Provider
-
-If you are running on Linux and would like to use KVM for a speedier Vagrant experience, you can
-install the `vagrant-libvirt` plugin (see
-[Installation](https://github.com/vagrant-libvirt/vagrant-libvirt#installation)) and run this:
-
-```bash
-export VAGRANT_DEFAULT_PROVIDER=libvirt
-vagrant up
-```
-
-### Logging Into the System
-
-You can log into the guest virtual machines with SSH.
-
-```bash
-vagrant ssh # log into default `alpha-0`
-vagrant status # get status of running systems
-vagrant ssh zero-1 # log into zero-1
-```
-
-### Get Health Check
-
-You can check the health of a system with this pattern (using `awk` and `curl`):
-
-```bash
-# test a zero virtual guest
-curl $(awk '/zero-0/{ print $1 }' hosts):6080/health
-# test an alpha virtual guest
-curl $(awk '/alpha-0/{ print $1 }' hosts):8080/health
-```
-
-### Get State of Cluster
-
-You can check the state of the cluster with this pattern (using `awk` and `curl`):
-
-```bash
-# get state of cluster
-curl $(awk '/zero-0/{ print $1 }' hosts):6080/state
-```
-
-### Get Logs
-
-```bash
-# get logs from zero0
-vagrant ssh zero-0 --command "sudo journalctl -u dgraph-zero"
-# get logs from alpha0
-vagrant ssh alpha-0 --command "sudo journalctl -u dgraph-alpha"
-```
-
-### Cleanup and Destroy VMs
-
-```bash
-vagrant destroy --force
-```
-
-## About Automation
-
-### Configuration
-
-The configuration is a space-delimited `hosts` file. It defines both the hostnames and the virtual
-IP addresses used to create the virtual guests. Vagrant, in combination with the underlying
-virtual machine provider, will create a virtual network accessible by the host.
-
-```host
-<ip_address> <hostname> [default]
-```
-
-You can use `default` for one system to be designated as the default for `vagrant ssh`.
-
-#### Dgraph Version
-
-By default, the latest Dgraph version is used. If you want to use another version, you can set the
-environment variable `DGRAPH_VERSION` to the desired version.
-
-### Windows Environment
-
-On Windows, for either the Hyper/V or Virtualbox providers, you can for convenience specify the
-username `SMB_USER` and password `SMB_PASSWD` before running `vagrant up`, so that you won't get
-prompted 6 times for a username and password.
-
-> **NOTE**: Setting a password in an environment variable is not considered a security best practice.
- -To use this in PowerShell, you can do this: - -```powershell -$Env:SMB_USER = "" # example: $Env:USERNAME -$Env:SMB_PASSWD = "" -# "hyperv" or "virtualbox" -$Env:VAGRANT_DEFAULT_PROVIDER = "" - -vagrant up -``` - -## Environments Tested - -- Guest OS - - [Cent OS 8](https://app.vagrantup.com/generic/boxes/centos8) from - [Roboxes](https://roboxes.org/) - - [Ubuntu 18.04](https://app.vagrantup.com/generic/boxes/ubuntu1804) from - [Roboxes](https://roboxes.org/) -- Providers - - [libvirt](https://github.com/vagrant-libvirt/vagrant-libvirt) (KVM) on Ubuntu 19.10 - - [VirtualBox](https://www.vagrantup.com/docs/providers/virtualbox) on Win10 Home, Mac OS X 10.14 - - [Hyper/V](https://www.vagrantup.com/docs/providers/hyperv) on Win10 Pro - -## Resources - -- Vagrant - - Util API: https://www.rubydoc.info/github/hashicorp/vagrant/Vagrant/Util/Platform - - Multi-Machine: https://www.vagrantup.com/docs/multi-machine - - Synced Folders: https://www.vagrantup.com/docs/synced-folders - - lib-virt: https://github.com/vagrant-libvirt/vagrant-libvirt#synced-folders - - Provisioning: https://www.vagrantup.com/docs/provisioning -- Dgraph - - Documentation: https://dgraph.io/docs/ - - Community: https://discuss.dgraph.io/ diff --git a/contrib/systemd/ha_cluster/tests/centos8/Vagrantfile b/contrib/systemd/ha_cluster/tests/centos8/Vagrantfile deleted file mode 100644 index e83a67826c6..00000000000 --- a/contrib/systemd/ha_cluster/tests/centos8/Vagrantfile +++ /dev/null @@ -1,36 +0,0 @@ -# -*- mode: ruby -*- -# vi: set ft=ruby : -eval File.read("./vagrant_helper.rb") - -Vagrant.configure("2") do |config| - @hosts.each do |hostname, ipaddr| - default = if hostname == @primary then true else false end - config.vm.define hostname, primary: default do |node| - node.vm.box = "generic/centos8" - node.vm.hostname = "#{hostname}" - node.vm.network "private_network", ip: ipaddr - node.vm.synced_folder ".", "/vagrant" - - node.vm.provider "virtualbox" do |vbox, override| - vbox.name = "#{hostname}" - # enable SMB3.0 for better fileshare UX on Windows-Virtualbox - if Vagrant::Util::Platform.windows? then - override.vm.synced_folder ".", "/vagrant", @smb_sync_opts - end - end - - node.vm.provider "hyperv" do |hyperv, override| - hyperv.vmname = "#{hostname}" - # enable SMB3.0 for better fileshare UX on Windows-HyperV - override.vm.synced_folder ".", "/vagrant", @smb_sync_opts - end - - node.vm.provision "shell" do |shell| - shell.path = "provision.sh" - shell.args = [@replicas] - shell.env = { DGRAPH_VERSION: @version } - shell.privileged = true - end - end - end -end diff --git a/contrib/systemd/ha_cluster/tests/centos8/hosts b/contrib/systemd/ha_cluster/tests/centos8/hosts deleted file mode 100644 index 118dd67d08f..00000000000 --- a/contrib/systemd/ha_cluster/tests/centos8/hosts +++ /dev/null @@ -1,6 +0,0 @@ -192.168.123.11 zero-0 -192.168.123.12 zero-1 -192.168.123.13 zero-2 -192.168.123.14 alpha-0 default -192.168.123.15 alpha-1 -192.168.123.16 alpha-2 diff --git a/contrib/systemd/ha_cluster/tests/centos8/provision.sh b/contrib/systemd/ha_cluster/tests/centos8/provision.sh deleted file mode 100755 index 9510ddf7ae1..00000000000 --- a/contrib/systemd/ha_cluster/tests/centos8/provision.sh +++ /dev/null @@ -1,186 +0,0 @@ -#!/usr/bin/env bash - -##### -# main -################################ -main() { - if [[ $1 =~ h(elp)?|\? 
]]; then usage; fi - if (($# != 1)); then usage; fi - REPLICAS=$1 - - echo "RUNNING script" - - setup_hosts - install_dgraph - setup_user_group - setup_systemd - setup_firewall -} - -##### -# usage -################################ -usage() { - printf " Usage: \n\t$0 [REPLICAS]\n\n" >&2 - exit 1 -} - -##### -# install_dgraph - installer script from https://get.dgraph.io -################################ -install_dgraph() { - [[ -z ${DGRAPH_VERSION} ]] && { - echo 'DGRAPH_VERSION not specified. Aborting' 2>&1 - return 1 - } - echo "INFO: Installing Dgraph with 'curl -sSf https://get.dgraph.io | ACCEPT_LICENSE="y" VERSION=""${DGRAPH_VERSION}"" bash'" - curl -sSf https://get.dgraph.io | ACCEPT_LICENSE="y" VERSION="${DGRAPH_VERSION}" bash -} - -##### -# setup_hosts - configure /etc/hosts in absence of DNS -################################ -setup_hosts() { - CONFIG_FILE=/vagrant/hosts - if [[ ! -f /vagrant/hosts ]]; then - echo "INFO: '${CONFIG_FILE}' does not exist. Skipping configuring /etc/hosts" - return 1 - fi - - while read -a LINE; do - ## append to hosts entry if it doesn't exist - if ! grep -q "${LINE[1]}" /etc/hosts; then - printf "%s %s \n" "${LINE[*]}" >>/etc/hosts - fi - done <"${CONFIG_FILE}" -} - -##### -# setup_user_group - dgraph user and gruop -################################ -setup_user_group() { - id -g dgraph &>/dev/null || groupadd --system dgraph - id -u dgraph &>/dev/null || useradd --system -d /var/lib/dgraph -s /bin/false -g dgraph dgraph -} - -##### -# setup_firewall on Ubuntu 18.04 and CentOS 8 -################################ -setup_firewall() { - case $(hostname) in - *zero*) - PORTS=(5080 6080) - ;; - *alpha*) - PORTS=(7080 8080 9080) - ;; - esac - - if grep -q centos /etc/os-release; then - if /usr/bin/firewall-cmd --state 2>&1 | grep -q "^running$"; then - for PORT in ${PORTS[*]}; do - firewall-cmd --zone=public --permanent --add-port="${PORT}"/tcp - firewall-cmd --reload - done - fi - elif grep -iq ubuntu /etc/os-release; then - if /usr/sbin/ufw status | grep -wq active; then - for PORT in ${PORTS[*]}; do - ufw allow from any to any port "${PORT}" proto tcp - done - fi - fi -} - -##### -# setup_systemd_zero - setup dir and systemd unit for zero leader or peer -################################ -setup_systemd_zero() { - TYPE=${1:-"peer"} - LDR="zero-0:5080" - WAL=/var/lib/dgraph/zw - IDX=$(($(grep -o '[0-9]' <<<"${HOSTNAME}") + 1)) - if [[ ${TYPE} == "leader" ]]; then - EXEC="/bin/bash -c '/usr/local/bin/dgraph zero --my=\$(hostname):5080 --wal ${WAL} - --raft="idx=${IDX}" --replicas ${REPLICAS}'" - else - EXEC="/bin/bash -c '/usr/local/bin/dgraph zero --my=\$(hostname):5080 --peer ${LDR} --wal ${WAL} - --raft="idx=${IDX}" --replicas ${REPLICAS}'" - fi - - mkdir -p /var/{log/dgraph,lib/dgraph/zw} - chown -R dgraph:dgraph /var/{lib,log}/dgraph - - install_systemd_unit "zero" "${EXEC}" -} - -##### -# setup_systemd_alpha - setup dir and systemd unit for alpha -################################ -setup_systemd_alpha() { - WAL=/var/lib/dgraph/w - POSTINGS=/var/lib/dgraph/p - # build array based on number of replicas - for ((I = 0; I <= REPLICAS - 1; I++)); do ZEROS+=("zero-${I}:5080"); done - IFS=, eval 'ZERO_LIST="${ZEROS[*]}"' # join by ',' - - EXEC="/bin/bash -c '/usr/local/bin/dgraph alpha --my=\$(hostname):7080 --zero ${ZERO_LIST} --postings ${POSTINGS} --wal ${WAL}'" - - mkdir -p /var/{log/dgraph,lib/dgraph/{w,p}} - chown -R dgraph:dgraph /var/{lib,log}/dgraph - - install_systemd_unit "alpha" "${EXEC}" -} - -##### -# install_systemd_unit - config systemd unit 
give exec str and service type -################################ -install_systemd_unit() { - TYPE=$1 - EXEC=$2 - - if [[ ! -f /etc/systemd/system/dgraph-${TYPE}.service ]]; then - cat <<-EOF >/etc/systemd/system/dgraph-"${TYPE}".service - [Unit] - Description=dgraph ${TYPE} server - Wants=network.target - After=network.target - - [Service] - Type=simple - WorkingDirectory=/var/lib/dgraph - Restart=on-failure - ExecStart=${EXEC} - StandardOutput=journal - StandardError=journal - User=dgraph - Group=dgraph - - [Install] - WantedBy=multi-user.target - EOF - systemctl enable dgraph-"${TYPE}" - systemctl start dgraph-"${TYPE}" - else - echo "Skipping as 'dgraph-${TYPE}.service' already exists" - fi -} - -##### -# setup_systemd - configure systemd unit based on hostname -################################ -setup_systemd() { - case $(hostname) in - *zero-0*) - setup_systemd_zero "leader" - ;; - *zero-[1-9]*) - setup_systemd_zero "peer" - ;; - *alpha*) - setup_systemd_alpha - ;; - esac -} - -main $@ diff --git a/contrib/systemd/ha_cluster/tests/centos8/vagrant_helper.rb b/contrib/systemd/ha_cluster/tests/centos8/vagrant_helper.rb deleted file mode 100644 index ca19b8dea69..00000000000 --- a/contrib/systemd/ha_cluster/tests/centos8/vagrant_helper.rb +++ /dev/null @@ -1,22 +0,0 @@ -## read lines from configuration -lines = File.readlines("./hosts") - -## Create hash of { hostname => inet_addr } -@hosts = lines.map { |ln| i,h = ln.split(/\s+/); [h,i] }.to_h - -## Set primary host for `vagrant ssh` -@primary = (lines.select { |line| line =~ /primary|default/ }[0] ||="").split[1] || "alpha-1" - -## Set Replicas based on # of zeros -@replicas = @hosts.keys.select { |host| host.to_s.match /^zero-\d+/ }.count - -## Create hash 0f SMB sync options w/ optional smb_username and smb_password -@smb_sync_opts = { type: "smb", mount_options: %w[mfsymlinks vers=3.0] } -@smb_sync_opts.merge! smb_username: ENV['SMB_USER'] if ENV['SMB_USER'] -@smb_sync_opts.merge! smb_password: ENV['SMB_PASSWD'] if ENV['SMB_PASSWD'] - -## Set Latest Version -uri = URI.parse("https://get.dgraph.io/latest") -response = Net::HTTP.get_response(uri) -latest = JSON.parse(response.body)["tag_name"] -@version = ENV['DGRAPH_VERSION'] || latest diff --git a/contrib/systemd/ha_cluster/tests/ubuntu1804/Vagrantfile b/contrib/systemd/ha_cluster/tests/ubuntu1804/Vagrantfile deleted file mode 100644 index b1b62f69022..00000000000 --- a/contrib/systemd/ha_cluster/tests/ubuntu1804/Vagrantfile +++ /dev/null @@ -1,36 +0,0 @@ -# -*- mode: ruby -*- -# vi: set ft=ruby : -eval File.read("./vagrant_helper.rb") - -Vagrant.configure("2") do |config| - @hosts.each do |hostname, ipaddr| - default = if hostname == @primary then true else false end - config.vm.define hostname, primary: default do |node| - node.vm.box = "generic/ubuntu1804" - node.vm.hostname = "#{hostname}" - node.vm.network "private_network", ip: ipaddr - node.vm.synced_folder ".", "/vagrant" - - node.vm.provider "virtualbox" do |vbox, override| - vbox.name = "#{hostname}" - # enable SMB3.0 for better fileshare UX on Windows-Virtualbox - if Vagrant::Util::Platform.windows? 
then - override.vm.synced_folder ".", "/vagrant", @smb_sync_opts - end - end - - node.vm.provider "hyperv" do |hyperv, override| - hyperv.vmname = "#{hostname}" - # enable SMB3.0 for better fileshare UX on Windows-HyperV - override.vm.synced_folder ".", "/vagrant", @smb_sync_opts - end - - node.vm.provision "shell" do |shell| - shell.path = "provision.sh" - shell.args = [@replicas] - shell.env = { DGRAPH_VERSION: @version } - shell.privileged = true - end - end - end -end diff --git a/contrib/systemd/ha_cluster/tests/ubuntu1804/hosts b/contrib/systemd/ha_cluster/tests/ubuntu1804/hosts deleted file mode 100644 index bef99957f34..00000000000 --- a/contrib/systemd/ha_cluster/tests/ubuntu1804/hosts +++ /dev/null @@ -1,6 +0,0 @@ -192.168.123.21 zero-0 -192.168.123.22 zero-1 -192.168.123.23 zero-2 -192.168.123.24 alpha-0 default -192.168.123.25 alpha-1 -192.168.123.26 alpha-2 diff --git a/contrib/systemd/ha_cluster/tests/ubuntu1804/provision.sh b/contrib/systemd/ha_cluster/tests/ubuntu1804/provision.sh deleted file mode 100755 index 9510ddf7ae1..00000000000 --- a/contrib/systemd/ha_cluster/tests/ubuntu1804/provision.sh +++ /dev/null @@ -1,186 +0,0 @@ -#!/usr/bin/env bash - -##### -# main -################################ -main() { - if [[ $1 =~ h(elp)?|\? ]]; then usage; fi - if (($# != 1)); then usage; fi - REPLICAS=$1 - - echo "RUNNING script" - - setup_hosts - install_dgraph - setup_user_group - setup_systemd - setup_firewall -} - -##### -# usage -################################ -usage() { - printf " Usage: \n\t$0 [REPLICAS]\n\n" >&2 - exit 1 -} - -##### -# install_dgraph - installer script from https://get.dgraph.io -################################ -install_dgraph() { - [[ -z ${DGRAPH_VERSION} ]] && { - echo 'DGRAPH_VERSION not specified. Aborting' 2>&1 - return 1 - } - echo "INFO: Installing Dgraph with 'curl -sSf https://get.dgraph.io | ACCEPT_LICENSE="y" VERSION=""${DGRAPH_VERSION}"" bash'" - curl -sSf https://get.dgraph.io | ACCEPT_LICENSE="y" VERSION="${DGRAPH_VERSION}" bash -} - -##### -# setup_hosts - configure /etc/hosts in absence of DNS -################################ -setup_hosts() { - CONFIG_FILE=/vagrant/hosts - if [[ ! -f /vagrant/hosts ]]; then - echo "INFO: '${CONFIG_FILE}' does not exist. Skipping configuring /etc/hosts" - return 1 - fi - - while read -a LINE; do - ## append to hosts entry if it doesn't exist - if ! 
grep -q "${LINE[1]}" /etc/hosts; then - printf "%s %s \n" "${LINE[*]}" >>/etc/hosts - fi - done <"${CONFIG_FILE}" -} - -##### -# setup_user_group - dgraph user and gruop -################################ -setup_user_group() { - id -g dgraph &>/dev/null || groupadd --system dgraph - id -u dgraph &>/dev/null || useradd --system -d /var/lib/dgraph -s /bin/false -g dgraph dgraph -} - -##### -# setup_firewall on Ubuntu 18.04 and CentOS 8 -################################ -setup_firewall() { - case $(hostname) in - *zero*) - PORTS=(5080 6080) - ;; - *alpha*) - PORTS=(7080 8080 9080) - ;; - esac - - if grep -q centos /etc/os-release; then - if /usr/bin/firewall-cmd --state 2>&1 | grep -q "^running$"; then - for PORT in ${PORTS[*]}; do - firewall-cmd --zone=public --permanent --add-port="${PORT}"/tcp - firewall-cmd --reload - done - fi - elif grep -iq ubuntu /etc/os-release; then - if /usr/sbin/ufw status | grep -wq active; then - for PORT in ${PORTS[*]}; do - ufw allow from any to any port "${PORT}" proto tcp - done - fi - fi -} - -##### -# setup_systemd_zero - setup dir and systemd unit for zero leader or peer -################################ -setup_systemd_zero() { - TYPE=${1:-"peer"} - LDR="zero-0:5080" - WAL=/var/lib/dgraph/zw - IDX=$(($(grep -o '[0-9]' <<<"${HOSTNAME}") + 1)) - if [[ ${TYPE} == "leader" ]]; then - EXEC="/bin/bash -c '/usr/local/bin/dgraph zero --my=\$(hostname):5080 --wal ${WAL} - --raft="idx=${IDX}" --replicas ${REPLICAS}'" - else - EXEC="/bin/bash -c '/usr/local/bin/dgraph zero --my=\$(hostname):5080 --peer ${LDR} --wal ${WAL} - --raft="idx=${IDX}" --replicas ${REPLICAS}'" - fi - - mkdir -p /var/{log/dgraph,lib/dgraph/zw} - chown -R dgraph:dgraph /var/{lib,log}/dgraph - - install_systemd_unit "zero" "${EXEC}" -} - -##### -# setup_systemd_alpha - setup dir and systemd unit for alpha -################################ -setup_systemd_alpha() { - WAL=/var/lib/dgraph/w - POSTINGS=/var/lib/dgraph/p - # build array based on number of replicas - for ((I = 0; I <= REPLICAS - 1; I++)); do ZEROS+=("zero-${I}:5080"); done - IFS=, eval 'ZERO_LIST="${ZEROS[*]}"' # join by ',' - - EXEC="/bin/bash -c '/usr/local/bin/dgraph alpha --my=\$(hostname):7080 --zero ${ZERO_LIST} --postings ${POSTINGS} --wal ${WAL}'" - - mkdir -p /var/{log/dgraph,lib/dgraph/{w,p}} - chown -R dgraph:dgraph /var/{lib,log}/dgraph - - install_systemd_unit "alpha" "${EXEC}" -} - -##### -# install_systemd_unit - config systemd unit give exec str and service type -################################ -install_systemd_unit() { - TYPE=$1 - EXEC=$2 - - if [[ ! 
-f /etc/systemd/system/dgraph-${TYPE}.service ]]; then - cat <<-EOF >/etc/systemd/system/dgraph-"${TYPE}".service - [Unit] - Description=dgraph ${TYPE} server - Wants=network.target - After=network.target - - [Service] - Type=simple - WorkingDirectory=/var/lib/dgraph - Restart=on-failure - ExecStart=${EXEC} - StandardOutput=journal - StandardError=journal - User=dgraph - Group=dgraph - - [Install] - WantedBy=multi-user.target - EOF - systemctl enable dgraph-"${TYPE}" - systemctl start dgraph-"${TYPE}" - else - echo "Skipping as 'dgraph-${TYPE}.service' already exists" - fi -} - -##### -# setup_systemd - configure systemd unit based on hostname -################################ -setup_systemd() { - case $(hostname) in - *zero-0*) - setup_systemd_zero "leader" - ;; - *zero-[1-9]*) - setup_systemd_zero "peer" - ;; - *alpha*) - setup_systemd_alpha - ;; - esac -} - -main $@ diff --git a/contrib/systemd/ha_cluster/tests/ubuntu1804/vagrant_helper.rb b/contrib/systemd/ha_cluster/tests/ubuntu1804/vagrant_helper.rb deleted file mode 100644 index ca19b8dea69..00000000000 --- a/contrib/systemd/ha_cluster/tests/ubuntu1804/vagrant_helper.rb +++ /dev/null @@ -1,22 +0,0 @@ -## read lines from configuration -lines = File.readlines("./hosts") - -## Create hash of { hostname => inet_addr } -@hosts = lines.map { |ln| i,h = ln.split(/\s+/); [h,i] }.to_h - -## Set primary host for `vagrant ssh` -@primary = (lines.select { |line| line =~ /primary|default/ }[0] ||="").split[1] || "alpha-1" - -## Set Replicas based on # of zeros -@replicas = @hosts.keys.select { |host| host.to_s.match /^zero-\d+/ }.count - -## Create hash 0f SMB sync options w/ optional smb_username and smb_password -@smb_sync_opts = { type: "smb", mount_options: %w[mfsymlinks vers=3.0] } -@smb_sync_opts.merge! smb_username: ENV['SMB_USER'] if ENV['SMB_USER'] -@smb_sync_opts.merge! smb_password: ENV['SMB_PASSWD'] if ENV['SMB_PASSWD'] - -## Set Latest Version -uri = URI.parse("https://get.dgraph.io/latest") -response = Net::HTTP.get_response(uri) -latest = JSON.parse(response.body)["tag_name"] -@version = ENV['DGRAPH_VERSION'] || latest diff --git a/contrib/tlstest/Makefile b/contrib/tlstest/Makefile deleted file mode 100644 index c4546aaf732..00000000000 --- a/contrib/tlstest/Makefile +++ /dev/null @@ -1,48 +0,0 @@ -# -# SPDX-FileCopyrightText: © Hypermode Inc. -# SPDX-License-Identifier: Apache-2.0 -# - -DGRAPH_PATH = $(GOPATH)/src/github.com/hypermodeinc/dgraph/dgraph -DGRAPH_BIN = $(DGRAPH_PATH)/dgraph - -TARGETS = test1 test2 test3 test4 test5 test6 -KEYBITS = 2048 - -.PHONY: all -all: cert $(TARGETS) - -test: all - -cert: - @echo "Generating CA cert in 'tls' dir." 
-	@$(MAKE) -C $(DGRAPH_PATH) all
-	@$(DGRAPH_BIN) cert --keysize $(KEYBITS) -d $(PWD)/tls -n localhost -c live
-
-test1: cert
-	@echo "Test 1: Alpha non-TLS, Live non-TLS"
-	@(DGRAPH_BIN=$(DGRAPH_BIN) ./test.sh ./alpha_notls.sh ./live_notls.sh 0)
-
-test2: cert
-	@echo "Test 2: Alpha non-TLS, Live TLS"
-	@(DGRAPH_BIN=$(DGRAPH_BIN) ./test.sh ./alpha_notls.sh ./live_tls.sh 1)
-
-test3: cert
-	@echo "Test 3: Alpha TLS, Live non-TLS"
-	@(DGRAPH_BIN=$(DGRAPH_BIN) ./test.sh ./alpha_tls.sh ./live_notls.sh 1)
-
-test4: cert
-	@echo "Test 4: Alpha TLS, Live TLS"
-	@(DGRAPH_BIN=$(DGRAPH_BIN) ./test.sh ./alpha_tls.sh ./live_tls.sh 0)
-
-test5: cert
-	@echo "Test 5: Alpha TLS Auth, Live TLS"
-	@(DGRAPH_BIN=$(DGRAPH_BIN) ./test.sh ./alpha_tls_auth.sh ./live_tls_auth.sh 0)
-
-test6: cert
-	@echo "Test 6: Alpha TLS reload, Live TLS"
-	@(DGRAPH_BIN=$(DGRAPH_BIN) RELOAD_TEST=1 ./test.sh ./alpha_tls.sh ./live_tls.sh 1)
-
-clean:
-	git clean -d -f
-
diff --git a/contrib/tlstest/README.md b/contrib/tlstest/README.md
deleted file mode 100644
index 70622d9d94e..00000000000
--- a/contrib/tlstest/README.md
+++ /dev/null
@@ -1,29 +0,0 @@
-# Semiautomatic tests of TLS configuration
-
-This directory contains several scripts that help with testing TLS functionality in Dgraph.
-
-- `Makefile` - cleans up the directory, creates the CA, client, and server keys and signed certs, and
-  executes the tests
-- `server_nopass.sh` - starts a server that uses an unencrypted private key
-- `server_nopass_client_auth.sh` - starts a server that uses an unencrypted private key and requires
-  client authentication
-- `server_pass.sh` - starts a server that uses an encrypted, password-protected private key
-- `server_11.sh` - starts a server with the maximum TLS version set to 1.1
-- `client_nopass.sh` - executes dgraph-live-loader configured to use an unencrypted private key
-- `client_pass.sh` - executes dgraph-live-loader configured to use an encrypted, password-protected
-  private key
-- `client_nocert.sh` - executes dgraph-live-loader without a configured client certificate
-- `client_12.sh` - executes dgraph-live-loader with the minimum TLS version set to 1.2
-
-## Notes
-
-The Go x509 package supports only encrypted private keys containing a "DEK-Info" header. By default,
-openssl doesn't include it in generated keys. Fortunately, if the encryption method is explicitly
-set on the command line, openssl adds the "DEK-Info" header.
-
-`server_pass.sh` should be used with `client_pass.sh`. This enables testing of the `tls_server_name`
-configuration option. Mixing `_pass` and `_nopass` client/server shows that the server name is
-verified by the client.
-
-For testing purposes, the DNS names server1.dgraph.io and server2.dgraph.io have to be resolvable.
-Editing /etc/hosts is the simplest way to achieve this.
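-
-Both notes can be checked by hand. The commands below are an illustrative sketch, not part of the
-test scripts; they assume an OpenSSL 1.x toolchain, which emits traditional PEM (with the
-"DEK-Info" header) when a cipher is passed explicitly:
-
-```bash
-# An explicit cipher makes openssl 1.x write the "DEK-Info" header that the
-# Go x509 package requires for encrypted keys.
-openssl genrsa -aes256 -passout pass:secret -out server_pass.key 2048
-grep DEK-Info server_pass.key # DEK-Info: AES-256-CBC,...
-
-# Make the test server names resolvable without real DNS records.
-echo '127.0.0.1 server1.dgraph.io server2.dgraph.io' | sudo tee -a /etc/hosts
-```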
diff --git a/contrib/tlstest/alpha_notls.sh b/contrib/tlstest/alpha_notls.sh deleted file mode 100755 index 84993aa5047..00000000000 --- a/contrib/tlstest/alpha_notls.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash -set -e -${DGRAPH_BIN} alpha --zero 127.0.0.1:5081 &>alpha.log diff --git a/contrib/tlstest/alpha_tls.sh b/contrib/tlstest/alpha_tls.sh deleted file mode 100755 index 29fb3078638..00000000000 --- a/contrib/tlstest/alpha_tls.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/bash -set -e - -${DGRAPH_BIN} alpha --tls "ca-cert=${PWD}/tls/ca.crt; server-cert=${PWD}/tls/node.crt; server-key=${PWD}/tls/node.key;" --zero 127.0.0.1:5081 &>alpha.log diff --git a/contrib/tlstest/alpha_tls_auth.sh b/contrib/tlstest/alpha_tls_auth.sh deleted file mode 100755 index fe5f64ecf06..00000000000 --- a/contrib/tlstest/alpha_tls_auth.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash -set -e -${DGRAPH_BIN} alpha --tls "ca-cert=${PWD}/tls/ca.crt; server-cert=${PWD}/tls/node.crt; server-key=${PWD}/tls/node.key; client-auth-type=REQUIREANDVERIFY;" --zero 127.0.0.1:5081 &>alpha.log diff --git a/contrib/tlstest/data.rdf.gz b/contrib/tlstest/data.rdf.gz deleted file mode 100644 index c59c185288a6c26cb5e08a6d32d84bd8c37e5b05..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1543 zcmV+i2Kf0OiwFP(0j^j81D#mkZ`(v1e}=!})I9(aNo=Qal4`Q{vm2vh6`cfoS#`G0 z_9eE@IbV{xPb-=bgBJv1lg1E2Oak@*0}T)e@v{Gf(suhVI49>dw$CNAR8@)nxsN~J z-(TmnjVZDip-l(-f$_>|kFjX2)tXMH&1t(ysN2#MMQJH&i`q^jwmcNTQ#J`84mmnI zZM@Qa^>i&N3PTrh@cfK~C4>RLzp~E1#bACD*(+yICN_} zdPe@(T$$WDPG%-A?WV$mUy3Ai(94R^T1vM?rv51$P7saJHD`gOS)oWc`+~R#%p~vW z40p1hPmr2FNYPB%c6MP2g%zQCfyNfQb0&y3lcQb!g zocureZc|mY#OVgnad~B?2TjH8?(~x#2~z&J6}8{h8;agg`ztqZ%AKIL-oLuBB^)q2 zRU#MWUV+#L=O}_h9<CQL(W*R+X}QqW{##( zT``sbnyx$1YzeZ`^1fQna=|MszwRM$fe;$6fek!C0Egh>IL%A&y4u%!Z)m0wlgyHAx~nLzQO0H&P7v7S&y7h~ z;B*G8BeUNc4MU@hTXZs-U?0aG*z{pIEYNyt!eq2BN+YRR33(rzfdTCGWt3*A>?FpfP87y*#jkh}Zo z6S;12V1MuL{SR*K@7>$q`~LY?pB+5>?%?rIF{lE;Iog@PW@`cf7e~P}*0dT0`B>bIyU);aaV! 
zb^&K>gd8Q!>wh&ibtbc#fmv%xYP@UWa7r^Q{RQ!mg_ZqGdL!#KtkIP05Z{sRBctoLcDMdTq49a69jIgwoG}^ zVrvNAC9v2hNBu~;b@=);SET}1nY~)g>=nGisP{a`z$VvB{ERo-Pf$Q8hUsfnfMXDi`Qi>3 zO_!0vF5=XSkiFynpUd27two&GNaV0!@GdmCDdgraph.log diff --git a/contrib/tlstest/server_nopass_client_auth.sh b/contrib/tlstest/server_nopass_client_auth.sh deleted file mode 100755 index 615400739bd..00000000000 --- a/contrib/tlstest/server_nopass_client_auth.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -../../dgraph/dgraph alpha --tls "ca-cert=ca.crt; client-cert=server.crt; client-key=server.key; client-auth-type=REQUIREANDVERIFY;" --zero 127.0.0.1:5081 diff --git a/contrib/tlstest/server_pass.sh b/contrib/tlstest/server_pass.sh deleted file mode 100755 index 551b914f993..00000000000 --- a/contrib/tlstest/server_pass.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -../../dgraph/dgraph alpha --tls "ca-cert=ca.crt; client-cert=server_pass.crt; client-key=server_pass.key;" --zero 127.0.0.1:5081 &>dgraph.log diff --git a/contrib/tlstest/test.sh b/contrib/tlstest/test.sh deleted file mode 100755 index e97df9c3813..00000000000 --- a/contrib/tlstest/test.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash -trap "cleanup" EXIT - -cleanup() { - killall -KILL dgraph >/dev/null 2>/dev/null -} - -ALPHA=$1 -LIVE=$2 -EXPECTED=$3 - -${DGRAPH_BIN} zero -w zw -o 1 >zero.log 2>&1 & -sleep 5 - -${ALPHA} >/dev/null 2>&1 & - -if [[ "x${RELOAD_TEST}" != "x" ]]; then - trap '' HUP - rm -f ./tls/ca.key - ${DGRAPH_BIN} cert -d "${PWD}"/tls -n localhost -c live --force - killall -HUP dgraph >/dev/null 2>/dev/null - sleep 3 -fi - -timeout 30s "${LIVE}" >live.log 2>&1 -RESULT=$? - -if [[ ${RESULT} != "${EXPECTED}" ]]; then - echo "${ALPHA} <-> ${LIVE}, Result: ${RESULT} != Expected: ${EXPECTED}" - exit 1 -fi - -exit 0 diff --git a/contrib/tlstest/test_reload.sh b/contrib/tlstest/test_reload.sh deleted file mode 100755 index 1f7558fcc0d..00000000000 --- a/contrib/tlstest/test_reload.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/bash - -trap "cleanup" EXIT - -cleanup() { - killall -9 dgraph >/dev/null 2>/dev/null -} - -ALPHA=./alpha_tls.sh -LIVE=./live_tls.sh -EXPECTED=1 - -${DGRAPH_BIN} zero -w zw -o 1 >zero.log 2>&1 & -sleep 5 - -# start the server -${ALPHA} >/dev/null 2>&1 & -timeout 30s "${LIVE}" >/dev/null 2>&1 -RESULT=$? - -# regenerate TLS certificate -rm -f ./tls/ca.key -${DGRAPH_BIN} cert -d "${PWD}"/tls -n localhost -c live --force -pkill -HUP dgraph >/dev/null 2>&1 - -# try to connect again -timeout 30s "${LIVE}" >/dev/null 2>&1 -RESULT=$? - -if [[ ${RESULT} == "${EXPECTED}" ]]; then - exit 0 -else - echo "Error while reloading TLS certificate" - exit 1 -fi diff --git a/contrib/wait-for-it.sh b/contrib/wait-for-it.sh deleted file mode 100755 index 822ce15816e..00000000000 --- a/contrib/wait-for-it.sh +++ /dev/null @@ -1,193 +0,0 @@ -#!/usr/bin/env bash -# Use this script to test if a given TCP host/port are available -# -# The MIT License (MIT) -# Copyright (c) 2016 Giles Hall -# -# Permission is hereby granted, free of charge, to any person obtaining a copy of -# this software and associated documentation files (the "Software"), to deal in -# the Software without restriction, including without limitation the rights to -# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -# of the Software, and to permit persons to whom the Software is furnished to do -# so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. 
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-# SOFTWARE.
-
-cmdname=$(basename "$0")
-
-echoerr() { if [[ ${QUIET} -ne 1 ]]; then echo "$@" 1>&2; fi; }
-
-usage() {
-	cat <<USAGE >&2
-Usage:
-	${cmdname} host:port [-s] [-t timeout] [-- command args]
-	-h HOST | --host=HOST Host or IP under test
-	-p PORT | --port=PORT TCP port under test
-	Alternatively, you specify the host and port as host:port
-	-s | --strict Only execute subcommand if the test succeeds
-	-q | --quiet Don't output any status messages
-	-t TIMEOUT | --timeout=TIMEOUT
-	Timeout in seconds, zero for no timeout
-	-- COMMAND ARGS Execute command with args after the test finishes
-USAGE
-	exit 1
-}
-
-wait_for() {
-	if [[ ${TIMEOUT} -gt 0 ]]; then
-		echoerr "${cmdname}: waiting ${TIMEOUT} seconds for ${HOST}:${PORT}"
-	else
-		echoerr "${cmdname}: waiting for ${HOST}:${PORT} without a timeout"
-	fi
-	start_ts=$(date +%s)
-	while :; do
-		if [[ ${ISBUSY} -eq 1 ]]; then
-			nc -z "${HOST}" "${PORT}"
-			result=$?
-		else
-			(echo >/dev/tcp/"${HOST}"/"${PORT}") >/dev/null 2>&1
-			result=$?
-		fi
-		if [[ ${result} -eq 0 ]]; then
-			end_ts=$(date +%s)
-			echoerr "${cmdname}: ${HOST}:${PORT} is available after $((end_ts - start_ts)) seconds"
-			break
-		fi
-		sleep 1
-	done
-	return "${result}"
-}
-
-wait_for_wrapper() {
-	# In order to support SIGINT during timeout: http://unix.stackexchange.com/a/57692
-	if [[ ${QUIET} -eq 1 ]]; then
-		timeout "${BUSYTIMEFLAG}" "${TIMEOUT}" "$0" --quiet --child --host="${HOST}" --port="${PORT}" --timeout="${TIMEOUT}" &
-	else
-		timeout "${BUSYTIMEFLAG}" "${TIMEOUT}" "$0" --child --host="${HOST}" --port="${PORT}" --timeout="${TIMEOUT}" &
-	fi
-	PID=$!
-	trap "kill -INT -${PID}" INT
-	wait "${PID}"
-	RESULT=$?
-	if [[ ${RESULT} -ne 0 ]]; then
-		echoerr "${cmdname}: timeout occurred after waiting ${TIMEOUT} seconds for ${HOST}:${PORT}"
-	fi
-	return "${RESULT}"
-}
-
-# process arguments
-while [[ $# -gt 0 ]]; do
-	case "$1" in
-	*:*)
-		hostport=(${1//:/ })
-		HOST=${hostport[0]}
-		PORT=${hostport[1]}
-		shift 1
-		;;
-	--child)
-		CHILD=1
-		shift 1
-		;;
-	-q | --quiet)
-		QUIET=1
-		shift 1
-		;;
-	-s | --strict)
-		STRICT=1
-		shift 1
-		;;
-	-h)
-		HOST="$2"
-		if [[ ${HOST} == "" ]]; then break; fi
-		shift 2
-		;;
-	--host=*)
-		HOST="${1#*=}"
-		shift 1
-		;;
-	-p)
-		PORT="$2"
-		if [[ ${PORT} == "" ]]; then break; fi
-		shift 2
-		;;
-	--port=*)
-		PORT="${1#*=}"
-		shift 1
-		;;
-	-t)
-		TIMEOUT="$2"
-		if [[ ${TIMEOUT} == "" ]]; then break; fi
-		shift 2
-		;;
-	--timeout=*)
-		TIMEOUT="${1#*=}"
-		shift 1
-		;;
-	--)
-		shift
-		CLI=("$@")
-		break
-		;;
-	--help)
-		usage
-		;;
-	*)
-		echoerr "Unknown argument: $1"
-		usage
-		;;
-	esac
-done
-
-if [[ ${HOST} == "" || ${PORT} == "" ]]; then
-	echoerr "Error: you need to provide a host and port to test."
-	usage
-fi
-
-TIMEOUT=${TIMEOUT:-15}
-STRICT=${STRICT:-0}
-CHILD=${CHILD:-0}
-QUIET=${QUIET:-0}
-
-# check to see if timeout is from busybox
-TIMEOUT_PATH=$(realpath $(which timeout)) -if [[ ${TIMEOUT_PATH} =~ "busybox" ]]; then - ISBUSY=1 - BUSYTIMEFLAG="-t" -else - ISBUSY=0 - BUSYTIMEFLAG="" -fi - -if [[ ${CHILD} -gt 0 ]]; then - wait_for - RESULT=$? - exit "${RESULT}" -else - if [[ ${TIMEOUT} -gt 0 ]]; then - wait_for_wrapper - RESULT=$? - else - wait_for - RESULT=$? - fi -fi - -if [[ ${CLI} != "" ]]; then - if [[ ${RESULT} -ne 0 && ${STRICT} -eq 1 ]]; then - echoerr "${cmdname}: strict mode, refusing to execute subprocess" - exit "${RESULT}" - fi - exec "${CLI[@]}" -else - exit "${RESULT}" -fi diff --git a/contrib/integration/testtxn/main_test.go b/dgraph/cmd/alpha/txn_test.go similarity index 84% rename from contrib/integration/testtxn/main_test.go rename to dgraph/cmd/alpha/txn_test.go index 79006b7aa5a..705cbaf159d 100644 --- a/contrib/integration/testtxn/main_test.go +++ b/dgraph/cmd/alpha/txn_test.go @@ -5,7 +5,7 @@ * SPDX-License-Identifier: Apache-2.0 */ -package main_test +package alpha import ( "bytes" @@ -24,41 +24,14 @@ import ( "github.com/dgraph-io/dgo/v240" "github.com/dgraph-io/dgo/v240/protos/api" - "github.com/hypermodeinc/dgraph/v24/dgraphapi" - "github.com/hypermodeinc/dgraph/v24/dgraphtest" - "github.com/hypermodeinc/dgraph/v24/x" ) -type state struct { - dg *dgraphapi.GrpcClient -} - -var s state - -func TestMain(m *testing.M) { - log.SetFlags(log.LstdFlags | log.Lshortfile) - conf := dgraphtest.NewClusterConfig().WithNumAlphas(1).WithNumZeros(1).WithReplicas(1).WithACL(time.Hour) - c, err := dgraphtest.NewLocalCluster(conf) - - x.Panic(err) - x.Panic(c.Start()) - dg, cleanup, err := c.Client() - x.Panic(err) - defer cleanup() - - x.Panic(dg.Login(context.Background(), dgraphapi.DefaultUser, dgraphapi.DefaultPassword)) - x.Panic(c.AssignUids(dg.Dgraph, 200)) - s.dg = dg - _ = m.Run() -} - -// readTs == startTs func TestTxnRead1(t *testing.T) { op := &api.Operation{} op.DropAll = true - require.NoError(t, s.dg.Alter(context.Background(), op)) + require.NoError(t, dg.Alter(context.Background(), op)) - txn := s.dg.NewTxn() + txn := dg.NewTxn() mu := &api.Mutation{} mu.SetJson = []byte(`{"name": "Manish"}`) assigned, err := txn.Mutate(context.Background(), mu) @@ -80,7 +53,7 @@ func TestTxnRead1(t *testing.T) { // readTs < commitTs func TestTxnRead2(t *testing.T) { - txn := s.dg.NewTxn() + txn := dg.NewTxn() mu := &api.Mutation{} mu.SetJson = []byte(`{"name": "Manish"}`) @@ -96,7 +69,7 @@ func TestTxnRead2(t *testing.T) { uid = u } - txn2 := s.dg.NewTxn() + txn2 := dg.NewTxn() q := fmt.Sprintf(`{ me(func: uid(%s)) { name }}`, uid) resp, err := txn2.Query(context.Background(), q) @@ -113,13 +86,13 @@ func TestTxnRead3(t *testing.T) { op.DropAttr = "name" attempts := 0 for attempts < 10 { - if err := s.dg.Alter(context.Background(), op); err == nil { + if err := dg.Alter(context.Background(), op); err == nil { break } attempts++ } - txn := s.dg.NewTxn() + txn := dg.NewTxn() mu := &api.Mutation{} mu.SetJson = []byte(`{"name": "Manish"}`) @@ -136,7 +109,7 @@ func TestTxnRead3(t *testing.T) { } require.NoError(t, txn.Commit(context.Background())) - txn = s.dg.NewTxn() + txn = dg.NewTxn() q := fmt.Sprintf(`{ me(func: uid(%s)) { name }}`, uid) resp, err := txn.Query(context.Background(), q) if err != nil { @@ -147,7 +120,7 @@ func TestTxnRead3(t *testing.T) { // readTs > commitTs func TestTxnRead4(t *testing.T) { - txn := s.dg.NewTxn() + txn := dg.NewTxn() mu := &api.Mutation{} mu.SetJson = []byte(`{"name": "Manish"}`) @@ -164,9 +137,9 @@ func TestTxnRead4(t *testing.T) { } require.NoError(t, 
txn.Commit(context.Background())) - txn2 := s.dg.NewTxn() + txn2 := dg.NewTxn() - txn3 := s.dg.NewTxn() + txn3 := dg.NewTxn() mu = &api.Mutation{} mu.SetJson = []byte(fmt.Sprintf(`{"uid": "%s", "name": "Manish2"}`, uid)) _, err = txn3.Mutate(context.Background(), mu) @@ -182,7 +155,7 @@ func TestTxnRead4(t *testing.T) { require.NoError(t, txn3.Commit(context.Background())) - txn4 := s.dg.NewTxn() + txn4 := dg.NewTxn() q = fmt.Sprintf(`{ me(func: uid(%s)) { name }}`, uid) resp, err = txn4.Query(context.Background(), q) if err != nil { @@ -192,7 +165,7 @@ func TestTxnRead4(t *testing.T) { } func TestTxnRead5(t *testing.T) { - txn := s.dg.NewTxn() + txn := dg.NewTxn() mu := &api.Mutation{} mu.SetJson = []byte(`{"name": "Manish"}`) @@ -211,7 +184,7 @@ func TestTxnRead5(t *testing.T) { require.NoError(t, txn.Commit(context.Background())) q := fmt.Sprintf(`{ me(func: uid(%s)) { name }}`, uid) - txn = s.dg.NewReadOnlyTxn() + txn = dg.NewReadOnlyTxn() resp, err := txn.Query(context.Background(), q) if err != nil { log.Fatalf("Error while running query: %v\n", err) @@ -222,13 +195,13 @@ func TestTxnRead5(t *testing.T) { mu = &api.Mutation{CommitNow: true} mu.SetJson = []byte(fmt.Sprintf("{\"uid\": \"%s\", \"name\": \"Manish2\"}", uid)) - txn = s.dg.NewTxn() + txn = dg.NewTxn() res, err := txn.Mutate(context.Background(), mu) if err != nil { log.Fatalf("Error while running mutation: %v\n", err) } require.True(t, res.Txn.StartTs > 0) - txn = s.dg.NewReadOnlyTxn() + txn = dg.NewReadOnlyTxn() resp, err = txn.Query(context.Background(), q) if err != nil { log.Fatalf("Error while running query: %v\n", err) @@ -239,9 +212,9 @@ func TestTxnRead5(t *testing.T) { func TestConflict(t *testing.T) { op := &api.Operation{} op.DropAll = true - require.NoError(t, s.dg.Alter(context.Background(), op)) + require.NoError(t, dg.Alter(context.Background(), op)) - txn := s.dg.NewTxn() + txn := dg.NewTxn() mu := &api.Mutation{} mu.SetJson = []byte(`{"name": "Manish"}`) @@ -257,7 +230,7 @@ func TestConflict(t *testing.T) { uid = u } - txn2 := s.dg.NewTxn() + txn2 := dg.NewTxn() mu = &api.Mutation{} mu.SetJson = []byte(fmt.Sprintf(`{"uid": "%s", "name": "Manish"}`, uid)) _, err = txn2.Mutate(context.Background(), mu) @@ -266,7 +239,7 @@ func TestConflict(t *testing.T) { require.NoError(t, txn.Commit(context.Background())) require.Error(t, txn2.Commit(context.Background())) - txn = s.dg.NewTxn() + txn = dg.NewTxn() q := fmt.Sprintf(`{ me(func: uid(%s)) { name }}`, uid) resp, err := txn.Query(context.Background(), q) if err != nil { @@ -277,7 +250,7 @@ func TestConflict(t *testing.T) { func TestConflictTimeout(t *testing.T) { var uid string - txn := s.dg.NewTxn() + txn := dg.NewTxn() { mu := &api.Mutation{} mu.SetJson = []byte(`{"name": "Manish"}`) @@ -293,7 +266,7 @@ func TestConflictTimeout(t *testing.T) { } } - txn2 := s.dg.NewTxn() + txn2 := dg.NewTxn() q := fmt.Sprintf(`{ me(func: uid(%s)) { name }}`, uid) _, err := txn2.Query(context.Background(), q) require.NoError(t, err) @@ -307,7 +280,7 @@ func TestConflictTimeout(t *testing.T) { require.Error(t, txn.Commit(context.Background())) - txn3 := s.dg.NewTxn() + txn3 := dg.NewTxn() q = fmt.Sprintf(`{ me(func: uid(%s)) { name }}`, uid) _, err = txn3.Query(context.Background(), q) require.NoError(t, err) @@ -315,7 +288,7 @@ func TestConflictTimeout(t *testing.T) { func TestConflictTimeout2(t *testing.T) { var uid string - txn := s.dg.NewTxn() + txn := dg.NewTxn() { mu := &api.Mutation{} @@ -332,7 +305,7 @@ func TestConflictTimeout2(t *testing.T) { } } - txn2 := 
s.dg.NewTxn() + txn2 := dg.NewTxn() mu := &api.Mutation{} mu.SetJson = []byte(fmt.Sprintf(`{"uid": "%s", "name": "Jan the man"}`, uid)) _, err := txn2.Mutate(context.Background(), mu) @@ -341,7 +314,7 @@ func TestConflictTimeout2(t *testing.T) { require.NoError(t, txn.Commit(context.Background())) require.Error(t, txn2.Commit(context.Background())) - txn3 := s.dg.NewTxn() + txn3 := dg.NewTxn() mu = &api.Mutation{} mu.SetJson = []byte(fmt.Sprintf(`{"uid": "%s", "name": "Jan the man"}`, uid)) assigned, err := txn3.Mutate(context.Background(), mu) @@ -352,7 +325,7 @@ func TestConflictTimeout2(t *testing.T) { uid = u } - txn4 := s.dg.NewTxn() + txn4 := dg.NewTxn() q := fmt.Sprintf(`{ me(func: uid(%s)) { name }}`, uid) _, err = txn4.Query(context.Background(), q) require.NoError(t, err) @@ -361,15 +334,15 @@ func TestConflictTimeout2(t *testing.T) { func TestIgnoreIndexConflict(t *testing.T) { op := &api.Operation{} op.DropAll = true - require.NoError(t, s.dg.Alter(context.Background(), op)) + require.NoError(t, dg.Alter(context.Background(), op)) op = &api.Operation{} op.Schema = `name: string @index(exact) .` - if err := s.dg.Alter(context.Background(), op); err != nil { + if err := dg.Alter(context.Background(), op); err != nil { log.Fatal(err) } - txn := s.dg.NewTxn() + txn := dg.NewTxn() mu := &api.Mutation{} mu.SetJson = []byte(`{"name": "Manish"}`) assigned, err := txn.Mutate(context.Background(), mu) @@ -384,7 +357,7 @@ func TestIgnoreIndexConflict(t *testing.T) { uid1 = u } - txn2 := s.dg.NewTxn() + txn2 := dg.NewTxn() mu = &api.Mutation{} mu.SetJson = []byte(`{"name": "Manish"}`) assigned, err = txn2.Mutate(context.Background(), mu) @@ -401,7 +374,7 @@ func TestIgnoreIndexConflict(t *testing.T) { require.NoError(t, txn.Commit(context.Background())) require.NoError(t, txn2.Commit(context.Background())) - txn = s.dg.NewTxn() + txn = dg.NewTxn() q := `{ me(func: eq(name, "Manish")) { uid }}` resp, err := txn.Query(context.Background(), q) require.NoError(t, err) @@ -412,15 +385,15 @@ func TestIgnoreIndexConflict(t *testing.T) { func TestReadIndexKeySameTxn(t *testing.T) { op := &api.Operation{} op.DropAll = true - require.NoError(t, s.dg.Alter(context.Background(), op)) + require.NoError(t, dg.Alter(context.Background(), op)) op = &api.Operation{} op.Schema = `name: string @index(exact) .` - if err := s.dg.Alter(context.Background(), op); err != nil { + if err := dg.Alter(context.Background(), op); err != nil { log.Fatal(err) } - txn := s.dg.NewTxn() + txn := dg.NewTxn() mu := &api.Mutation{ CommitNow: true, SetJson: []byte(`{"name": "Manish"}`), @@ -437,7 +410,7 @@ func TestReadIndexKeySameTxn(t *testing.T) { uid = u } - txn = s.dg.NewTxn() + txn = dg.NewTxn() defer func() { require.NoError(t, txn.Discard(context.Background())) }() q := `{ me(func: le(name, "Manish")) { uid }}` resp, err := txn.Query(context.Background(), q) @@ -451,27 +424,27 @@ func TestReadIndexKeySameTxn(t *testing.T) { func TestEmailUpsert(t *testing.T) { op := &api.Operation{} op.DropAll = true - require.NoError(t, s.dg.Alter(context.Background(), op)) + require.NoError(t, dg.Alter(context.Background(), op)) op = &api.Operation{} op.Schema = `email: string @index(exact) @upsert .` - if err := s.dg.Alter(context.Background(), op); err != nil { + if err := dg.Alter(context.Background(), op); err != nil { log.Fatal(err) } - txn1 := s.dg.NewTxn() + txn1 := dg.NewTxn() mu := &api.Mutation{} mu.SetJson = []byte(`{"uid": "_:user1", "email": "email@email.org"}`) _, err := txn1.Mutate(context.Background(), mu) 
assert.Nil(t, err) - txn2 := s.dg.NewTxn() + txn2 := dg.NewTxn() mu = &api.Mutation{} mu.SetJson = []byte(`{"uid": "_:user2", "email": "email@email.org"}`) _, err = txn2.Mutate(context.Background(), mu) assert.Nil(t, err) - txn3 := s.dg.NewTxn() + txn3 := dg.NewTxn() mu = &api.Mutation{} mu.SetJson = []byte(`{"uid": "_:user3", "email": "email3@email.org"}`) _, err = txn3.Mutate(context.Background(), mu) @@ -487,28 +460,28 @@ func TestEmailUpsert(t *testing.T) { func TestFriendList(t *testing.T) { op := &api.Operation{} op.DropAll = true - require.NoError(t, s.dg.Alter(context.Background(), op)) + require.NoError(t, dg.Alter(context.Background(), op)) op = &api.Operation{} op.Schema = ` - friend: [uid] @reverse .` - if err := s.dg.Alter(context.Background(), op); err != nil { + friend: [uid] @reverse .` + if err := dg.Alter(context.Background(), op); err != nil { log.Fatal(err) } - txn1 := s.dg.NewTxn() + txn1 := dg.NewTxn() mu := &api.Mutation{} mu.SetJson = []byte(`{"uid": "0x01", "friend": [{"uid": "0x02"}]}`) _, err := txn1.Mutate(context.Background(), mu) assert.Nil(t, err) - txn2 := s.dg.NewTxn() + txn2 := dg.NewTxn() mu = &api.Mutation{} mu.SetJson = []byte(`{"uid": "0x01", "friend": [{"uid": "0x02"}]}`) _, err = txn2.Mutate(context.Background(), mu) assert.Nil(t, err) - txn3 := s.dg.NewTxn() + txn3 := dg.NewTxn() mu = &api.Mutation{} mu.SetJson = []byte(`{"uid": "0x01", "friend": [{"uid": "0x03"}]}`) _, err = txn3.Mutate(context.Background(), mu) @@ -524,21 +497,21 @@ func TestFriendList(t *testing.T) { func TestNameSet(t *testing.T) { op := &api.Operation{} op.DropAll = true - require.NoError(t, s.dg.Alter(context.Background(), op)) + require.NoError(t, dg.Alter(context.Background(), op)) op = &api.Operation{} op.Schema = `name: string .` - if err := s.dg.Alter(context.Background(), op); err != nil { + if err := dg.Alter(context.Background(), op); err != nil { log.Fatal(err) } - txn1 := s.dg.NewTxn() + txn1 := dg.NewTxn() mu := &api.Mutation{} mu.SetJson = []byte(`{"uid": "0x01", "name": "manish"}`) _, err := txn1.Mutate(context.Background(), mu) assert.Nil(t, err) - txn2 := s.dg.NewTxn() + txn2 := dg.NewTxn() mu = &api.Mutation{} mu.SetJson = []byte(`{"uid": "0x01", "name": "contributor"}`) _, err = txn2.Mutate(context.Background(), mu) @@ -576,13 +549,13 @@ func retrieveUids(t *testing.T, uidMap map[string]string) []string { func TestSPStar(t *testing.T) { op := &api.Operation{} op.DropAll = true - require.NoError(t, s.dg.Alter(context.Background(), op)) + require.NoError(t, dg.Alter(context.Background(), op)) op = &api.Operation{} op.Schema = `friend: [uid] .` - require.NoError(t, s.dg.Alter(context.Background(), op)) + require.NoError(t, dg.Alter(context.Background(), op)) - txn := s.dg.NewTxn() + txn := dg.NewTxn() mu := &api.Mutation{} mu.SetJson = []byte(`{"name": "Manish", "friend": [{"name": "Jan"}]}`) assigned, err := txn.Mutate(context.Background(), mu) @@ -592,7 +565,7 @@ func TestSPStar(t *testing.T) { require.Equal(t, 2, len(assigned.Uids)) require.NoError(t, txn.Commit(context.Background())) - txn = s.dg.NewTxn() + txn = dg.NewTxn() mu = &api.Mutation{} dgo.DeleteEdges(mu, uid1, "friend") assigned, err = txn.Mutate(context.Background(), mu) @@ -607,14 +580,14 @@ func TestSPStar(t *testing.T) { uid2 := retrieveUids(t, assigned.Uids)[0] q := fmt.Sprintf(`{ - me(func: uid(%s)) { - uid - friend { - uid - name - } - } - }`, uid1) + me(func: uid(%s)) { + uid + friend { + uid + name + } + } + }`, uid1) resp, err := txn.Query(context.Background(), q) require.NoError(t, err) 
@@ -625,14 +598,14 @@ func TestSPStar(t *testing.T) { func TestSPStar2(t *testing.T) { op := &api.Operation{} op.DropAll = true - require.NoError(t, s.dg.Alter(context.Background(), op)) + require.NoError(t, dg.Alter(context.Background(), op)) op = &api.Operation{} op.Schema = `friend: [uid] .` - require.NoError(t, s.dg.Alter(context.Background(), op)) + require.NoError(t, dg.Alter(context.Background(), op)) // Add edge - txn := s.dg.NewTxn() + txn := dg.NewTxn() mu := &api.Mutation{} mu.SetJson = []byte(`{"name": "Manish", "friend": [{"name": "Jan"}]}`) assigned, err := txn.Mutate(context.Background(), mu) @@ -644,14 +617,14 @@ func TestSPStar2(t *testing.T) { uid1 := uids[0] uid2 := uids[1] q := fmt.Sprintf(`{ - me(func: uid(%s)) { - uid - friend { - uid - name - } - } - }`, uid1) + me(func: uid(%s)) { + uid + friend { + uid + name + } + } + }`, uid1) resp, err := txn.Query(context.Background(), q) require.NoError(t, err) @@ -712,33 +685,33 @@ func TestSPStar2(t *testing.T) { var ( ctxb = context.Background() countQuery = ` -query countAnswers($num: int) { - me(func: eq(count(answer), $num)) { - uid - count(answer) - } -} -` + query countAnswers($num: int) { + me(func: eq(count(answer), $num)) { + uid + count(answer) + } + } + ` ) func TestCountIndexConcurrentTxns(t *testing.T) { - require.NoError(t, s.dg.DropAll()) - require.NoError(t, s.dg.SetupSchema("answer: [uid] @count .")) + require.NoError(t, dg.DropAll()) + require.NoError(t, dg.SetupSchema("answer: [uid] @count .")) // Expected edge count of 0x100: 1 - txn0 := s.dg.NewTxn() + txn0 := dg.NewTxn() mu := api.Mutation{SetNquads: []byte("<0x100> <answer> <0x200> .")} _, err := txn0.Mutate(ctxb, &mu) require.NoError(t, err) require.NoError(t, txn0.Commit(ctxb)) // The following two mutations are in separate interleaved txns. 
- txn1 := s.dg.NewTxn() + txn1 := dg.NewTxn() mu = api.Mutation{SetNquads: []byte("<0x1> <answer> <0x2> .")} _, err = txn1.Mutate(ctxb, &mu) require.NoError(t, err) - txn2 := s.dg.NewTxn() + txn2 := dg.NewTxn() mu = api.Mutation{SetNquads: []byte("<0x1> <answer> <0x3> .")} _, err = txn2.Mutate(ctxb, &mu) require.NoError(t, err) @@ -748,13 +721,13 @@ func TestCountIndexConcurrentTxns(t *testing.T) { "the txn2 should be aborted due to concurrent update on the count index of <0x01>") // retry the mutation - txn3 := s.dg.NewTxn() + txn3 := dg.NewTxn() _, err = txn3.Mutate(ctxb, &mu) require.NoError(t, err) require.NoError(t, txn3.Commit(ctxb)) // Verify count queries - txn := s.dg.NewReadOnlyTxn() + txn := dg.NewReadOnlyTxn() vars := map[string]string{"$num": "1"} resp, err := txn.QueryWithVars(ctxb, countQuery, vars) require.NoError(t, err) @@ -762,7 +735,7 @@ func TestCountIndexConcurrentTxns(t *testing.T) { require.JSONEq(t, `{"me": [{"count(answer)": 1, "uid": "0x100"}]}`, js) - txn = s.dg.NewReadOnlyTxn() + txn = dg.NewReadOnlyTxn() vars = map[string]string{"$num": "2"} resp, err = txn.QueryWithVars(ctxb, countQuery, vars) require.NoError(t, err) @@ -773,11 +746,11 @@ func TestCountIndexConcurrentTxns(t *testing.T) { } func TestCountIndexSerialTxns(t *testing.T) { - require.NoError(t, s.dg.DropAll()) - require.NoError(t, s.dg.SetupSchema("answer: [uid] @count .")) + require.NoError(t, dg.DropAll()) + require.NoError(t, dg.SetupSchema("answer: [uid] @count .")) // Expected Edge count of 0x100: 1 - txn0 := s.dg.NewTxn() + txn0 := dg.NewTxn() mu := api.Mutation{SetNquads: []byte("<0x100> <answer> <0x200> .")} _, err := txn0.Mutate(ctxb, &mu) require.NoError(t, err) @@ -786,20 +759,20 @@ func TestCountIndexSerialTxns(t *testing.T) { // Expected edge count of 0x1: 2 // This should NOT appear in the query result // The following two mutations are in serial txns. - txn1 := s.dg.NewTxn() + txn1 := dg.NewTxn() mu = api.Mutation{SetNquads: []byte("<0x1> <answer> <0x2> .")} _, err = txn1.Mutate(ctxb, &mu) require.NoError(t, err) require.NoError(t, txn1.Commit(ctxb)) - txn2 := s.dg.NewTxn() + txn2 := dg.NewTxn() mu = api.Mutation{SetNquads: []byte("<0x1> <answer> <0x3> .")} _, err = txn2.Mutate(ctxb, &mu) require.NoError(t, err) require.NoError(t, txn2.Commit(ctxb)) // Verify query - txn := s.dg.NewReadOnlyTxn() + txn := dg.NewReadOnlyTxn() vars := map[string]string{"$num": "1"} resp, err := txn.QueryWithVars(ctxb, countQuery, vars) require.NoError(t, err) @@ -807,7 +780,7 @@ func TestCountIndexSerialTxns(t *testing.T) { require.JSONEq(t, `{"me": [{"count(answer)": 1, "uid": "0x100"}]}`, js) - txn = s.dg.NewReadOnlyTxn() + txn = dg.NewReadOnlyTxn() vars = map[string]string{"$num": "2"} resp, err = txn.QueryWithVars(ctxb, countQuery, vars) require.NoError(t, err) @@ -818,11 +791,11 @@ func TestCountIndexSerialTxns(t *testing.T) { } func TestCountIndexSameTxn(t *testing.T) { - require.NoError(t, s.dg.DropAll()) - require.NoError(t, s.dg.SetupSchema("answer: [uid] @count .")) + require.NoError(t, dg.DropAll()) + require.NoError(t, dg.SetupSchema("answer: [uid] @count .")) // Expected Edge count of 0x100: 1 - txn0 := s.dg.NewTxn() + txn0 := dg.NewTxn() mu := api.Mutation{SetNquads: []byte("<0x100> <answer> <0x200> .")} _, err := txn0.Mutate(ctxb, &mu) require.NoError(t, err) @@ -831,7 +804,7 @@ func TestCountIndexSameTxn(t *testing.T) { // Expected edge count of 0x1: 2 // This should NOT appear in the query result // The following two mutations are in the same txn. 
- txn1 := s.dg.NewTxn() + txn1 := dg.NewTxn() mu = api.Mutation{SetNquads: []byte("<0x1> <answer> <0x2> .")} _, err = txn1.Mutate(ctxb, &mu) require.NoError(t, err) @@ -841,7 +814,7 @@ func TestCountIndexSameTxn(t *testing.T) { require.NoError(t, txn1.Commit(ctxb)) // Verify query - txn := s.dg.NewReadOnlyTxn() + txn := dg.NewReadOnlyTxn() vars := map[string]string{"$num": "1"} resp, err := txn.QueryWithVars(ctxb, countQuery, vars) require.NoError(t, err) @@ -849,7 +822,7 @@ func TestCountIndexSameTxn(t *testing.T) { require.JSONEq(t, `{"me": [{"count(answer)": 1, "uid": "0x100"}]}`, js) - txn = s.dg.NewReadOnlyTxn() + txn = dg.NewReadOnlyTxn() vars = map[string]string{"$num": "2"} resp, err = txn.QueryWithVars(ctxb, countQuery, vars) require.NoError(t, err) @@ -860,9 +833,9 @@ func TestCountIndexSameTxn(t *testing.T) { } func TestConcurrentQueryMutate(t *testing.T) { - require.NoError(t, s.dg.DropAll()) - require.NoError(t, s.dg.SetupSchema("name: string .")) - txn := s.dg.NewTxn() + require.NoError(t, dg.DropAll()) + require.NoError(t, dg.SetupSchema("name: string .")) + txn := dg.NewTxn() defer func() { require.NoError(t, txn.Discard(context.Background())) }() // Do one query, so a new timestamp is assigned to the txn. @@ -895,10 +868,10 @@ func TestConcurrentQueryMutate(t *testing.T) { } func TestTxnDiscardBeforeCommit(t *testing.T) { - require.NoError(t, s.dg.DropAll()) - require.NoError(t, s.dg.SetupSchema("name: string .")) + require.NoError(t, dg.DropAll()) + require.NoError(t, dg.SetupSchema("name: string .")) - txn := s.dg.NewTxn() + txn := dg.NewTxn() mu := &api.Mutation{ SetNquads: []byte(`_:1 <name> "abc" .`), } @@ -909,8 +882,3 @@ func TestTxnDiscardBeforeCommit(t *testing.T) { // Since client is discarding this transaction server should not throw ErrAborted err. require.NotEqual(t, err, dgo.ErrAborted) } - -func alterSchema(t *testing.T, dg *dgo.Dgraph, schema string) { - op := api.Operation{Schema: schema} - require.NoError(t, dg.Alter(ctxb, &op)) -} diff --git a/t/t.go b/t/t.go index 935c2eee0f1..1d541265f22 100644 --- a/t/t.go +++ b/t/t.go @@ -795,7 +795,6 @@ var loadPackages = []string{ "/systest/bulk_live/bulk", "/systest/bulk_live/live", "/systest/bgindex", - "/contrib/scripts", "/dgraph/cmd/bulk/systest", } diff --git a/test.sh b/test.sh deleted file mode 100755 index d006a2ea02f..00000000000 --- a/test.sh +++ /dev/null @@ -1,298 +0,0 @@ -#!/usr/bin/env bash -# -# usage: test.sh [[pkg_regex] test_regex] - -# Notes for testing under macOS (Sierra and up) -# Required Homebrew (https://brew.sh/) packages: -# - bash -# - curl -# - coreutils -# - gnu-getopt -# - findutils -# -# Your $PATH must have all required packages in .bashrc: -# PATH="/usr/local/opt/gnu-getopt/bin:$PATH" -# PATH="/usr/local/opt/curl/bin:$PATH" -# PATH="/usr/local/opt/coreutils/libexec/gnubin:$PATH" -# PATH="/usr/local/opt/findutils/libexec/gnubin:$PATH" -# export PATH -# -# After brew packages and PATHs are set, run tests with: -# /usr/local/bin/bash test.sh -# -# Keep in mind that the test build will overwrite the "dgraph" -# binary in your $GOPATH/bin with the Linux-ELF binary for Docker. 
- -set -e -readonly ME=${0##*/} -readonly DGRAPH_ROOT=$(dirname "$0") - -source "${DGRAPH_ROOT}"/contrib/scripts/functions.sh - -PATH+=:${DGRAPH_ROOT}/contrib/scripts/ -GO_TEST_OPTS=() -TEST_FAILED=0 -TEST_SET="unit" -BUILD_TAGS= - -# -# Functions -# - -function Usage { - echo "usage: ${ME} [opts] [[pkg_regex] test_regex] - -options: - - -h --help output this help message - -u --unit run unit tests only - -c --cluster run unit tests and custom cluster test - -C --cluster-only run custom cluster tests only - -f --full run all tests (unit, custom cluster, and systest tests) - -F --systest-only run systest tests only - --oss run tests with 'oss' tagging - -v --verbose run tests in verbose mode - -n --no-cache re-run test even if previous result is in cache - --short run tests with -short=true - -notes: - - Specifying pkg_regex implies -c." -} - -function Info { - echo -e "\e[1;36mINFO: $*\e[0m" -} - -function FmtTime { - local secs=$(($1 % 60)) min=$(($1 / 60 % 60)) hrs=$(($1 / 60 / 60)) - - [[ ${hrs} -gt 0 ]] && printf "%dh " "${hrs}" - [[ ${hrs} -gt 0 || ${min} -gt 0 ]] && printf "%dm " "${min}" - printf "%ds" "${secs}" -} - -function IsCi { - [[ -n ${TEAMCITY_VERSION} ]] -} - -function TestFailed { - TEST_FAILED=1 - [[ -n ${CURRENT_TEST} ]] && echo "${CURRENT_TEST}" >>"${FAILED_TESTS}" -} - -function ListFailedTests { - echo -en "\e[1;31m" - sed 's/^/ /' "${FAILED_TESTS}" - echo -en "\e[0m" -} - -function FindCustomClusterTests { - # look for directories containing a docker compose and *_test.go files - touch "${CUSTOM_CLUSTER_TESTS}" - for FILE in $(find -type f -name docker-compose.yml); do - DIR=$(dirname "${FILE}") - if grep -q "${DIR}" "${MATCHING_TESTS}" && ls "${DIR}" | grep -q "_test.go$"; then - echo "${DIR:1}\$" >>"${CUSTOM_CLUSTER_TESTS}" - fi - done -} - -function FindDefaultClusterTests { - touch "${DEFAULT_CLUSTER_TESTS}" - for PKG in $(grep -v -f "${CUSTOM_CLUSTER_TESTS}" "${MATCHING_TESTS}"); do - echo "${PKG}" >>"${DEFAULT_CLUSTER_TESTS}" - done -} - -function Run { - set -o pipefail - echo -en "...\r" - if IsCi; then - go test -json -v "${GO_TEST_OPTS[*]}" $@ - return - fi - go test "${GO_TEST_OPTS[*]}" $@ | - GREP_COLORS='ne:mt=01;32' egrep --line-buffered --color=always '^ok\ .*|$' | - GREP_COLORS='ne:mt=00;38;5;226' egrep --line-buffered --color=always '^\?\ .*|$' | - GREP_COLORS='ne:mt=01;31' egrep --line-buffered --color=always '.*FAIL.*|$' -} - -function RunCmd { - CURRENT_TEST=$1 - IsCi && echo "##teamcity[testStarted name='$1' captureStandardOutput='true']" - if eval "$@"; then - echo -e "\e[1;32mok $1\e[0m" - IsCi && echo "##teamcity[testFinished name='$1']" - return 0 - else - echo -e "\e[1;31mfail $1\e[0m" - IsCi && echo "##teamcity[testFailed name='$1']" - return 1 - fi -} - -function RunDefaultClusterTests { - while read -r PKG; do - Info "Running test for ${PKG}" - CURRENT_TEST=${PKG} - Run "${PKG}" || TestFailed - done <"${DEFAULT_CLUSTER_TESTS}" - CURRENT_TEST= - return "${TEST_FAILED}" -} - -function RunCustomClusterTests { - while read -r LINE; do - DIR="${LINE:1:-1}" - CFG="${DIR}/docker-compose.yml" - Info "Running tests in directory ${DIR}" - restartCluster "${DIR}"/docker-compose.yml - pushd "${DIR}" >/dev/null - CURRENT_TEST=${DIR} - Run || TestFailed - popd >/dev/null - done <"${CUSTOM_CLUSTER_TESTS}" - CURRENT_TEST= - return "${TEST_FAILED}" -} - -# -# MAIN -# - -echo "test.sh is DEPRECATED. Please use the Go script in t directory instead." 
- -ARGS=$(getopt -n"${ME}" -o"hucCfFvn" \ - -l"help,unit,cluster,cluster-only,full,systest-only,oss,verbose,no-cache,short,timeout:" -- "$@") || - exit 1 -eval set -- "${ARGS}" -while true; do - case "$1" in - -h | --help) - Usage - exit 0 - ;; - -u | --unit) TEST_SET="unit" ;; - -c | --cluster) TEST_SET="unit:cluster" ;; - -C | --cluster-only) TEST_SET="cluster" ;; - -f | --full) TEST_SET="unit:cluster:systest" ;; - -F | --systest-only) TEST_SET="systest" ;; - -v | --verbose) GO_TEST_OPTS+=("-v") ;; - -n | --no-cache) GO_TEST_OPTS+=("-count=1") ;; - --oss) GO_TEST_OPTS+=("-tags=oss") ;; - --short) GO_TEST_OPTS+=("-short=true") ;; - -t | --timeout) GO_TEST_OPTS+=("-timeout=$2") ;; - --) - shift - break - ;; - esac - shift -done - -cd "${DGRAPH_ROOT}" - -# tests should put temp files under this directory for easier cleanup -export TMPDIR=$(mktemp --tmpdir --directory "${ME}".tmp-XXXXXX) -trap "rm -rf ${TMPDIR}" EXIT - -# docker-compose files may use this to run as user instead of as root -export UID - -MATCHING_TESTS=${TMPDIR}/tests -CUSTOM_CLUSTER_TESTS=${TMPDIR}/custom -DEFAULT_CLUSTER_TESTS=${TMPDIR}/default -FAILED_TESTS=${TMPDIR}/failures - -if [[ $# -eq 0 ]]; then - go list ./... >"${MATCHING_TESTS}" - if [[ ${TEST_SET} == unit ]]; then - Info "Running only unit tests" - fi -elif [[ $# -eq 1 || $# -eq 2 ]]; then - # Remove the trailing slash from pkg_regex. - # This is helpful when autocomplete returns something like `dirname/`. - REGEX=${1%/} - go list ./... | grep "${REGEX}" >"${MATCHING_TESTS}" - Info "Running only tests matching '${REGEX}'" - RUN_ALL= - - if [[ $# -eq 2 ]]; then - GO_TEST_OPTS+=("-v" "-run=$2") - fi -else - echo >&2 "usage: ${ME} [pkg_regex [test_regex]]" - exit 1 -fi - -# assemble list of tests before executing any -FindCustomClusterTests -FindDefaultClusterTests - -# abort all tests on Ctrl-C, not just the current one -trap "echo >&2 SIGINT ; exit 2" SIGINT - -START_TIME=$(date +%s) - -if [[ :${TEST_SET}: == *:unit:* ]]; then - if [[ -s ${DEFAULT_CLUSTER_TESTS} ]]; then - Info "Running tests using the default cluster" - restartCluster - RunDefaultClusterTests || TestFailed - else - Info "Skipping default cluster tests because none match" - fi -fi - -if [[ :${TEST_SET}: == *:cluster:* ]]; then - if [[ -s ${CUSTOM_CLUSTER_TESTS} ]]; then - Info "Running tests using custom clusters" - RunCustomClusterTests || TestFailed - else - Info "Skipping custom cluster tests because none match" - fi -fi - -if [[ :${TEST_SET}: == *:systest:* ]]; then - # TODO: Fix this test. The fix consists of updating the test script to - # download a p directory that's compatible with the badger WAL changes. - # This test is not that useful so it's ok to temporarily disable it. - # Info "Running posting size calculation" - # cd posting - # RunCmd ./size_test.sh || TestFailed - # cd .. 
- - Info "Running small load test" - RunCmd ./contrib/scripts/load-test.sh || TestFailed - - Info "Running custom test scripts" - RunCmd ./dgraph/cmd/bulk/systest/test-bulk-schema.sh || TestFailed - - Info "Running large bulk load test" - RunCmd ./systest/21million/test-21million.sh || TestFailed - - # Info "Running large live load test" - # RunCmd ./systest/21million/test-21million.sh --loader live || TestFailed - - Info "Running rebuilding index test" - RunCmd ./systest/1million/test-reindex.sh || TestFailed - - Info "Running background index test" - RunCmd ./systest/bgindex/test-bgindex.sh || TestFailed -fi - -Info "Stopping cluster" -stopCluster - -END_TIME=$(date +%s) -Info "Tests completed in" $(FmtTime $((END_TIME - START_TIME))) - -if [[ ${TEST_FAILED} -eq 0 ]]; then - Info "\e[1;32mAll tests passed!" -else - Info "\e[1;31m*** One or more tests failed! ***" - ListFailedTests -fi - -exit "${TEST_FAILED}" From 25a59bc87b2689185264a3779a60c4aba5a497b9 Mon Sep 17 00:00:00 2001 From: shivaji-dgraph Date: Tue, 25 Feb 2025 10:23:04 +0530 Subject: [PATCH 2/2] Increase timeout for system upgrade tests workflow --- .github/workflows/ci-dgraph-system-upgrade-tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci-dgraph-system-upgrade-tests.yml b/.github/workflows/ci-dgraph-system-upgrade-tests.yml index 2c53d18ca64..8ed7ea89cd1 100644 --- a/.github/workflows/ci-dgraph-system-upgrade-tests.yml +++ b/.github/workflows/ci-dgraph-system-upgrade-tests.yml @@ -24,7 +24,7 @@ jobs: dgraph-upgrade-tests: if: github.event.pull_request.draft == false runs-on: warp-ubuntu-latest-x64-4x - timeout-minutes: 60 + timeout-minutes: 90 steps: - uses: actions/checkout@v4 with: