From 08bd80c6a2205f182c26f60c4a86a345677b9e17 Mon Sep 17 00:00:00 2001
From: Phil Renaud
Date: Fri, 21 Feb 2025 21:50:18 +0000
Subject: [PATCH] backport of commit 11cf98abe341045a2f97c6169fdc2d05d59cdbd7

---
 .changelog/18530.txt | 3 -
 .changelog/24415.txt | 3 -
 .changelog/24601.txt | 3 -
 .changelog/24724.txt | 3 -
 .changelog/24785.txt | 11 -
 .changelog/24909.txt | 3 -
 .changelog/24997.txt | 3 -
 .changelog/25109.txt | 3 -
 .changelog/25173.txt | 3 -
 .github/pull_request_template.md | 35 -
 .github/workflows/checks.yaml | 2 +-
 .github/workflows/test-ui.yml | 2 +-
 .gitignore | 7 -
 CHANGELOG.md | 161 +--
 CODEOWNERS | 17 -
 GNUmakefile | 2 +-
 acl/acl_test.go | 4 -
 acl/policy.go | 38 +-
 acl/policy_test.go | 86 +-
 api/contexts/contexts.go | 1 -
 api/host_volumes.go | 246 ----
 api/nodes.go | 2 -
 api/quota.go | 28 +-
 api/sentinel.go | 6 -
 api/tasks.go | 12 +-
 api/util_test.go | 2 +-
 ci/test-core.json | 1 -
 client/acl.go | 2 +-
 client/agent_endpoint.go | 2 +-
 client/alloc_endpoint.go | 2 +-
 client/allocrunner/alloc_runner.go | 2 +-
 client/allocrunner/alloc_runner_test.go | 2 +-
 client/allocrunner/hookstats/hookstats.go | 2 +-
 .../allocrunner/hookstats/hookstats_test.go | 2 +-
 .../allocrunner/networking_iptables_test.go | 2 -
 .../allocrunner/taskrunner/remotetask_hook.go | 128 ++
 .../allocrunner/taskrunner/sids_hook_test.go | 4 +-
 client/allocrunner/taskrunner/task_runner.go | 8 +-
 .../taskrunner/task_runner_hooks.go | 6 +
 .../taskrunner/task_runner_test.go | 4 +-
 client/allocrunner/taskrunner/tasklet.go | 2 +-
 client/client.go | 36 +-
 client/client_stats_endpoint.go | 2 +-
 client/client_test.go | 2 +-
 client/config/arconfig.go | 2 +-
 client/config/config.go | 7 -
 client/csi_endpoint.go | 2 +-
 client/csi_endpoint_test.go | 12 +-
 client/fingerprint/dynamic_host_volumes.go | 120 --
 .../fingerprint/dynamic_host_volumes_test.go | 89 --
 client/fingerprint/fingerprint.go | 27 +-
 client/fingerprint/zstorage_windows.go | 6 +-
 client/fs_endpoint.go | 2 +-
 client/host_volume_endpoint.go | 83 --
 client/host_volume_endpoint_test.go | 200 ---
 .../hostvolumemanager/host_volume_plugin.go | 362 ------
 .../host_volume_plugin_test.go | 209 ---
 client/hostvolumemanager/host_volumes.go | 372 ------
 client/hostvolumemanager/host_volumes_test.go | 441 -------
 .../test_fixtures/test_plugin.sh | 50 -
 .../test_fixtures/test_plugin_sad.sh | 7 -
 .../hostvolumemanager/volume_fingerprint.go | 73 --
 .../volume_fingerprint_test.go | 144 ---
 client/lib/numalib/detect_linux.go | 51 +-
 client/lib/numalib/detect_linux_test.go | 69 -
 client/lib/numalib/hw/speeds.go | 4 -
 client/meta_endpoint.go | 2 +-
 client/node_updater.go | 71 +-
 client/pluginmanager/csimanager/interface.go | 4 +-
 client/rpc.go | 5 +-
 client/state/db_bolt.go | 41 -
 client/state/db_error.go | 15 -
 client/state/db_mem.go | 43 +-
 client/state/db_noop.go | 12 -
 client/state/db_test.go | 36 -
 client/state/interface.go | 4 -
 client/structs/csi.go | 12 +-
 client/structs/host_volumes.go | 115 --
 client/vaultclient/vaultclient.go | 2 +-
 command/agent/agent.go | 10 +-
 command/agent/bindata_assetfs.go | 48 +-
 command/agent/command.go | 31 +-
 command/agent/config.go | 13 -
 command/agent/consul/service_client.go | 2 +-
 command/agent/csi_endpoint.go | 12 +-
 command/agent/host_volume_endpoint.go | 143 ---
 command/agent/host_volume_endpoint_test.go | 103 --
 command/agent/http.go | 4 +-
 command/agent/job_endpoint.go | 5 +-
 command/agent/metrics_endpoint_test.go | 2 +-
 command/agent/testagent.go | 2 +-
 command/agent/volumes_endpoint.go | 27 -
 command/asset/asset.go | 12 -
command/asset/volume.csi.hcl | 70 -- command/asset/volume.csi.json | 72 -- command/asset/volume.host.hcl | 29 - command/asset/volume.host.json | 24 - command/node_status.go | 7 +- command/plugin_status.go | 13 +- command/quota_apply.go | 67 +- command/quota_apply_test.go | 53 - command/quota_delete_test.go | 2 +- command/quota_init.go | 14 +- command/quota_init_test.go | 6 +- command/sentinel_apply.go | 17 +- command/setup_consul.go | 3 + command/setup_vault.go | 3 + command/volume_create.go | 48 +- command/volume_create_host.go | 401 ------ command/volume_create_host_test.go | 232 ---- command/volume_delete.go | 85 +- command/volume_delete_host_test.go | 99 -- command/volume_deregister.go | 1 + command/volume_init.go | 181 ++- command/volume_register.go | 37 +- command/volume_register_host.go | 66 - command/volume_register_host_test.go | 102 -- ...er_csi_test.go => volume_register_test.go} | 0 command/volume_status.go | 50 +- command/volume_status_csi.go | 24 +- command/volume_status_host.go | 212 ---- command/volume_status_host_test.go | 172 --- ...atus_csi_test.go => volume_status_test.go} | 0 demo/hostvolume/_test-plugin.sh | 84 -- demo/hostvolume/check.sh | 14 - demo/hostvolume/e2e.sh | 9 - demo/hostvolume/example-plugin-mkfs | 131 -- demo/hostvolume/external-plugin.volume.hcl | 22 - demo/hostvolume/internal-plugin.volume.hcl | 14 - demo/hostvolume/job.nomad.hcl | 48 - demo/hostvolume/no-plugin.volume.hcl | 15 - demo/hostvolume/setup.sh | 14 - demo/hostvolume/teardown.sh | 17 - dev/hooks/pre-push | 11 +- drivers/docker/config_test.go | 3 + drivers/shared/executor/executor_test.go | 2 +- e2e/.gitignore | 1 + e2e/README.md | 11 - e2e/allocexec/docker_exec_test.go | 8 +- e2e/cni/cni_test.go | 6 +- e2e/cni/input/cni_args.nomad.hcl | 7 +- e2e/dynamic_host_volumes/doc.go | 7 - .../dynamic_host_volumes_test.go | 318 ----- .../input/mount-created.nomad.hcl | 49 - .../input/mount-registered.nomad.hcl | 49 - .../input/register-volumes.nomad.hcl | 77 -- .../input/register-volumes.policy.hcl | 25 - .../input/sticky.nomad.hcl | 62 - .../input/volume-create.nomad.hcl | 16 - .../input/volume-sticky.nomad.hcl | 16 - e2e/e2e_test.go | 4 +- e2e/jobsubmissions/jobsubapi_test.go | 19 +- e2e/metrics/input/prometheus.hcl | 1 - e2e/remotetasks/input/ecs.nomad | 46 + e2e/remotetasks/remotetasks.go | 446 +++++++ e2e/terraform/.gitignore | 3 +- e2e/terraform/.terraform.lock.hcl | 177 +++ e2e/terraform/Makefile | 12 +- e2e/terraform/README.md | 27 +- .../{provision-infra => }/compute.tf | 66 +- .../{provision-infra => }/consul-clients.tf | 16 +- .../{provision-infra => }/consul-servers.tf | 34 +- e2e/terraform/ecs-task.json | 21 + e2e/terraform/ecs.tf | 28 + e2e/terraform/ecs.tftpl | 2 + .../etc/acls/consul/consul-agent-policy.hcl | 0 .../etc/acls/consul/nomad-client-policy.hcl | 0 .../etc/acls/consul/nomad-server-policy.hcl | 0 e2e/terraform/etc/acls/vault/nomad-policy.hcl | 44 + .../etc/consul.d/.environment | 0 .../etc/consul.d/clients.hcl | 0 .../etc/consul.d/consul-server.service | 0 .../etc/consul.d/consul.service | 1 - .../etc/consul.d/servers.hcl | 0 .../etc/nomad.d/.environment | 0 .../provision-nomad => }/etc/nomad.d/base.hcl | 0 .../etc/nomad.d/client-consul.hcl | 0 .../etc/nomad.d/client-linux-0.hcl | 0 .../etc/nomad.d/client-linux-1.hcl | 0 .../etc/nomad.d/client-linux-2.hcl | 0 .../etc/nomad.d/client-linux-3.hcl | 0 .../etc/nomad.d/client-linux.hcl | 0 .../etc/nomad.d/client-windows.hcl | 0 .../etc/nomad.d/index.hcl | 0 .../etc/nomad.d/nomad-client.service | 6 - 
.../etc/nomad.d/nomad-server.service | 0 .../etc/nomad.d/server-consul.hcl | 0 .../etc/nomad.d/server-linux.hcl | 0 .../provision-nomad => }/etc/nomad.d/tls.hcl | 2 +- e2e/terraform/etc/nomad.d/vault.hcl | 11 + e2e/terraform/hcp_vault.tf | 53 + e2e/terraform/{provision-infra => }/iam.tf | 0 e2e/terraform/main.tf | 39 +- .../{provision-infra => }/network.tf | 13 +- .../{provision-infra => }/nomad-acls.tf | 17 +- e2e/terraform/nomad.tf | 86 ++ e2e/terraform/outputs.tf | 58 +- .../packer/ubuntu-jammy-amd64/dnsconfig.sh | 6 +- .../packer/ubuntu-jammy-amd64/setup.sh | 16 + e2e/terraform/provision-infra/.gitignore | 7 - e2e/terraform/provision-infra/hcp_vault.tf | 91 -- e2e/terraform/provision-infra/main.tf | 33 - e2e/terraform/provision-infra/nomad.tf | 102 -- e2e/terraform/provision-infra/outputs.tf | 95 -- .../provision-nomad/etc/nomad.d/vault.hcl | 14 - ...ult-acl-jwt-policy-nomad-workloads.hcl.tpl | 15 - e2e/terraform/provision-infra/variables.tf | 127 -- e2e/terraform/provision-infra/versions.tf | 14 - .../provision-nomad/install-linux.tf | 5 +- .../provision-nomad/install-windows.tf | 3 +- .../provision-nomad/main.tf | 39 +- .../provision-nomad/tls.tf | 4 +- .../provision-nomad/variables.tf | 15 +- .../scripts/anonymous.nomad_policy.hcl | 0 .../scripts/bootstrap-consul.sh | 5 - .../scripts/bootstrap-nomad.sh | 11 +- .../scripts/consul-agents-policy.hcl | 0 .../scripts/nomad-cluster-consul-policy.hcl | 0 e2e/terraform/terraform.tfvars | 4 +- e2e/terraform/{provision-infra => }/tls_ca.tf | 4 +- .../{provision-infra => }/tls_client.tf | 10 +- .../provision-nomad => }/uploads/README.md | 0 .../{provision-infra => }/userdata/README.md | 0 .../userdata/windows-2016.ps1 | 0 e2e/terraform/variables.tf | 48 +- .../terraform/versions.tf | 5 +- .../{provision-infra => }/volumes.tf | 4 +- .../{provision-infra => }/volumes.tftpl | 0 e2e/ui/package-lock.json | 52 +- e2e/ui/package.json | 2 +- e2e/ui/run.sh | 2 +- e2e/v3/volumes3/host3.go | 225 ---- e2e/vaultcompat/cluster_setup_test.go | 10 + e2e/vaultcompat/input/cat.hcl | 25 + e2e/vaultcompat/input/policy_legacy.hcl | 30 + e2e/vaultcompat/run_ce_test.go | 10 + e2e/vaultcompat/vaultcompat_test.go | 47 + e2e/vaultsecrets/input/acl-role.json | 19 - e2e/vaultsecrets/input/default_wi.nomad.hcl | 40 - ...non-default_wi.nomad.hcl => secrets.nomad} | 3 +- e2e/vaultsecrets/vaultsecrets_test.go | 137 +- e2e/volume_mounts/volumes_test.go | 73 -- e2e/{volume_mounts => volumes}/doc.go | 2 +- .../input/volumes.nomad | 5 +- e2e/volumes/volumes_test.go | 109 ++ enos/.gitignore | 2 - enos/README.md | 111 -- enos/enos-modules.hcl | 26 - enos/enos-quality.hcl | 47 - enos/enos-scenario-upgrade.hcl | 380 ------ enos/enos-terraform.hcl | 17 - enos/enos-vars.hcl | 56 - enos/enos.vars.example.hcl | 11 - enos/modules/fetch_artifactory/locals.tf | 23 - enos/modules/fetch_artifactory/main.tf | 36 - enos/modules/fetch_artifactory/outputs.tf | 17 - .../fetch_artifactory/scripts/install.sh | 16 - enos/modules/fetch_artifactory/variables.tf | 65 - .../jobs/docker-service.nomad.hcl | 28 - .../jobs/raw-exec-service.nomad.hcl | 40 - enos/modules/run_workloads/main.tf | 32 - enos/modules/run_workloads/outputs.tf | 16 - .../scripts/wait_for_nomad_api.sh | 25 - enos/modules/run_workloads/variables.tf | 43 - enos/modules/test_cluster_health/main.tf | 59 - .../test_cluster_health/scripts/allocs.sh | 96 -- .../test_cluster_health/scripts/clients.sh | 57 - .../test_cluster_health/scripts/jobs.sh | 24 - .../test_cluster_health/scripts/servers.sh | 92 -- 
.../test_cluster_health/scripts/versions.sh | 52 - .../scripts/wait_for_nomad_api.sh | 25 - enos/modules/test_cluster_health/variables.tf | 63 - enos/modules/upgrade_clients/main.tf | 211 ---- .../upgrade_clients/scripts/set_metadata.sh | 18 - .../scripts/verify_metadata.sh | 68 - .../scripts/wait_for_nomad_api.sh | 25 - enos/modules/upgrade_clients/variables.tf | 73 -- enos/modules/upgrade_instance/.gitignore | 2 - enos/modules/upgrade_instance/main.tf | 57 - enos/modules/upgrade_instance/variables.tf | 50 - enos/modules/upgrade_servers/main.tf | 157 --- .../scripts/wait_for_stable_cluster.sh | 83 -- enos/modules/upgrade_servers/variables.tf | 73 -- go.mod | 17 +- go.sum | 33 +- helper/funcs.go | 48 - helper/funcs_test.go | 45 - helper/pluginutils/loader/init.go | 9 +- helper/pluginutils/loader/loader_test.go | 7 +- helper/raftutil/msgtypes.go | 3 - helper/users/lookup_windows_test.go | 1 - lib/auth/jwt/validator.go | 2 +- nomad/acl_endpoint.go | 2 +- nomad/alloc_endpoint.go | 2 +- nomad/auth/auth.go | 2 +- nomad/autopilot.go | 2 +- nomad/blocked_evals.go | 2 +- nomad/client_alloc_endpoint.go | 2 +- nomad/client_csi_endpoint.go | 2 +- nomad/client_fs_endpoint.go | 2 +- nomad/client_host_volume_endpoint.go | 96 -- nomad/client_meta_endpoint.go | 2 +- nomad/client_stats_endpoint.go | 2 +- nomad/consul.go | 2 +- nomad/csi_endpoint.go | 2 +- nomad/deployment_endpoint.go | 2 +- nomad/eval_broker.go | 2 +- nomad/eval_endpoint.go | 2 +- nomad/event_endpoint.go | 12 - nomad/event_endpoint_test.go | 60 - nomad/fsm.go | 70 +- nomad/heartbeat.go | 2 +- nomad/host_volume_endpoint.go | 755 ----------- nomad/host_volume_endpoint_ce.go | 29 - nomad/host_volume_endpoint_test.go | 1118 ----------------- nomad/job_endpoint.go | 2 +- nomad/job_endpoint_statuses.go | 2 +- nomad/keyring_endpoint.go | 2 +- nomad/leader.go | 2 +- nomad/lock/delay.go | 2 +- nomad/lock/ttl.go | 2 +- nomad/mock/host_volumes.go | 64 - nomad/namespace_endpoint.go | 2 +- nomad/node_endpoint.go | 2 +- nomad/node_endpoint_test.go | 38 +- nomad/node_pool_endpoint.go | 2 +- nomad/periodic_endpoint.go | 2 +- nomad/periodic_test.go | 37 +- nomad/plan_apply.go | 2 +- nomad/plan_apply_node_tracker.go | 2 +- nomad/plan_endpoint.go | 2 +- nomad/plan_queue.go | 2 +- nomad/rpc.go | 2 +- nomad/rpc_rate_metrics.go | 2 +- nomad/scaling_endpoint.go | 2 +- nomad/search_endpoint.go | 21 +- nomad/search_endpoint_test.go | 93 -- nomad/server.go | 4 +- nomad/service_registration_endpoint.go | 2 +- nomad/state/deployment_events_test.go | 2 +- nomad/state/events.go | 108 -- nomad/state/events_test.go | 120 -- nomad/state/schema.go | 138 +- nomad/state/state_store.go | 139 +- nomad/state/state_store_host_volumes.go | 292 ----- nomad/state/state_store_host_volumes_ce.go | 20 - nomad/state/state_store_host_volumes_test.go | 355 ------ nomad/state/state_store_restore.go | 12 +- .../state_store_task_group_volume_claims.go | 138 -- ...ate_store_task_group_volume_claims_test.go | 87 -- nomad/state/state_store_test.go | 102 -- nomad/stream/event_broker.go | 2 +- nomad/structs/acl.go | 10 - nomad/structs/cni_config_test.go | 3 +- nomad/structs/config/workload_id.go | 2 +- nomad/structs/csi.go | 46 +- nomad/structs/diff_test.go | 24 +- nomad/structs/event.go | 29 +- nomad/structs/funcs.go | 23 +- nomad/structs/funcs_test.go | 48 - nomad/structs/host_volumes.go | 439 ------- nomad/structs/host_volumes_test.go | 344 ----- nomad/structs/search.go | 1 - nomad/structs/structs.go | 79 +- .../{volumes_test.go => volume_test.go} | 18 +- nomad/structs/volumes.go | 55 +- 
nomad/variables_endpoint.go | 2 +- nomad/vault.go | 2 +- nomad/volumewatcher/volumes_watcher.go | 6 +- nomad/worker.go | 2 +- plugins/csi/plugin.go | 2 +- plugins/drivers/client.go | 1 + plugins/drivers/driver.go | 8 + plugins/drivers/proto/driver.pb.go | 506 ++++---- plugins/drivers/proto/driver.proto | 6 +- plugins/drivers/server.go | 1 + plugins/drivers/task_handle.go | 32 + scheduler/annotate.go | 4 +- scheduler/annotate_test.go | 42 - scheduler/context.go | 7 - scheduler/feasible.go | 222 +--- scheduler/feasible_test.go | 471 +------ scheduler/generic_sched.go | 45 +- scheduler/generic_sched_test.go | 321 +++-- scheduler/reconcile.go | 2 +- scheduler/scheduler.go | 11 - scheduler/stack.go | 21 +- scheduler/util.go | 71 ++ scheduler/util_test.go | 134 ++ ui/yarn.lock | 6 +- website/content/api-docs/events.mdx | 92 +- website/content/api-docs/volumes.mdx | 523 ++------ .../content/docs/commands/setup/consul.mdx | 7 + website/content/docs/commands/setup/vault.mdx | 7 + .../content/docs/commands/volume/create.mdx | 51 +- .../content/docs/commands/volume/delete.mdx | 41 +- .../docs/commands/volume/deregister.mdx | 5 +- website/content/docs/commands/volume/init.mdx | 5 +- .../content/docs/commands/volume/register.mdx | 90 +- .../concepts/plugins/{storage => }/csi.mdx | 43 +- .../concepts/plugins/storage/host-volumes.mdx | 400 ------ .../docs/concepts/plugins/storage/index.mdx | 32 - .../docs/concepts/plugins/task-drivers.mdx | 6 + website/content/docs/concepts/security.mdx | 2 +- website/content/docs/configuration/client.mdx | 24 +- website/content/docs/enterprise/sentinel.mdx | 72 +- .../docs/job-specification/affinity.mdx | 4 - .../docs/job-specification/constraint.mdx | 4 - .../content/docs/job-specification/spread.mdx | 4 - website/content/docs/job-specification/ui.mdx | 2 - .../content/docs/job-specification/volume.mdx | 80 +- .../docs/operations/metrics-reference.mdx | 472 ++++--- .../docs/operations/stateful-workloads.mdx | 57 +- .../docs/other-specifications/acl-policy.mdx | 13 +- .../docs/other-specifications/namespace.mdx | 31 +- .../docs/other-specifications/quota.mdx | 28 +- .../volume/capability.mdx | 52 +- .../docs/other-specifications/volume/csi.mdx | 291 ----- .../docs/other-specifications/volume/host.mdx | 307 ----- .../other-specifications/volume/index.mdx | 282 ++++- .../volume/mount_options.mdx | 8 +- .../volume/topology_request.mdx | 9 +- .../content/docs/upgrade/upgrade-specific.mdx | 40 - website/data/docs-nav-data.json | 23 +- website/package-lock.json | 1 - website/public/img/nomad-ui-block.png | Bin 18682 -> 0 bytes website/redirects.js | 6 - 427 files changed, 3936 insertions(+), 18474 deletions(-) delete mode 100644 .changelog/18530.txt delete mode 100644 .changelog/24415.txt delete mode 100644 .changelog/24601.txt delete mode 100644 .changelog/24724.txt delete mode 100644 .changelog/24785.txt delete mode 100644 .changelog/24909.txt delete mode 100644 .changelog/24997.txt delete mode 100644 .changelog/25109.txt delete mode 100644 .changelog/25173.txt delete mode 100644 .github/pull_request_template.md delete mode 100644 api/host_volumes.go create mode 100644 client/allocrunner/taskrunner/remotetask_hook.go delete mode 100644 client/fingerprint/dynamic_host_volumes.go delete mode 100644 client/fingerprint/dynamic_host_volumes_test.go delete mode 100644 client/host_volume_endpoint.go delete mode 100644 client/host_volume_endpoint_test.go delete mode 100644 client/hostvolumemanager/host_volume_plugin.go delete mode 100644 
client/hostvolumemanager/host_volume_plugin_test.go delete mode 100644 client/hostvolumemanager/host_volumes.go delete mode 100644 client/hostvolumemanager/host_volumes_test.go delete mode 100755 client/hostvolumemanager/test_fixtures/test_plugin.sh delete mode 100755 client/hostvolumemanager/test_fixtures/test_plugin_sad.sh delete mode 100644 client/hostvolumemanager/volume_fingerprint.go delete mode 100644 client/hostvolumemanager/volume_fingerprint_test.go delete mode 100644 client/structs/host_volumes.go delete mode 100644 command/agent/host_volume_endpoint.go delete mode 100644 command/agent/host_volume_endpoint_test.go delete mode 100644 command/agent/volumes_endpoint.go delete mode 100644 command/asset/volume.csi.hcl delete mode 100644 command/asset/volume.csi.json delete mode 100644 command/asset/volume.host.hcl delete mode 100644 command/asset/volume.host.json delete mode 100644 command/volume_create_host.go delete mode 100644 command/volume_create_host_test.go delete mode 100644 command/volume_delete_host_test.go delete mode 100644 command/volume_register_host.go delete mode 100644 command/volume_register_host_test.go rename command/{volume_register_csi_test.go => volume_register_test.go} (100%) delete mode 100644 command/volume_status_host.go delete mode 100644 command/volume_status_host_test.go rename command/{volume_status_csi_test.go => volume_status_test.go} (100%) delete mode 100755 demo/hostvolume/_test-plugin.sh delete mode 100755 demo/hostvolume/check.sh delete mode 100755 demo/hostvolume/e2e.sh delete mode 100755 demo/hostvolume/example-plugin-mkfs delete mode 100644 demo/hostvolume/external-plugin.volume.hcl delete mode 100644 demo/hostvolume/internal-plugin.volume.hcl delete mode 100644 demo/hostvolume/job.nomad.hcl delete mode 100644 demo/hostvolume/no-plugin.volume.hcl delete mode 100755 demo/hostvolume/setup.sh delete mode 100755 demo/hostvolume/teardown.sh create mode 100644 e2e/.gitignore delete mode 100644 e2e/dynamic_host_volumes/doc.go delete mode 100644 e2e/dynamic_host_volumes/dynamic_host_volumes_test.go delete mode 100644 e2e/dynamic_host_volumes/input/mount-created.nomad.hcl delete mode 100644 e2e/dynamic_host_volumes/input/mount-registered.nomad.hcl delete mode 100644 e2e/dynamic_host_volumes/input/register-volumes.nomad.hcl delete mode 100644 e2e/dynamic_host_volumes/input/register-volumes.policy.hcl delete mode 100644 e2e/dynamic_host_volumes/input/sticky.nomad.hcl delete mode 100644 e2e/dynamic_host_volumes/input/volume-create.nomad.hcl delete mode 100644 e2e/dynamic_host_volumes/input/volume-sticky.nomad.hcl create mode 100644 e2e/remotetasks/input/ecs.nomad create mode 100644 e2e/remotetasks/remotetasks.go create mode 100644 e2e/terraform/.terraform.lock.hcl rename e2e/terraform/{provision-infra => }/compute.tf (60%) rename e2e/terraform/{provision-infra => }/consul-clients.tf (76%) rename e2e/terraform/{provision-infra => }/consul-servers.tf (80%) create mode 100644 e2e/terraform/ecs-task.json create mode 100644 e2e/terraform/ecs.tf create mode 100644 e2e/terraform/ecs.tftpl rename e2e/terraform/{provision-infra/provision-nomad => }/etc/acls/consul/consul-agent-policy.hcl (100%) rename e2e/terraform/{provision-infra/provision-nomad => }/etc/acls/consul/nomad-client-policy.hcl (100%) rename e2e/terraform/{provision-infra/provision-nomad => }/etc/acls/consul/nomad-server-policy.hcl (100%) create mode 100644 e2e/terraform/etc/acls/vault/nomad-policy.hcl rename e2e/terraform/{provision-infra/provision-nomad => }/etc/consul.d/.environment (100%) rename 
e2e/terraform/{provision-infra/provision-nomad => }/etc/consul.d/clients.hcl (100%) rename e2e/terraform/{provision-infra/provision-nomad => }/etc/consul.d/consul-server.service (100%) rename e2e/terraform/{provision-infra/provision-nomad => }/etc/consul.d/consul.service (96%) rename e2e/terraform/{provision-infra/provision-nomad => }/etc/consul.d/servers.hcl (100%) rename e2e/terraform/{provision-infra/provision-nomad => }/etc/nomad.d/.environment (100%) rename e2e/terraform/{provision-infra/provision-nomad => }/etc/nomad.d/base.hcl (100%) rename e2e/terraform/{provision-infra/provision-nomad => }/etc/nomad.d/client-consul.hcl (100%) rename e2e/terraform/{provision-infra/provision-nomad => }/etc/nomad.d/client-linux-0.hcl (100%) rename e2e/terraform/{provision-infra/provision-nomad => }/etc/nomad.d/client-linux-1.hcl (100%) rename e2e/terraform/{provision-infra/provision-nomad => }/etc/nomad.d/client-linux-2.hcl (100%) rename e2e/terraform/{provision-infra/provision-nomad => }/etc/nomad.d/client-linux-3.hcl (100%) rename e2e/terraform/{provision-infra/provision-nomad => }/etc/nomad.d/client-linux.hcl (100%) rename e2e/terraform/{provision-infra/provision-nomad => }/etc/nomad.d/client-windows.hcl (100%) rename e2e/terraform/{provision-infra/provision-nomad => }/etc/nomad.d/index.hcl (100%) rename e2e/terraform/{provision-infra/provision-nomad => }/etc/nomad.d/nomad-client.service (65%) rename e2e/terraform/{provision-infra/provision-nomad => }/etc/nomad.d/nomad-server.service (100%) rename e2e/terraform/{provision-infra/provision-nomad => }/etc/nomad.d/server-consul.hcl (100%) rename e2e/terraform/{provision-infra/provision-nomad => }/etc/nomad.d/server-linux.hcl (100%) rename e2e/terraform/{provision-infra/provision-nomad => }/etc/nomad.d/tls.hcl (88%) create mode 100644 e2e/terraform/etc/nomad.d/vault.hcl create mode 100644 e2e/terraform/hcp_vault.tf rename e2e/terraform/{provision-infra => }/iam.tf (100%) rename e2e/terraform/{provision-infra => }/network.tf (93%) rename e2e/terraform/{provision-infra => }/nomad-acls.tf (77%) create mode 100644 e2e/terraform/nomad.tf delete mode 100644 e2e/terraform/provision-infra/.gitignore delete mode 100644 e2e/terraform/provision-infra/hcp_vault.tf delete mode 100644 e2e/terraform/provision-infra/main.tf delete mode 100644 e2e/terraform/provision-infra/nomad.tf delete mode 100644 e2e/terraform/provision-infra/outputs.tf delete mode 100644 e2e/terraform/provision-infra/provision-nomad/etc/nomad.d/vault.hcl delete mode 100644 e2e/terraform/provision-infra/templates/vault-acl-jwt-policy-nomad-workloads.hcl.tpl delete mode 100644 e2e/terraform/provision-infra/variables.tf delete mode 100644 e2e/terraform/provision-infra/versions.tf rename e2e/terraform/{provision-infra => }/provision-nomad/install-linux.tf (95%) rename e2e/terraform/{provision-infra => }/provision-nomad/install-windows.tf (97%) rename e2e/terraform/{provision-infra => }/provision-nomad/main.tf (76%) rename e2e/terraform/{provision-infra => }/provision-nomad/tls.tf (88%) rename e2e/terraform/{provision-infra => }/provision-nomad/variables.tf (84%) rename e2e/terraform/{provision-infra => }/scripts/anonymous.nomad_policy.hcl (100%) rename e2e/terraform/{provision-infra => }/scripts/bootstrap-consul.sh (88%) rename e2e/terraform/{provision-infra => }/scripts/bootstrap-nomad.sh (75%) rename e2e/terraform/{provision-infra => }/scripts/consul-agents-policy.hcl (100%) rename e2e/terraform/{provision-infra => }/scripts/nomad-cluster-consul-policy.hcl (100%) rename 
e2e/terraform/{provision-infra => }/tls_ca.tf (88%) rename e2e/terraform/{provision-infra => }/tls_client.tf (86%) rename e2e/terraform/{provision-infra/provision-nomad => }/uploads/README.md (100%) rename e2e/terraform/{provision-infra => }/userdata/README.md (100%) rename e2e/terraform/{provision-infra => }/userdata/windows-2016.ps1 (100%) rename enos/enos-providers.hcl => e2e/terraform/versions.tf (57%) rename e2e/terraform/{provision-infra => }/volumes.tf (86%) rename e2e/terraform/{provision-infra => }/volumes.tftpl (100%) delete mode 100644 e2e/v3/volumes3/host3.go create mode 100644 e2e/vaultcompat/input/cat.hcl create mode 100644 e2e/vaultcompat/input/policy_legacy.hcl delete mode 100644 e2e/vaultsecrets/input/acl-role.json delete mode 100644 e2e/vaultsecrets/input/default_wi.nomad.hcl rename e2e/vaultsecrets/input/{non-default_wi.nomad.hcl => secrets.nomad} (95%) delete mode 100644 e2e/volume_mounts/volumes_test.go rename e2e/{volume_mounts => volumes}/doc.go (90%) rename e2e/{volume_mounts => volumes}/input/volumes.nomad (98%) create mode 100644 e2e/volumes/volumes_test.go delete mode 100644 enos/.gitignore delete mode 100644 enos/README.md delete mode 100644 enos/enos-modules.hcl delete mode 100644 enos/enos-quality.hcl delete mode 100644 enos/enos-scenario-upgrade.hcl delete mode 100644 enos/enos-terraform.hcl delete mode 100644 enos/enos-vars.hcl delete mode 100644 enos/enos.vars.example.hcl delete mode 100644 enos/modules/fetch_artifactory/locals.tf delete mode 100644 enos/modules/fetch_artifactory/main.tf delete mode 100644 enos/modules/fetch_artifactory/outputs.tf delete mode 100755 enos/modules/fetch_artifactory/scripts/install.sh delete mode 100644 enos/modules/fetch_artifactory/variables.tf delete mode 100644 enos/modules/run_workloads/jobs/docker-service.nomad.hcl delete mode 100644 enos/modules/run_workloads/jobs/raw-exec-service.nomad.hcl delete mode 100644 enos/modules/run_workloads/main.tf delete mode 100644 enos/modules/run_workloads/outputs.tf delete mode 100755 enos/modules/run_workloads/scripts/wait_for_nomad_api.sh delete mode 100644 enos/modules/run_workloads/variables.tf delete mode 100644 enos/modules/test_cluster_health/main.tf delete mode 100755 enos/modules/test_cluster_health/scripts/allocs.sh delete mode 100755 enos/modules/test_cluster_health/scripts/clients.sh delete mode 100755 enos/modules/test_cluster_health/scripts/jobs.sh delete mode 100755 enos/modules/test_cluster_health/scripts/servers.sh delete mode 100755 enos/modules/test_cluster_health/scripts/versions.sh delete mode 100755 enos/modules/test_cluster_health/scripts/wait_for_nomad_api.sh delete mode 100644 enos/modules/test_cluster_health/variables.tf delete mode 100644 enos/modules/upgrade_clients/main.tf delete mode 100755 enos/modules/upgrade_clients/scripts/set_metadata.sh delete mode 100755 enos/modules/upgrade_clients/scripts/verify_metadata.sh delete mode 100755 enos/modules/upgrade_clients/scripts/wait_for_nomad_api.sh delete mode 100644 enos/modules/upgrade_clients/variables.tf delete mode 100644 enos/modules/upgrade_instance/.gitignore delete mode 100644 enos/modules/upgrade_instance/main.tf delete mode 100644 enos/modules/upgrade_instance/variables.tf delete mode 100644 enos/modules/upgrade_servers/main.tf delete mode 100755 enos/modules/upgrade_servers/scripts/wait_for_stable_cluster.sh delete mode 100644 enos/modules/upgrade_servers/variables.tf delete mode 100644 nomad/client_host_volume_endpoint.go delete mode 100644 nomad/host_volume_endpoint.go delete mode 100644 
nomad/host_volume_endpoint_ce.go delete mode 100644 nomad/host_volume_endpoint_test.go delete mode 100644 nomad/mock/host_volumes.go delete mode 100644 nomad/state/state_store_host_volumes.go delete mode 100644 nomad/state/state_store_host_volumes_ce.go delete mode 100644 nomad/state/state_store_host_volumes_test.go delete mode 100644 nomad/state/state_store_task_group_volume_claims.go delete mode 100644 nomad/state/state_store_task_group_volume_claims_test.go delete mode 100644 nomad/structs/host_volumes.go delete mode 100644 nomad/structs/host_volumes_test.go rename nomad/structs/{volumes_test.go => volume_test.go} (93%) rename website/content/docs/concepts/plugins/{storage => }/csi.mdx (73%) delete mode 100644 website/content/docs/concepts/plugins/storage/host-volumes.mdx delete mode 100644 website/content/docs/concepts/plugins/storage/index.mdx delete mode 100644 website/content/docs/other-specifications/volume/csi.mdx delete mode 100644 website/content/docs/other-specifications/volume/host.mdx delete mode 100644 website/public/img/nomad-ui-block.png diff --git a/.changelog/18530.txt b/.changelog/18530.txt deleted file mode 100644 index 2a26b16b3a0..00000000000 --- a/.changelog/18530.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:breaking-change -agent: Plugins stored within the `plugin_dir` will now only be executed when they have a corresponding `plugin` configuration block. Any plugin found without a corresponding configuration block will be skipped. -``` diff --git a/.changelog/24415.txt b/.changelog/24415.txt deleted file mode 100644 index 2d158f8581c..00000000000 --- a/.changelog/24415.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -client: fixed a bug where AMD CPUs were not correctly fingerprinting base speed -``` diff --git a/.changelog/24601.txt b/.changelog/24601.txt deleted file mode 100644 index e8b8807f300..00000000000 --- a/.changelog/24601.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:breaking-change -sentinel: The sentinel apply command now requires the -scope option -``` diff --git a/.changelog/24724.txt b/.changelog/24724.txt deleted file mode 100644 index 20775747c80..00000000000 --- a/.changelog/24724.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -csi: Added CSI volume and plugin events to the event stream -``` diff --git a/.changelog/24785.txt b/.changelog/24785.txt deleted file mode 100644 index 810e9df3f55..00000000000 --- a/.changelog/24785.txt +++ /dev/null @@ -1,11 +0,0 @@ -```release-note:breaking-change -api: QuotaSpec.RegionLimit is now of type QuotaResources instead of Resources -``` - -```release-note:deprecation -api: QuotaSpec.VariablesLimit field is deprecated and will be removed in Nomad 1.12.0. Use QuotaSpec.RegionLimit.Storage.Variables instead. -``` - -```release-note:deprecation -quotas: the variables_limit field in the quota specification is deprecated and replaced by a new storage block under the region_limit block, with a variables field. 
The variables_limit field will be removed in Nomad 1.12.0 -``` diff --git a/.changelog/24909.txt b/.changelog/24909.txt deleted file mode 100644 index 6d000d3fd79..00000000000 --- a/.changelog/24909.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:breaking-change -drivers: remove remote task support for task drivers -``` diff --git a/.changelog/24997.txt b/.changelog/24997.txt deleted file mode 100644 index 58c7a4ce4e6..00000000000 --- a/.changelog/24997.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -csi: Accept ID prefixes and wildcard namespace for the volume delete command -``` diff --git a/.changelog/25109.txt b/.changelog/25109.txt deleted file mode 100644 index 4b5ac2a9488..00000000000 --- a/.changelog/25109.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -scheduler: Fixed a bug that made affinity and spread updates destructive -``` diff --git a/.changelog/25173.txt b/.changelog/25173.txt deleted file mode 100644 index 61dd831a087..00000000000 --- a/.changelog/25173.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -csi: Show volume capabilities in the volume status command -``` diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md deleted file mode 100644 index 6f45b095e98..00000000000 --- a/.github/pull_request_template.md +++ /dev/null @@ -1,35 +0,0 @@ -### Description - - -### Testing & Reproduction steps - - -### Links - - -### Contributor Checklist -- [ ] **Changelog Entry** If this PR changes user-facing behavior, please generate and add a - changelog entry using the `make cl` command. -- [ ] **Testing** Please add tests to cover any new functionality or to demonstrate bug fixes and - ensure regressions will be caught. -- [ ] **Documentation** If the change impacts user-facing functionality such as the CLI, API, UI, - and job configuration, please update the Nomad website documentation to reflect this. Refer to - the [website README](../website/README.md) for docs guidelines. Please also consider whether the - change requires notes within the [upgrade guide](../website/content/docs/upgrade/upgrade-specific.mdx). - -### Reviewer Checklist -- [ ] **Backport Labels** Please add the correct backport labels as described by the internal - backporting document. -- [ ] **Commit Type** Ensure the correct merge method is selected which should be "squash and merge" - in the majority of situations. The main exceptions are long-lived feature branches or merges where - history should be preserved. -- [ ] **Enterprise PRs** If this is an enterprise only PR, please add any required changelog entry - within the public repository. 
diff --git a/.github/workflows/checks.yaml b/.github/workflows/checks.yaml index f0f04122464..3eabaeb9c78 100644 --- a/.github/workflows/checks.yaml +++ b/.github/workflows/checks.yaml @@ -43,7 +43,7 @@ jobs: run: git config --global url.'https://${{ env.ELEVATED_GITHUB_TOKEN }}@github.com'.insteadOf 'https://github.com' - uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0 with: - cache: ${{ contains(runner.name, 'Github Actions') }} + cache: true go-version-file: .go-version cache-dependency-path: '**/go.sum' - name: Run make check diff --git a/.github/workflows/test-ui.yml b/.github/workflows/test-ui.yml index ee33d00fcde..1be0872e9e5 100644 --- a/.github/workflows/test-ui.yml +++ b/.github/workflows/test-ui.yml @@ -124,7 +124,7 @@ jobs: run: node ../scripts/combine-ui-test-results.js - name: Upload combined results for comparison if: github.event_name == 'push' && github.ref == 'refs/heads/main' - uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4.6.0 + uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3 with: name: test-results-${{ github.sha }} path: ui/combined-test-results.json diff --git a/.gitignore b/.gitignore index 2fc7f49083f..995fa494afc 100644 --- a/.gitignore +++ b/.gitignore @@ -137,10 +137,3 @@ tools/missing/missing # allow security scanner file !scan.hcl - -# generated variables for upgrade tests -enos.vars.hcl -enos/modules/*/*.tfvars - -# local license files -*.hclic diff --git a/CHANGELOG.md b/CHANGELOG.md index dcacd0fd3f1..0c47c6ff723 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -190,85 +190,6 @@ BUG FIXES: * template: Fixed a panic on client restart when using change_mode=script [[GH-24057](https://github.com/hashicorp/nomad/issues/24057)] * ui: Fixes an issue where variables paths would not let namespaced users write variables unless they also had wildcard namespace variable write permissions [[GH-24073](https://github.com/hashicorp/nomad/issues/24073)] -## 1.8.10 (February 11, 2025) - -SECURITY: - -* api: sanitize the SignedIdentities in allocations of events to clean the identity token. [[GH-24966](https://github.com/hashicorp/nomad/issues/24966)] -* build: Updated Go to 1.23.6 [[GH-25041](https://github.com/hashicorp/nomad/issues/25041)] -* event stream: fixes vulnerability CVE-2025-0937, where using a wildcard namespace to subscribe to the events API grants a user with "read" capabilites on any namespace, the ability to read events from all namespaces. 
[[GH-25089](https://github.com/hashicorp/nomad/issues/25089)] - -IMPROVEMENTS: - -* auth: adds `VerboseLogging` option to auth-method config for debugging SSO [[GH-24892](https://github.com/hashicorp/nomad/issues/24892)] -* event stream: adds ability to authenticate using workload identities [[GH-24849](https://github.com/hashicorp/nomad/issues/24849)] - -BUG FIXES: - -* agent: Fixed a bug where Nomad error log messages within syslog showed via the notice priority [[GH-24820](https://github.com/hashicorp/nomad/issues/24820)] -* agent: Fixed a bug where all syslog entries were marked as notice when using JSON logging format [[GH-24865](https://github.com/hashicorp/nomad/issues/24865)] -* client: Fixed a bug where temporary RPC errors cause the client to poll for changes more frequently thereafter [[GH-25039](https://github.com/hashicorp/nomad/issues/25039)] -* csi: Fixed a bug where volume context from the plugin would be erased on volume updates [[GH-24922](https://github.com/hashicorp/nomad/issues/24922)] -* networking: check network namespaces on Linux during client restarts and fail the allocation if an existing namespace is invalid [[GH-24658](https://github.com/hashicorp/nomad/issues/24658)] -* reporting (Enterprise): Updated the reporting metric to utilize node active heartbeat count. [[GH-24919](https://github.com/hashicorp/nomad/issues/24919)] -* state store: fix for setting correct status for a job version when reverting, and also fixes an issue where jobs were briefly marked dead during restarts [[GH-24974](https://github.com/hashicorp/nomad/issues/24974)] -* taskrunner: fix panic when a task with dynamic user is recovered [[GH-24739](https://github.com/hashicorp/nomad/issues/24739)] -* ui: Ensure pending service check blocks are filled [[GH-24818](https://github.com/hashicorp/nomad/issues/24818)] -* ui: Remove unrequired node read API call when attempting to stream task logs [[GH-24973](https://github.com/hashicorp/nomad/issues/24973)] -* vault: Fixed a bug where successful renewal was logged as an error [[GH-25040](https://github.com/hashicorp/nomad/issues/25040)] - - -## 1.8.9 (January 14, 2025) - - -IMPROVEMENTS: - -* api: Sanitise hcl variables before storage on JobSubmission [[GH-24423](https://github.com/hashicorp/nomad/issues/24423)] -* deps: Upgraded aws-sdk-go from v1 to v2 [[GH-24720](https://github.com/hashicorp/nomad/issues/24720)] - -BUG FIXES: - -* drivers: validate logmon plugin during reattach [[GH-24798](https://github.com/hashicorp/nomad/issues/24798)] - - -## 1.8.8 Enterprise (December 18, 2024) - -SECURITY: - -* api: sanitize the SignedIdentities in allocations to prevent privilege escalation through unredacted workload identity token impersonation associated with ACL policies. 
[[GH-24683](https://github.com/hashicorp/nomad/issues/24683)] -* security: Added more host environment variables to the default deny list for tasks [[GH-24540](https://github.com/hashicorp/nomad/issues/24540)] -* security: Explicitly set 'Content-Type' header to mitigate XSS vulnerability [[GH-24489](https://github.com/hashicorp/nomad/issues/24489)] -* security: add executeTemplate to default template function_denylist [[GH-24541](https://github.com/hashicorp/nomad/issues/24541)] - -BUG FIXES: - -* agent: Fixed a bug where `retry_join` gave up after a single failure, rather than retrying until max attempts had been reached [[GH-24561](https://github.com/hashicorp/nomad/issues/24561)] -* api: Fixed a bug where alloc exec/logs/fs APIs would return errors for non-global regions [[GH-24644](https://github.com/hashicorp/nomad/issues/24644)] -* cli: Ensure the `operator autopilot health` command only outputs JSON when the `json` flag is supplied [[GH-24655](https://github.com/hashicorp/nomad/issues/24655)] -* consul: Fixed a bug where failures when syncing Consul checks could panic the Nomad agent [[GH-24513](https://github.com/hashicorp/nomad/issues/24513)] -* consul: Fixed a bug where non-root Nomad agents could not recreate a task's Consul token on task restart [[GH-24410](https://github.com/hashicorp/nomad/issues/24410)] -* csi: Fixed a bug where drivers that emit multiple topology segments would cause placements to fail [[GH-24522](https://github.com/hashicorp/nomad/issues/24522)] -* csi: Removed redundant namespace output from volume status command [[GH-24432](https://github.com/hashicorp/nomad/issues/24432)] -* discovery: Fixed a bug where IPv6 addresses would not be accepted from cloud autojoin [[GH-24649](https://github.com/hashicorp/nomad/issues/24649)] -* drivers: fix executor leak when drivers error starting tasks [[GH-24495](https://github.com/hashicorp/nomad/issues/24495)] -* executor: validate executor on reattach to avoid possibility of killing non-Nomad processes [[GH-24538](https://github.com/hashicorp/nomad/issues/24538)] -* fix: handles consul template re-renders on client restart [[GH-24399](https://github.com/hashicorp/nomad/issues/24399)] -* networking: use a tmpfs location for the state of CNI IPAM plugin used by bridge mode, to fix a bug where allocations would fail to restore after host reboot [[GH-24650](https://github.com/hashicorp/nomad/issues/24650)] -* scheduler: take all assigned cpu cores into account instead of only those part of the largest lifecycle [[GH-24304](https://github.com/hashicorp/nomad/issues/24304)] -* vault: Fixed a bug where expired secret leases were treated as non-fatal and retried [[GH-24409](https://github.com/hashicorp/nomad/issues/24409)] - -## 1.8.7 Enterprise (November 8, 2024) - -SECURITY: - -* csi: Fixed a bug where a user with csi-write-volume permissions to one namespace can create volumes in another namespace (CVE-2024-10975) [[GH-24396](https://github.com/hashicorp/nomad/issues/24396)] - -BUG FIXES: - -* connect: add validation to ensure that connect native services specify a port [[GH-24329](https://github.com/hashicorp/nomad/issues/24329)] -* keyring: Fixed a panic on server startup when decrypting AEAD key data with empty RSA block [[GH-24383](https://github.com/hashicorp/nomad/issues/24383)] -* scheduler: fixed a bug where resource calculation did not account correctly for poststart tasks [[GH-24297](https://github.com/hashicorp/nomad/issues/24297)] - ## 1.8.6 Enterprise(October 21, 2024) IMPROVEMENTS: @@ -408,7 +329,7 @@ BUG 
FIXES: * server: Fixed a bug where expiring heartbeats for garbage collected nodes could panic the server [[GH-23383](https://github.com/hashicorp/nomad/issues/23383)] * template: Fix template rendering on Windows [[GH-23432](https://github.com/hashicorp/nomad/issues/23432)] * ui: Actions run from jobs with explicit name properties now work from the web UI [[GH-23553](https://github.com/hashicorp/nomad/issues/23553)] -* ui: Don't show keyboard nav hints when taking a screenshot [[GH-23365](https://github.com/hashicorp/nomad/issues/23365)] +* ui: Dont show keyboard nav hints when taking a screenshot [[GH-23365](https://github.com/hashicorp/nomad/issues/23365)] * ui: Fix an issue where a remotely purged job would prevent redirect from taking place in the web UI [[GH-23492](https://github.com/hashicorp/nomad/issues/23492)] * ui: Fix an issue where access to Job Templates in the UI was restricted to variable.write access [[GH-23458](https://github.com/hashicorp/nomad/issues/23458)] * ui: Fix the Upload Jobspec button on the Run Job page [[GH-23548](https://github.com/hashicorp/nomad/issues/23548)] @@ -505,82 +426,6 @@ BUG FIXES: * ui: Show the namespace in the web UI exec command hint [[GH-20218](https://github.com/hashicorp/nomad/issues/20218)] * windows: Fixed a regression where scanning task processes was inefficient [[GH-20619](https://github.com/hashicorp/nomad/issues/20619)] -## 1.7.18 (February 11, 2025) - -SECURITY: - -* api: sanitize the SignedIdentities in allocations of events to clean the identity token. [[GH-24966](https://github.com/hashicorp/nomad/issues/24966)] -* build: Updated Go to 1.23.6 [[GH-25041](https://github.com/hashicorp/nomad/issues/25041)] -* event stream: fixes vulnerability CVE-2025-0937, where using a wildcard namespace to subscribe to the events API grants a user with "read" capabilites on any namespace, the ability to read events from all namespaces. [[GH-25089](https://github.com/hashicorp/nomad/issues/25089)] - -IMPROVEMENTS: - -* auth: adds `VerboseLogging` option to auth-method config for debugging SSO [[GH-24892](https://github.com/hashicorp/nomad/issues/24892)] -* event stream: adds ability to authenticate using workload identities [[GH-24849](https://github.com/hashicorp/nomad/issues/24849)] - -BUG FIXES: - -* agent: Fixed a bug where Nomad error log messages within syslog showed via the notice priority [[GH-24820](https://github.com/hashicorp/nomad/issues/24820)] -* agent: Fixed a bug where all syslog entries were marked as notice when using JSON logging format [[GH-24865](https://github.com/hashicorp/nomad/issues/24865)] -* client: Fixed a bug where temporary RPC errors cause the client to poll for changes more frequently thereafter [[GH-25039](https://github.com/hashicorp/nomad/issues/25039)] -* csi: Fixed a bug where volume context from the plugin would be erased on volume updates [[GH-24922](https://github.com/hashicorp/nomad/issues/24922)] -* networking: check network namespaces on Linux during client restarts and fail the allocation if an existing namespace is invalid [[GH-24658](https://github.com/hashicorp/nomad/issues/24658)] -* reporting (Enterprise): Updated the reporting metric to utilize node active heartbeat count. 
[[GH-24919](https://github.com/hashicorp/nomad/issues/24919)] -* state store: fix for setting correct status for a job version when reverting, and also fixes an issue where jobs were briefly marked dead during restarts [[GH-24974](https://github.com/hashicorp/nomad/issues/24974)] -* ui: Ensure pending service check blocks are filled [[GH-24818](https://github.com/hashicorp/nomad/issues/24818)] -* ui: Remove unrequired node read API call when attempting to stream task logs [[GH-24973](https://github.com/hashicorp/nomad/issues/24973)] -* vault: Fixed a bug where successful renewal was logged as an error [[GH-25040](https://github.com/hashicorp/nomad/issues/25040)] - - -## 1.7.17 Enterprise (January 14, 2025) - - -IMPROVEMENTS: - -* deps: Upgraded aws-sdk-go from v1 to v2 [[GH-24720](https://github.com/hashicorp/nomad/issues/24720)] - -BUG FIXES: - -* drivers: validate logmon plugin during reattach [[GH-24798](https://github.com/hashicorp/nomad/issues/24798)] - -## 1.7.16 Enterprise (December 18, 2024) - -SECURITY: - -* api: sanitize the SignedIdentities in allocations to prevent privilege escalation through unredacted workload identity token impersonation associated with ACL policies. [[GH-24683](https://github.com/hashicorp/nomad/issues/24683)] -* security: Added more host environment variables to the default deny list for tasks [[GH-24540](https://github.com/hashicorp/nomad/issues/24540)] -* security: Explicitly set 'Content-Type' header to mitigate XSS vulnerability [[GH-24489](https://github.com/hashicorp/nomad/issues/24489)] -* security: add executeTemplate to default template function_denylist [[GH-24541](https://github.com/hashicorp/nomad/issues/24541)] - -BUG FIXES: - -* agent: Fixed a bug where `retry_join` gave up after a single failure, rather than retrying until max attempts had been reached [[GH-24561](https://github.com/hashicorp/nomad/issues/24561)] -* cli: Ensure the `operator autopilot health` command only outputs JSON when the `json` flag is supplied [[GH-24655](https://github.com/hashicorp/nomad/issues/24655)] -* consul: Fixed a bug where failures when syncing Consul checks could panic the Nomad agent [[GH-24513](https://github.com/hashicorp/nomad/issues/24513)] -* consul: Fixed a bug where non-root Nomad agents could not recreate a task's Consul token on task restart [[GH-24410](https://github.com/hashicorp/nomad/issues/24410)] -* csi: Fixed a bug where drivers that emit multiple topology segments would cause placements to fail [[GH-24522](https://github.com/hashicorp/nomad/issues/24522)] -* csi: Removed redundant namespace output from volume status command [[GH-24432](https://github.com/hashicorp/nomad/issues/24432)] -* discovery: Fixed a bug where IPv6 addresses would not be accepted from cloud autojoin [[GH-24649](https://github.com/hashicorp/nomad/issues/24649)] -* drivers: fix executor leak when drivers error starting tasks [[GH-24495](https://github.com/hashicorp/nomad/issues/24495)] -* executor: validate executor on reattach to avoid possibility of killing non-Nomad processes [[GH-24538](https://github.com/hashicorp/nomad/issues/24538)] -* fix: handles consul template re-renders on client restart [[GH-24399](https://github.com/hashicorp/nomad/issues/24399)] -* networking: use a tmpfs location for the state of CNI IPAM plugin used by bridge mode, to fix a bug where allocations would fail to restore after host reboot [[GH-24650](https://github.com/hashicorp/nomad/issues/24650)] -* scheduler: take all assigned cpu cores into account instead of only those part of the 
largest lifecycle [[GH-24304](https://github.com/hashicorp/nomad/issues/24304)] -* vault: Fixed a bug where expired secret leases were treated as non-fatal and retried [[GH-24409](https://github.com/hashicorp/nomad/issues/24409)] -* windows: Restore process accounting logic from Nomad 1.6.x [[GH-24494](https://github.com/hashicorp/nomad/issues/24494)] - -## 1.7.15 Enterprise (November 8, 2024) - -SECURITY: - -* csi: Fixed a bug where a user with csi-write-volume permissions to one namespace can create volumes in another namespace (CVE-2024-10975) [[GH-24396](https://github.com/hashicorp/nomad/issues/24396)] - -BUG FIXES: - -* connect: add validation to ensure that connect native services specify a port [[GH-24329](https://github.com/hashicorp/nomad/issues/24329)] -* deps: Fixed a bug where restarting Nomad could cause an unrelated process with the same PID as a failed executor to be killed [[GH-24265](https://github.com/hashicorp/nomad/issues/24265)] -* scheduler: fixed a bug where resource calculation did not account correctly for poststart tasks [[GH-24297](https://github.com/hashicorp/nomad/issues/24297)] - ## 1.7.14 Enterprise (October 21, 2024) IMPROVEMENTS: @@ -872,7 +717,7 @@ IMPROVEMENTS: * audit (Enterprise): Added ACL token role links to audit log auth objects [[GH-19415](https://github.com/hashicorp/nomad/issues/19415)] * ui: Added a new example template with Task Actions [[GH-19153](https://github.com/hashicorp/nomad/issues/19153)] -* ui: Don't allow new jobspec download until template is populated, and remove group count from jobs index [[GH-19377](https://github.com/hashicorp/nomad/issues/19377)] +* ui: dont allow new jobspec download until template is populated, and remove group count from jobs index [[GH-19377](https://github.com/hashicorp/nomad/issues/19377)] * ui: make the exec window look nicer on mobile screens [[GH-19332](https://github.com/hashicorp/nomad/issues/19332)] BUG FIXES: @@ -947,7 +792,7 @@ IMPROVEMENTS: * ui: for system and sysbatch jobs, now show client name on hover in job panel [[GH-19051](https://github.com/hashicorp/nomad/issues/19051)] * ui: nicer comment styles in UI example jobs [[GH-19037](https://github.com/hashicorp/nomad/issues/19037)] * ui: show plan output warnings alongside placement failures and dry-run info when running a job through the web ui [[GH-19225](https://github.com/hashicorp/nomad/issues/19225)] -* ui: simplify presentation of task event times (10m2.230948s becomes 10m2s etc.) [[GH-18595](https://github.com/hashicorp/nomad/issues/18595)] +* ui: simplify presentation of task event times (10m2.230948s bceomes 10m2s etc.) [[GH-18595](https://github.com/hashicorp/nomad/issues/18595)] * vars: Added a locking feature for Nomad Variables [[GH-18520](https://github.com/hashicorp/nomad/issues/18520)] DEPRECATIONS: diff --git a/CODEOWNERS b/CODEOWNERS index 9114b89772a..73cfb9f9e0d 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -2,20 +2,3 @@ /.release/ @hashicorp/github-nomad-core @hashicorp/nomad-eng /.github/workflows/build.yml @hashicorp/github-nomad-core @hashicorp/nomad-eng - -# codeowner default -* @hashicorp/github-nomad-core @hashicorp/nomad-eng - - -# engineering and web presence get notified of, and can approve changes to web tooling, but not content. - -/website/ @hashicorp/web-presence @hashicorp/github-nomad-core @hashicorp/nomad-eng -/website/data/ -/website/public/ -/website/content/ - -# education and engineering get notified of, and can approve changes to web content. 
- -/website/data/ @hashicorp/nomad-docs @hashicorp/github-nomad-core @hashicorp/nomad-eng -/website/public/ @hashicorp/nomad-docs @hashicorp/github-nomad-core @hashicorp/nomad-eng -/website/content/ @hashicorp/nomad-docs @hashicorp/github-nomad-core @hashicorp/nomad-eng diff --git a/GNUmakefile b/GNUmakefile index 251dba9f2fa..f8b0735563a 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -23,7 +23,7 @@ ifndef BIN BIN := $(GOPATH)/bin endif -GO_TAGS := hashicorpmetrics $(GO_TAGS) +GO_TAGS := $(GO_TAGS) ifeq ($(CI),true) GO_TAGS := codegen_generated $(GO_TAGS) diff --git a/acl/acl_test.go b/acl/acl_test.go index 24ccf2b4103..cf0c4bda3f4 100644 --- a/acl/acl_test.go +++ b/acl/acl_test.go @@ -79,12 +79,10 @@ func TestACLManagement(t *testing.T) { // Check default namespace rights must.True(t, acl.AllowNamespaceOperation("default", NamespaceCapabilityListJobs)) must.True(t, acl.AllowNamespaceOperation("default", NamespaceCapabilitySubmitJob)) - must.True(t, acl.AllowNamespaceOperation("default", NamespaceCapabilityHostVolumeCreate)) must.True(t, acl.AllowNamespace("default")) // Check non-specified namespace must.True(t, acl.AllowNamespaceOperation("foo", NamespaceCapabilityListJobs)) - must.True(t, acl.AllowNamespaceOperation("foo", NamespaceCapabilityHostVolumeCreate)) must.True(t, acl.AllowNamespace("foo")) // Check node pool rights. @@ -157,11 +155,9 @@ func TestACLMerge(t *testing.T) { // Check default namespace rights must.True(t, acl.AllowNamespaceOperation("default", NamespaceCapabilityListJobs)) must.False(t, acl.AllowNamespaceOperation("default", NamespaceCapabilitySubmitJob)) - must.False(t, acl.AllowNamespaceOperation("default", NamespaceCapabilityHostVolumeRegister)) // Check non-specified namespace must.False(t, acl.AllowNamespaceOperation("foo", NamespaceCapabilityListJobs)) - must.False(t, acl.AllowNamespaceOperation("foo", NamespaceCapabilityHostVolumeCreate)) // Check rights in the node pool specified in policies. 
must.True(t, acl.AllowNodePoolOperation("my-pool", NodePoolCapabilityRead)) diff --git a/acl/policy.go b/acl/policy.go index 17a7aed2170..c4fe9e4d673 100644 --- a/acl/policy.go +++ b/acl/policy.go @@ -47,11 +47,6 @@ const ( NamespaceCapabilityCSIReadVolume = "csi-read-volume" NamespaceCapabilityCSIListVolume = "csi-list-volume" NamespaceCapabilityCSIMountVolume = "csi-mount-volume" - NamespaceCapabilityHostVolumeCreate = "host-volume-create" - NamespaceCapabilityHostVolumeRegister = "host-volume-register" - NamespaceCapabilityHostVolumeRead = "host-volume-read" - NamespaceCapabilityHostVolumeWrite = "host-volume-write" - NamespaceCapabilityHostVolumeDelete = "host-volume-delete" NamespaceCapabilityListScalingPolicies = "list-scaling-policies" NamespaceCapabilityReadScalingPolicy = "read-scaling-policy" NamespaceCapabilityReadJobScaling = "read-job-scaling" @@ -212,7 +207,7 @@ func isNamespaceCapabilityValid(cap string) bool { NamespaceCapabilityReadFS, NamespaceCapabilityAllocLifecycle, NamespaceCapabilityAllocExec, NamespaceCapabilityAllocNodeExec, NamespaceCapabilityCSIReadVolume, NamespaceCapabilityCSIWriteVolume, NamespaceCapabilityCSIListVolume, NamespaceCapabilityCSIMountVolume, NamespaceCapabilityCSIRegisterPlugin, - NamespaceCapabilityListScalingPolicies, NamespaceCapabilityReadScalingPolicy, NamespaceCapabilityReadJobScaling, NamespaceCapabilityScaleJob, NamespaceCapabilityHostVolumeCreate, NamespaceCapabilityHostVolumeRegister, NamespaceCapabilityHostVolumeWrite, NamespaceCapabilityHostVolumeRead: + NamespaceCapabilityListScalingPolicies, NamespaceCapabilityReadScalingPolicy, NamespaceCapabilityReadJobScaling, NamespaceCapabilityScaleJob: return true // Separate the enterprise-only capabilities case NamespaceCapabilitySentinelOverride, NamespaceCapabilitySubmitRecommendation: @@ -246,7 +241,6 @@ func expandNamespacePolicy(policy string) []string { NamespaceCapabilityReadJobScaling, NamespaceCapabilityListScalingPolicies, NamespaceCapabilityReadScalingPolicy, - NamespaceCapabilityHostVolumeRead, } write := make([]string, len(read)) @@ -263,7 +257,6 @@ func expandNamespacePolicy(policy string) []string { NamespaceCapabilityCSIMountVolume, NamespaceCapabilityCSIWriteVolume, NamespaceCapabilitySubmitRecommendation, - NamespaceCapabilityHostVolumeCreate, }...) switch policy { @@ -285,32 +278,6 @@ func expandNamespacePolicy(policy string) []string { } } -// expandNamespaceCapabilities adds extra capabilities implied by fine-grained -// capabilities. -func expandNamespaceCapabilities(ns *NamespacePolicy) { - extraCaps := []string{} - for _, cap := range ns.Capabilities { - switch cap { - case NamespaceCapabilityHostVolumeWrite: - extraCaps = append(extraCaps, - NamespaceCapabilityHostVolumeRegister, - NamespaceCapabilityHostVolumeCreate, - NamespaceCapabilityHostVolumeDelete, - NamespaceCapabilityHostVolumeRead) - case NamespaceCapabilityHostVolumeRegister: - extraCaps = append(extraCaps, - NamespaceCapabilityHostVolumeCreate, - NamespaceCapabilityHostVolumeRead) - case NamespaceCapabilityHostVolumeCreate: - extraCaps = append(extraCaps, NamespaceCapabilityHostVolumeRead) - } - } - - // These may end up being duplicated, but they'll get deduplicated in NewACL - // when inserted into the radix tree. - ns.Capabilities = append(ns.Capabilities, extraCaps...) 
-} - func isNodePoolCapabilityValid(cap string) bool { switch cap { case NodePoolCapabilityDelete, NodePoolCapabilityRead, NodePoolCapabilityWrite, @@ -421,9 +388,6 @@ func Parse(rules string) (*Policy, error) { ns.Capabilities = append(ns.Capabilities, extraCap...) } - // Expand implicit capabilities - expandNamespaceCapabilities(ns) - if ns.Variables != nil { if len(ns.Variables.Paths) == 0 { return nil, fmt.Errorf("Invalid variable policy: no variable paths in namespace %s", ns.Name) diff --git a/acl/policy_test.go b/acl/policy_test.go index 938557aa08a..117b82ba3d6 100644 --- a/acl/policy_test.go +++ b/acl/policy_test.go @@ -5,6 +5,7 @@ package acl import ( "fmt" + "strings" "testing" "github.com/hashicorp/nomad/ci" @@ -16,9 +17,9 @@ func TestParse(t *testing.T) { ci.Parallel(t) type tcase struct { - Raw string - ExpectErr string - Expect *Policy + Raw string + ErrStr string + Expect *Policy } tcases := []tcase{ { @@ -42,7 +43,6 @@ func TestParse(t *testing.T) { NamespaceCapabilityReadJobScaling, NamespaceCapabilityListScalingPolicies, NamespaceCapabilityReadScalingPolicy, - NamespaceCapabilityHostVolumeRead, }, }, }, @@ -118,7 +118,6 @@ func TestParse(t *testing.T) { NamespaceCapabilityReadJobScaling, NamespaceCapabilityListScalingPolicies, NamespaceCapabilityReadScalingPolicy, - NamespaceCapabilityHostVolumeRead, }, }, { @@ -133,7 +132,6 @@ func TestParse(t *testing.T) { NamespaceCapabilityReadJobScaling, NamespaceCapabilityListScalingPolicies, NamespaceCapabilityReadScalingPolicy, - NamespaceCapabilityHostVolumeRead, NamespaceCapabilityScaleJob, NamespaceCapabilitySubmitJob, NamespaceCapabilityDispatchJob, @@ -144,8 +142,6 @@ func TestParse(t *testing.T) { NamespaceCapabilityCSIMountVolume, NamespaceCapabilityCSIWriteVolume, NamespaceCapabilitySubmitRecommendation, - NamespaceCapabilityHostVolumeCreate, - NamespaceCapabilityHostVolumeRead, }, }, { @@ -342,7 +338,6 @@ func TestParse(t *testing.T) { NamespaceCapabilityReadJobScaling, NamespaceCapabilityListScalingPolicies, NamespaceCapabilityReadScalingPolicy, - NamespaceCapabilityHostVolumeRead, }, }, { @@ -357,7 +352,6 @@ func TestParse(t *testing.T) { NamespaceCapabilityReadJobScaling, NamespaceCapabilityListScalingPolicies, NamespaceCapabilityReadScalingPolicy, - NamespaceCapabilityHostVolumeRead, NamespaceCapabilityScaleJob, NamespaceCapabilitySubmitJob, NamespaceCapabilityDispatchJob, @@ -368,8 +362,6 @@ func TestParse(t *testing.T) { NamespaceCapabilityCSIMountVolume, NamespaceCapabilityCSIWriteVolume, NamespaceCapabilitySubmitRecommendation, - NamespaceCapabilityHostVolumeCreate, - NamespaceCapabilityHostVolumeRead, }, }, { @@ -646,54 +638,6 @@ func TestParse(t *testing.T) { }, }, }, - { - ` - namespace "default" { - capabilities = ["host-volume-register"] - } - - namespace "other" { - capabilities = ["host-volume-create"] - } - - namespace "foo" { - capabilities = ["host-volume-write"] - } - `, - "", - &Policy{ - Namespaces: []*NamespacePolicy{ - { - Name: "default", - Policy: "", - Capabilities: []string{ - NamespaceCapabilityHostVolumeRegister, - NamespaceCapabilityHostVolumeCreate, - NamespaceCapabilityHostVolumeRead, - }, - }, - { - Name: "other", - Policy: "", - Capabilities: []string{ - NamespaceCapabilityHostVolumeCreate, - NamespaceCapabilityHostVolumeRead, - }, - }, - { - Name: "foo", - Policy: "", - Capabilities: []string{ - NamespaceCapabilityHostVolumeWrite, - NamespaceCapabilityHostVolumeRegister, - NamespaceCapabilityHostVolumeCreate, - NamespaceCapabilityHostVolumeDelete, - NamespaceCapabilityHostVolumeRead, - 
}, - }, - }, - }, - }, { ` node_pool "pool-read-only" { @@ -934,18 +878,22 @@ func TestParse(t *testing.T) { } for idx, tc := range tcases { - t.Run(fmt.Sprintf("%02d", idx), func(t *testing.T) { + t.Run(fmt.Sprintf("%d", idx), func(t *testing.T) { p, err := Parse(tc.Raw) - if tc.ExpectErr == "" { - must.NoError(t, err) - } else { - must.ErrorContains(t, err, tc.ExpectErr) + if err != nil { + if tc.ErrStr == "" { + t.Fatalf("Unexpected err: %v", err) + } + if !strings.Contains(err.Error(), tc.ErrStr) { + t.Fatalf("Unexpected err: %v", err) + } + return } - - if tc.Expect != nil { - tc.Expect.Raw = tc.Raw - must.Eq(t, tc.Expect, p) + if err == nil && tc.ErrStr != "" { + t.Fatalf("Missing expected err") } + tc.Expect.Raw = tc.Raw + assert.EqualValues(t, tc.Expect, p) }) } } diff --git a/api/contexts/contexts.go b/api/contexts/contexts.go index 20f099a38e7..5176f5b8290 100644 --- a/api/contexts/contexts.go +++ b/api/contexts/contexts.go @@ -23,7 +23,6 @@ const ( Plugins Context = "plugins" Variables Context = "vars" Volumes Context = "volumes" - HostVolumes Context = "host_volumes" // These Context types are used to associate a search result from a lower // level Nomad object with one of the higher level Context types above. diff --git a/api/host_volumes.go b/api/host_volumes.go deleted file mode 100644 index 576e2fbd6e0..00000000000 --- a/api/host_volumes.go +++ /dev/null @@ -1,246 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package api - -import "net/url" - -// HostVolume represents a Dynamic Host Volume: a volume associated with a -// specific Nomad client agent but created via API. -type HostVolume struct { - // Namespace is the Nomad namespace for the host volume, which constrains - // which jobs can mount it. - Namespace string `mapstructure:"namespace" hcl:"namespace"` - - // ID is a UUID-like string generated by the server. - ID string `mapstructure:"id" hcl:"id"` - - // Name is the name that group.volume will use to identify the volume - // source. Not expected to be unique. - Name string `mapstructure:"name" hcl:"name"` - - // PluginID is the name of the host volume plugin on the client that will be - // used for creating the volume. If omitted, the client will use its default - // built-in plugin. - PluginID string `mapstructure:"plugin_id" hcl:"plugin_id"` - - // NodePool is the node pool of the node where the volume is placed. If the - // user doesn't provide a node ID, a node will be selected using the - // NodePool and Constraints. If the user provides both NodePool and NodeID, - // NodePool will be used to validate the request. If omitted, the server - // will populate this value in before writing the volume to Raft. - NodePool string `mapstructure:"node_pool" hcl:"node_pool"` - - // NodeID is the node where the volume is placed. If the user doesn't - // provide a NodeID, one will be selected using the NodePool and - // Constraints. If omitted, this field will then be populated by the server - // before writing the volume to Raft. - NodeID string `mapstructure:"node_id" hcl:"node_id"` - - // Constraints are optional. If the NodeID is not provided, the NodePool and - // Constraints are used to select a node. If the NodeID is provided, - // Constraints are used to validate that the node meets those constraints at - // the time of volume creation. 
- Constraints []*Constraint `json:",omitempty" hcl:"constraint"` - - // Because storage may allow only specific intervals of size, we accept a - // min and max and return the actual capacity when the volume is created or - // updated on the client - RequestedCapacityMinBytes int64 `mapstructure:"capacity_min" hcl:"capacity_min"` - RequestedCapacityMaxBytes int64 `mapstructure:"capacity_max" hcl:"capacity_max"` - CapacityBytes int64 `mapstructure:"capacity" hcl:"capacity"` - - // RequestedCapabilities defines the options available to group.volume - // blocks. The scheduler checks against the listed capability blocks and - // selects a node for placement if *any* capability block works. - RequestedCapabilities []*HostVolumeCapability `hcl:"capability"` - - // Parameters are an opaque map of parameters for the host volume plugin. - Parameters map[string]string `json:",omitempty"` - - // HostPath is the path on disk where the volume's mount point was - // created. We record this to make debugging easier. - HostPath string `mapstructure:"host_path" hcl:"host_path"` - - // State represents the overall state of the volume. One of pending, ready, - // deleted. - State HostVolumeState - - CreateIndex uint64 - CreateTime int64 - - ModifyIndex uint64 - ModifyTime int64 - - // Allocations is the list of non-client-terminal allocations with claims on - // this host volume. They are denormalized on read and this field will be - // never written to Raft - Allocations []*AllocationListStub `json:",omitempty" mapstructure:"-" hcl:"-"` -} - -// HostVolume state reports the current status of the host volume -type HostVolumeState string - -const ( - HostVolumeStatePending HostVolumeState = "pending" - HostVolumeStateReady HostVolumeState = "ready" - HostVolumeStateUnavailable HostVolumeState = "unavailable" -) - -// HostVolumeCapability is the requested attachment and access mode for a volume -type HostVolumeCapability struct { - AttachmentMode HostVolumeAttachmentMode `mapstructure:"attachment_mode" hcl:"attachment_mode"` - AccessMode HostVolumeAccessMode `mapstructure:"access_mode" hcl:"access_mode"` -} - -// HostVolumeAttachmentMode chooses the type of storage API that will be used to -// interact with the device. -type HostVolumeAttachmentMode string - -const ( - HostVolumeAttachmentModeUnknown HostVolumeAttachmentMode = "" - HostVolumeAttachmentModeBlockDevice HostVolumeAttachmentMode = "block-device" - HostVolumeAttachmentModeFilesystem HostVolumeAttachmentMode = "file-system" -) - -// HostVolumeAccessMode indicates how Nomad should make the volume available to -// concurrent allocations. -type HostVolumeAccessMode string - -const ( - HostVolumeAccessModeUnknown HostVolumeAccessMode = "" - - HostVolumeAccessModeSingleNodeReader HostVolumeAccessMode = "single-node-reader-only" - HostVolumeAccessModeSingleNodeWriter HostVolumeAccessMode = "single-node-writer" - HostVolumeAccessModeSingleNodeSingleWriter HostVolumeAccessMode = "single-node-single-writer" - HostVolumeAccessModeSingleNodeMultiWriter HostVolumeAccessMode = "single-node-multi-writer" -) - -// HostVolumeStub is used for responses for the List Volumes endpoint -type HostVolumeStub struct { - Namespace string - ID string - Name string - PluginID string - NodePool string - NodeID string - CapacityBytes int64 - State HostVolumeState - - CreateIndex uint64 - CreateTime int64 - - ModifyIndex uint64 - ModifyTime int64 -} - -// HostVolumes is used to access the host volumes API. 
-type HostVolumes struct { - client *Client -} - -// HostVolumes returns a new handle on the host volumes API. -func (c *Client) HostVolumes() *HostVolumes { - return &HostVolumes{client: c} -} - -type HostVolumeCreateRequest struct { - Volume *HostVolume - - // PolicyOverride overrides Sentinel soft-mandatory policy enforcement - PolicyOverride bool -} - -type HostVolumeRegisterRequest struct { - Volume *HostVolume - - // PolicyOverride overrides Sentinel soft-mandatory policy enforcement - PolicyOverride bool -} - -type HostVolumeCreateResponse struct { - Volume *HostVolume - Warnings string -} - -type HostVolumeRegisterResponse struct { - Volume *HostVolume - Warnings string -} - -type HostVolumeListRequest struct { - NodeID string - NodePool string -} - -type HostVolumeDeleteRequest struct { - ID string -} - -// Create forwards to client agents so a host volume can be created on those -// hosts, and registers the volume with Nomad servers. -func (hv *HostVolumes) Create(req *HostVolumeCreateRequest, opts *WriteOptions) (*HostVolumeCreateResponse, *WriteMeta, error) { - var out *HostVolumeCreateResponse - wm, err := hv.client.put("/v1/volume/host/create", req, &out, opts) - if err != nil { - return nil, wm, err - } - return out, wm, nil -} - -// Register registers a host volume that was created out-of-band with the Nomad -// servers. -func (hv *HostVolumes) Register(req *HostVolumeRegisterRequest, opts *WriteOptions) (*HostVolumeRegisterResponse, *WriteMeta, error) { - var out *HostVolumeRegisterResponse - wm, err := hv.client.put("/v1/volume/host/register", req, &out, opts) - if err != nil { - return nil, wm, err - } - return out, wm, nil -} - -// Get queries for a single host volume, by ID -func (hv *HostVolumes) Get(id string, opts *QueryOptions) (*HostVolume, *QueryMeta, error) { - var out *HostVolume - path, err := url.JoinPath("/v1/volume/host/", url.PathEscape(id)) - if err != nil { - return nil, nil, err - } - qm, err := hv.client.query(path, &out, opts) - if err != nil { - return nil, qm, err - } - return out, qm, nil -} - -// List queries for a set of host volumes, by namespace, node, node pool, or -// name prefix. -func (hv *HostVolumes) List(req *HostVolumeListRequest, opts *QueryOptions) ([]*HostVolumeStub, *QueryMeta, error) { - var out []*HostVolumeStub - qv := url.Values{} - qv.Set("type", "host") - if req != nil { - if req.NodeID != "" { - qv.Set("node_id", req.NodeID) - } - if req.NodePool != "" { - qv.Set("node_pool", req.NodePool) - } - } - - qm, err := hv.client.query("/v1/volumes?"+qv.Encode(), &out, opts) - if err != nil { - return nil, qm, err - } - return out, qm, nil -} - -// Delete deletes a host volume -func (hv *HostVolumes) Delete(req *HostVolumeDeleteRequest, opts *WriteOptions) (*WriteMeta, error) { - path, err := url.JoinPath("/v1/volume/host/", url.PathEscape(req.ID)) - if err != nil { - return nil, err - } - wm, err := hv.client.delete(path, nil, nil, opts) - return wm, err -} diff --git a/api/nodes.go b/api/nodes.go index 1d4cf4e65d9..809382bf79b 100644 --- a/api/nodes.go +++ b/api/nodes.go @@ -517,8 +517,6 @@ type DriverInfo struct { type HostVolumeInfo struct { Path string ReadOnly bool - // ID is set for dynamic host volumes only. - ID string } // HostNetworkInfo is used to return metadata about a given HostNetwork diff --git a/api/quota.go b/api/quota.go index 3423440d379..261d3d1d101 100644 --- a/api/quota.go +++ b/api/quota.go @@ -127,43 +127,17 @@ type QuotaLimit struct { // referencing namespace in the region. 
A value of zero is treated as // unlimited and a negative value is treated as fully disallowed. This is // useful for once we support GPUs - RegionLimit *QuotaResources + RegionLimit *Resources // VariablesLimit is the maximum total size of all variables // Variable.EncryptedData. A value of zero is treated as unlimited and a // negative value is treated as fully disallowed. - // - // DEPRECATED: use RegionLimit.Storage.VariablesMB instead. This field will - // be removed in Nomad 1.12.0. VariablesLimit *int `mapstructure:"variables_limit" hcl:"variables_limit,optional"` // Hash is the hash of the object and is used to make replication efficient. Hash []byte } -type QuotaResources struct { - CPU *int `hcl:"cpu,optional"` - Cores *int `hcl:"cores,optional"` - MemoryMB *int `mapstructure:"memory" hcl:"memory,optional"` - MemoryMaxMB *int `mapstructure:"memory_max" hcl:"memory_max,optional"` - Devices []*RequestedDevice `hcl:"device,block"` - NUMA *NUMAResource `hcl:"numa,block"` - SecretsMB *int `mapstructure:"secrets" hcl:"secrets,optional"` - Storage *QuotaStorageResources `mapstructure:"storage" hcl:"storage,block"` -} - -type QuotaStorageResources struct { - // VariablesMB is the maximum total size of all variables - // Variable.EncryptedData, in megabytes (2^20 bytes). A value of zero is - // treated as unlimited and a negative value is treated as fully disallowed. - VariablesMB int `hcl:"variables"` - - // HostVolumesMB is the maximum provisioned size of all dynamic host - // volumes, in megabytes (2^20 bytes). A value of zero is treated as - // unlimited and a negative value is treated as fully disallowed. - HostVolumesMB int `hcl:"host_volumes"` -} - // QuotaUsage is the resource usage of a Quota type QuotaUsage struct { Name string diff --git a/api/sentinel.go b/api/sentinel.go index 1e93308847d..e8a0644ae16 100644 --- a/api/sentinel.go +++ b/api/sentinel.go @@ -82,9 +82,3 @@ type SentinelPolicyListStub struct { CreateIndex uint64 ModifyIndex uint64 } - -// Possible Sentinel scopes -const ( - SentinelScopeSubmitJob = "submit-job" - SentinelScopeSubmitHostVolume = "submit-host-volume" -) diff --git a/api/tasks.go b/api/tasks.go index cdf747a7290..21d99bf4c2c 100644 --- a/api/tasks.go +++ b/api/tasks.go @@ -455,7 +455,6 @@ type VolumeRequest struct { Type string `hcl:"type,optional"` Source string `hcl:"source,optional"` ReadOnly bool `hcl:"read_only,optional"` - Sticky bool `hcl:"sticky,optional"` AccessMode string `hcl:"access_mode,optional"` AttachmentMode string `hcl:"attachment_mode,optional"` MountOptions *CSIMountOptions `hcl:"mount_options,block"` @@ -1112,6 +1111,17 @@ type TaskState struct { StartedAt time.Time FinishedAt time.Time Events []*TaskEvent + + // Experimental - TaskHandle is based on drivers.TaskHandle and used + // by remote task drivers to migrate task handles between allocations. + TaskHandle *TaskHandle +} + +// Experimental - TaskHandle is based on drivers.TaskHandle and used by remote +// task drivers to migrate task handles between allocations. 
+type TaskHandle struct { + Version int + DriverState []byte } const ( diff --git a/api/util_test.go b/api/util_test.go index 769027020da..0e179b307a6 100644 --- a/api/util_test.go +++ b/api/util_test.go @@ -116,7 +116,7 @@ func testQuotaSpec() *QuotaSpec { Limits: []*QuotaLimit{ { Region: "global", - RegionLimit: &QuotaResources{ + RegionLimit: &Resources{ CPU: pointerOf(2000), MemoryMB: pointerOf(2000), Devices: []*RequestedDevice{{ diff --git a/ci/test-core.json b/ci/test-core.json index 5ec461809ed..95f354fbe99 100644 --- a/ci/test-core.json +++ b/ci/test-core.json @@ -17,7 +17,6 @@ "client/dynamicplugins/...", "client/fingerprint/...", "client/hoststats/...", - "client/hostvolumemanager/...", "client/interfaces/...", "client/lib/...", "client/logmon/...", diff --git a/client/acl.go b/client/acl.go index 914f4fe71de..fee8472d403 100644 --- a/client/acl.go +++ b/client/acl.go @@ -6,7 +6,7 @@ package client import ( "time" - metrics "github.com/hashicorp/go-metrics/compat" + "github.com/armon/go-metrics" "github.com/hashicorp/go-set/v3" "github.com/hashicorp/nomad/acl" "github.com/hashicorp/nomad/nomad/structs" diff --git a/client/agent_endpoint.go b/client/agent_endpoint.go index a47dcde051a..4d676bc3129 100644 --- a/client/agent_endpoint.go +++ b/client/agent_endpoint.go @@ -18,8 +18,8 @@ import ( "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/nomad/structs" + metrics "github.com/armon/go-metrics" log "github.com/hashicorp/go-hclog" - metrics "github.com/hashicorp/go-metrics/compat" sframer "github.com/hashicorp/nomad/client/lib/streamframer" cstructs "github.com/hashicorp/nomad/client/structs" diff --git a/client/alloc_endpoint.go b/client/alloc_endpoint.go index 3a7fde44546..e8a98745f56 100644 --- a/client/alloc_endpoint.go +++ b/client/alloc_endpoint.go @@ -12,7 +12,7 @@ import ( "net/http" "time" - metrics "github.com/hashicorp/go-metrics/compat" + "github.com/armon/go-metrics" "github.com/hashicorp/go-msgpack/v2/codec" "github.com/hashicorp/nomad/acl" cstructs "github.com/hashicorp/nomad/client/structs" diff --git a/client/allocrunner/alloc_runner.go b/client/allocrunner/alloc_runner.go index 095b93cb578..9b395984545 100644 --- a/client/allocrunner/alloc_runner.go +++ b/client/allocrunner/alloc_runner.go @@ -10,8 +10,8 @@ import ( "sync" "time" + "github.com/armon/go-metrics" log "github.com/hashicorp/go-hclog" - metrics "github.com/hashicorp/go-metrics/compat" multierror "github.com/hashicorp/go-multierror" "github.com/hashicorp/nomad/client/allocdir" "github.com/hashicorp/nomad/client/allocrunner/hookstats" diff --git a/client/allocrunner/alloc_runner_test.go b/client/allocrunner/alloc_runner_test.go index af967985463..591aa596ec5 100644 --- a/client/allocrunner/alloc_runner_test.go +++ b/client/allocrunner/alloc_runner_test.go @@ -12,8 +12,8 @@ import ( "testing" "time" + "github.com/armon/go-metrics" "github.com/hashicorp/consul/api" - metrics "github.com/hashicorp/go-metrics/compat" multierror "github.com/hashicorp/go-multierror" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/allochealth" diff --git a/client/allocrunner/hookstats/hookstats.go b/client/allocrunner/hookstats/hookstats.go index 8b6543ae181..583c441419c 100644 --- a/client/allocrunner/hookstats/hookstats.go +++ b/client/allocrunner/hookstats/hookstats.go @@ -6,7 +6,7 @@ package hookstats import ( "time" - metrics "github.com/hashicorp/go-metrics/compat" + "github.com/armon/go-metrics" "github.com/hashicorp/nomad/client/allocrunner/interfaces" ) diff --git 
a/client/allocrunner/hookstats/hookstats_test.go b/client/allocrunner/hookstats/hookstats_test.go index 834f01f3b04..ecc4d3f5794 100644 --- a/client/allocrunner/hookstats/hookstats_test.go +++ b/client/allocrunner/hookstats/hookstats_test.go @@ -8,7 +8,7 @@ import ( "testing" "time" - metrics "github.com/hashicorp/go-metrics/compat" + "github.com/armon/go-metrics" "github.com/hashicorp/nomad/ci" "github.com/shoenig/test/must" ) diff --git a/client/allocrunner/networking_iptables_test.go b/client/allocrunner/networking_iptables_test.go index fc9d4ce3fb5..c7751892cb9 100644 --- a/client/allocrunner/networking_iptables_test.go +++ b/client/allocrunner/networking_iptables_test.go @@ -1,8 +1,6 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: BUSL-1.1 -//go:build linux - package allocrunner import ( diff --git a/client/allocrunner/taskrunner/remotetask_hook.go b/client/allocrunner/taskrunner/remotetask_hook.go new file mode 100644 index 00000000000..edbbda48b62 --- /dev/null +++ b/client/allocrunner/taskrunner/remotetask_hook.go @@ -0,0 +1,128 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package taskrunner + +import ( + "context" + + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/nomad/client/allocrunner/interfaces" + "github.com/hashicorp/nomad/nomad/structs" + "github.com/hashicorp/nomad/plugins/drivers" +) + +var _ interfaces.TaskPrestartHook = (*remoteTaskHook)(nil) +var _ interfaces.TaskPreKillHook = (*remoteTaskHook)(nil) + +// remoteTaskHook reattaches to remotely executing tasks. +// Deprecated: remote tasks drivers are no longer developed or supported. +type remoteTaskHook struct { + tr *TaskRunner + + logger hclog.Logger +} + +func newRemoteTaskHook(tr *TaskRunner, logger hclog.Logger) interfaces.TaskHook { + h := &remoteTaskHook{ + tr: tr, + } + h.logger = logger.Named(h.Name()) + return h +} + +func (h *remoteTaskHook) Name() string { + return "remote_task" +} + +// Prestart performs 2 remote task driver related tasks: +// 1. If there is no local handle, see if there is a handle propagated from a +// previous alloc to be restored. +// 2. If the alloc is lost make sure the task signal is set to detach instead +// of kill. +func (h *remoteTaskHook) Prestart(ctx context.Context, req *interfaces.TaskPrestartRequest, resp *interfaces.TaskPrestartResponse) error { + if h.tr.getDriverHandle() != nil { + // Driver handle already exists so don't try to load remote + // task handle + return nil + } + + h.tr.stateLock.Lock() + th := drivers.NewTaskHandleFromState(h.tr.state) + h.tr.stateLock.Unlock() + + // Task handle will be nil if there was no previous allocation or if + // this is a destructive update + if th == nil { + resp.Done = true + return nil + } + + // The task config is unique per invocation so recreate it here + th.Config = h.tr.buildTaskConfig() + + if err := h.tr.driver.RecoverTask(th); err != nil { + // Soft error here to let a new instance get started instead of + // failing the task since retrying is unlikely to help. + h.logger.Error("error recovering task state", "error", err) + return nil + } + + taskInfo, err := h.tr.driver.InspectTask(th.Config.ID) + if err != nil { + // Soft error here to let a new instance get started instead of + // failing the task since retrying is unlikely to help. 
+ h.logger.Error("error inspecting recovered task state", "error", err) + return nil + } + + h.tr.setDriverHandle(NewDriverHandle(h.tr.driver, th.Config.ID, h.tr.Task(), h.tr.clientConfig.MaxKillTimeout, taskInfo.NetworkOverride)) + + h.tr.stateLock.Lock() + h.tr.localState.TaskHandle = th + h.tr.localState.DriverNetwork = taskInfo.NetworkOverride + h.tr.stateLock.Unlock() + + // Ensure the signal is set according to the allocation's state + h.setSignal(h.tr.Alloc()) + + // Emit TaskStarted manually since the normal task runner logic will + // treat this task like a restored task and skip emitting started. + h.tr.UpdateState(structs.TaskStateRunning, structs.NewTaskEvent(structs.TaskStarted)) + + return nil +} + +// PreKilling tells the remote task driver to detach a remote task instead of +// stopping it. +func (h *remoteTaskHook) PreKilling(ctx context.Context, req *interfaces.TaskPreKillRequest, resp *interfaces.TaskPreKillResponse) error { + alloc := h.tr.Alloc() + h.setSignal(alloc) + return nil +} + +// setSignal to detach if the allocation is lost or draining. Safe to call +// multiple times as it only transitions to using detach -- never back to kill. +func (h *remoteTaskHook) setSignal(alloc *structs.Allocation) { + driverHandle := h.tr.getDriverHandle() + if driverHandle == nil { + // Nothing to do exit early + return + } + + switch { + case alloc.ClientStatus == structs.AllocClientStatusLost: + // Continue on; lost allocs should just detach + h.logger.Debug("detaching from remote task since alloc was lost") + case alloc.DesiredTransition.ShouldMigrate(): + // Continue on; migrating allocs should just detach + h.logger.Debug("detaching from remote task since alloc was drained") + default: + // Nothing to do exit early + return + } + + // Set DetachSignal to indicate to the remote task driver that it + // should detach this remote task and ignore it. + driverHandle.SetKillSignal(drivers.DetachSignal) +} diff --git a/client/allocrunner/taskrunner/sids_hook_test.go b/client/allocrunner/taskrunner/sids_hook_test.go index 35d0b88294c..0df13f3cb5d 100644 --- a/client/allocrunner/taskrunner/sids_hook_test.go +++ b/client/allocrunner/taskrunner/sids_hook_test.go @@ -1,8 +1,8 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: BUSL-1.1 -//go:build linux -// +build linux +//go:build !windows +// +build !windows // todo(shoenig): Once Connect is supported on Windows, we'll need to make this // set of tests work there too. 
diff --git a/client/allocrunner/taskrunner/task_runner.go b/client/allocrunner/taskrunner/task_runner.go index 0719c021309..b93b75e3d83 100644 --- a/client/allocrunner/taskrunner/task_runner.go +++ b/client/allocrunner/taskrunner/task_runner.go @@ -12,8 +12,8 @@ import ( "sync" "time" + metrics "github.com/armon/go-metrics" log "github.com/hashicorp/go-hclog" - metrics "github.com/hashicorp/go-metrics/compat" multierror "github.com/hashicorp/go-multierror" "github.com/hashicorp/hcl/v2/hcldec" "github.com/hashicorp/nomad/client/allocdir" @@ -1364,6 +1364,12 @@ func (tr *TaskRunner) UpdateState(state string, event *structs.TaskEvent) { tr.logger.Error("error persisting task state", "error", err, "event", event, "state", state) } + // Store task handle for remote tasks + if tr.driverCapabilities != nil && tr.driverCapabilities.RemoteTasks { + tr.logger.Trace("storing remote task handle state") + tr.localState.TaskHandle.Store(tr.state) + } + // Notify the alloc runner of the transition tr.stateUpdater.TaskStateUpdated() } diff --git a/client/allocrunner/taskrunner/task_runner_hooks.go b/client/allocrunner/taskrunner/task_runner_hooks.go index b49b686d2a6..86b1cebd41a 100644 --- a/client/allocrunner/taskrunner/task_runner_hooks.go +++ b/client/allocrunner/taskrunner/task_runner_hooks.go @@ -187,6 +187,12 @@ func (tr *TaskRunner) initHooks() { logger: hookLogger, })) + // If this task driver has remote capabilities, add the remote task + // hook. + if tr.driverCapabilities.RemoteTasks { + tr.runnerHooks = append(tr.runnerHooks, newRemoteTaskHook(tr, hookLogger)) + } + // If this task has a pause schedule, initialize the pause (Enterprise) if task.Schedule != nil { tr.runnerHooks = append(tr.runnerHooks, newPauseHook(tr, hookLogger)) diff --git a/client/allocrunner/taskrunner/task_runner_test.go b/client/allocrunner/taskrunner/task_runner_test.go index 5ec2f7c5d8d..3430e3a3e80 100644 --- a/client/allocrunner/taskrunner/task_runner_test.go +++ b/client/allocrunner/taskrunner/task_runner_test.go @@ -1,8 +1,6 @@ // Copyright (c) HashiCorp, Inc. 
// SPDX-License-Identifier: BUSL-1.1 -//go:build linux - package taskrunner import ( @@ -20,9 +18,9 @@ import ( "testing" "time" + "github.com/armon/go-metrics" "github.com/golang/snappy" consulapi "github.com/hashicorp/consul/api" - metrics "github.com/hashicorp/go-metrics/compat" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/allocdir" "github.com/hashicorp/nomad/client/allocrunner/hookstats" diff --git a/client/allocrunner/taskrunner/tasklet.go b/client/allocrunner/taskrunner/tasklet.go index 553a2cd10a6..c402f7dcbbd 100644 --- a/client/allocrunner/taskrunner/tasklet.go +++ b/client/allocrunner/taskrunner/tasklet.go @@ -7,8 +7,8 @@ import ( "context" "time" + metrics "github.com/armon/go-metrics" log "github.com/hashicorp/go-hclog" - metrics "github.com/hashicorp/go-metrics/compat" "github.com/hashicorp/nomad/client/allocrunner/taskrunner/interfaces" ) diff --git a/client/client.go b/client/client.go index 608efcb5ccb..0a8d325d55f 100644 --- a/client/client.go +++ b/client/client.go @@ -18,9 +18,9 @@ import ( "sync" "time" + metrics "github.com/armon/go-metrics" consulapi "github.com/hashicorp/consul/api" hclog "github.com/hashicorp/go-hclog" - metrics "github.com/hashicorp/go-metrics/compat" multierror "github.com/hashicorp/go-multierror" "github.com/hashicorp/nomad/client/allocdir" "github.com/hashicorp/nomad/client/allocrunner" @@ -34,7 +34,6 @@ import ( "github.com/hashicorp/nomad/client/dynamicplugins" "github.com/hashicorp/nomad/client/fingerprint" "github.com/hashicorp/nomad/client/hoststats" - hvm "github.com/hashicorp/nomad/client/hostvolumemanager" cinterfaces "github.com/hashicorp/nomad/client/interfaces" "github.com/hashicorp/nomad/client/lib/cgroupslib" "github.com/hashicorp/nomad/client/lib/numalib" @@ -291,8 +290,6 @@ type Client struct { // drivermanager is responsible for managing driver plugins drivermanager drivermanager.Manager - hostVolumeManager *hvm.HostVolumeManager - // baseLabels are used when emitting tagged metrics. All client metrics will // have these tags, and optionally more. baseLabels []metrics.Label @@ -409,11 +406,9 @@ func NewClient(cfg *config.Config, consulCatalog consul.CatalogAPI, consulProxie } c.batchNodeUpdates = newBatchNodeUpdates( - c.logger, c.updateNodeFromDriver, c.updateNodeFromDevices, c.updateNodeFromCSI, - c.updateNodeFromHostVol, ) // Initialize the server manager @@ -538,16 +533,6 @@ func NewClient(cfg *config.Config, consulCatalog consul.CatalogAPI, consulProxie c.devicemanager = devManager c.pluginManagers.RegisterAndRun(devManager) - // set up dynamic host volume manager - c.hostVolumeManager = hvm.NewHostVolumeManager(logger, hvm.Config{ - PluginDir: c.GetConfig().HostVolumePluginDir, - VolumesDir: c.GetConfig().HostVolumesDir, - NodePool: c.Node().NodePool, - StateMgr: c.stateDB, - UpdateNodeVols: c.batchNodeUpdates.updateNodeFromHostVolume, - }) - c.pluginManagers.RegisterAndRun(c.hostVolumeManager) - // Set up the service registration wrapper using the Consul and Nomad // implementations. The Nomad implementation is only ever used on the // client, so we do that here rather than within the agent. @@ -704,13 +689,6 @@ func (c *Client) init() error { c.stateDB = db - // Ensure host_volumes_dir config is not empty. - if conf.HostVolumesDir == "" { - conf = c.UpdateConfig(func(c *config.Config) { - c.HostVolumesDir = filepath.Join(conf.StateDir, "host_volumes") - }) - } - // Ensure the alloc mounts dir exists if we are configured with a custom path. 
if conf.AllocMountsDir != "" { if err := os.MkdirAll(conf.AllocMountsDir, 0o711); err != nil { @@ -1597,12 +1575,14 @@ func (c *Client) setupNode() error { } node.CgroupParent = newConfig.CgroupParent if node.HostVolumes == nil { - node.HostVolumes = make(map[string]*structs.ClientHostVolumeConfig, len(newConfig.HostVolumes)) - for k, v := range newConfig.HostVolumes { - if _, err := os.Stat(v.Path); err != nil { - return fmt.Errorf("failed to validate volume %s, err: %w", v.Name, err) + if l := len(newConfig.HostVolumes); l != 0 { + node.HostVolumes = make(map[string]*structs.ClientHostVolumeConfig, l) + for k, v := range newConfig.HostVolumes { + if _, err := os.Stat(v.Path); err != nil { + return fmt.Errorf("failed to validate volume %s, err: %v", v.Name, err) + } + node.HostVolumes[k] = v.Copy() } - node.HostVolumes[k] = v.Copy() } } if node.HostNetworks == nil { diff --git a/client/client_stats_endpoint.go b/client/client_stats_endpoint.go index c2f763889e0..ac5ddc81e9c 100644 --- a/client/client_stats_endpoint.go +++ b/client/client_stats_endpoint.go @@ -6,7 +6,7 @@ package client import ( "time" - metrics "github.com/hashicorp/go-metrics/compat" + metrics "github.com/armon/go-metrics" "github.com/hashicorp/nomad/client/structs" nstructs "github.com/hashicorp/nomad/nomad/structs" ) diff --git a/client/client_test.go b/client/client_test.go index bc3d3e04826..40a58033233 100644 --- a/client/client_test.go +++ b/client/client_test.go @@ -14,8 +14,8 @@ import ( "testing" "time" + "github.com/armon/go-metrics" memdb "github.com/hashicorp/go-memdb" - metrics "github.com/hashicorp/go-metrics/compat" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/allocrunner" "github.com/hashicorp/nomad/client/allocrunner/interfaces" diff --git a/client/config/arconfig.go b/client/config/arconfig.go index 8f29e803dae..e2e7f16e7e7 100644 --- a/client/config/arconfig.go +++ b/client/config/arconfig.go @@ -6,8 +6,8 @@ package config import ( "context" + "github.com/armon/go-metrics" log "github.com/hashicorp/go-hclog" - metrics "github.com/hashicorp/go-metrics/compat" "github.com/hashicorp/nomad/client/allocdir" arinterfaces "github.com/hashicorp/nomad/client/allocrunner/interfaces" "github.com/hashicorp/nomad/client/consul" diff --git a/client/config/config.go b/client/config/config.go index 5666cb8c839..03c861b0075 100644 --- a/client/config/config.go +++ b/client/config/config.go @@ -317,13 +317,6 @@ type Config struct { // HostVolumes is a map of the configured host volumes by name. HostVolumes map[string]*structs.ClientHostVolumeConfig - // HostVolumesDir is the suggested directory for plugins to put volumes. - // Volume plugins may ignore this suggestion, but we provide this default. - HostVolumesDir string - - // HostVolumePluginDir is the directory with dynamic host volume plugins. - HostVolumePluginDir string - // HostNetworks is a map of the conigured host networks by name. 
HostNetworks map[string]*structs.ClientHostNetworkConfig diff --git a/client/csi_endpoint.go b/client/csi_endpoint.go index 0135f0a8fbc..9d32f27cd1e 100644 --- a/client/csi_endpoint.go +++ b/client/csi_endpoint.go @@ -9,8 +9,8 @@ import ( "fmt" "time" + metrics "github.com/armon/go-metrics" grpc_retry "github.com/grpc-ecosystem/go-grpc-middleware/retry" - metrics "github.com/hashicorp/go-metrics/compat" "github.com/hashicorp/nomad/client/dynamicplugins" "github.com/hashicorp/nomad/client/pluginmanager/csimanager" diff --git a/client/csi_endpoint_test.go b/client/csi_endpoint_test.go index e05fac36b33..e9b2cc267ad 100644 --- a/client/csi_endpoint_test.go +++ b/client/csi_endpoint_test.go @@ -80,7 +80,7 @@ func TestCSIController_AttachVolume(t *testing.T) { VolumeID: "1234-4321-1234-4321", ClientCSINodeID: "abcde", AttachmentMode: nstructs.CSIVolumeAttachmentModeFilesystem, - AccessMode: nstructs.VolumeAccessMode("foo"), + AccessMode: nstructs.CSIVolumeAccessMode("foo"), }, ExpectedErr: errors.New("CSI.ControllerAttachVolume: unknown volume access mode: foo"), }, @@ -93,7 +93,7 @@ func TestCSIController_AttachVolume(t *testing.T) { VolumeID: "1234-4321-1234-4321", ClientCSINodeID: "abcde", AccessMode: nstructs.CSIVolumeAccessModeMultiNodeReader, - AttachmentMode: nstructs.VolumeAttachmentMode("bar"), + AttachmentMode: nstructs.CSIVolumeAttachmentMode("bar"), }, ExpectedErr: errors.New("CSI.ControllerAttachVolume: unknown volume attachment mode: bar"), }, @@ -217,7 +217,7 @@ func TestCSIController_ValidateVolume(t *testing.T) { }, VolumeID: "1234-4321-1234-4321", VolumeCapabilities: []*nstructs.CSIVolumeCapability{{ - AttachmentMode: nstructs.VolumeAttachmentMode("bar"), + AttachmentMode: nstructs.CSIVolumeAttachmentMode("bar"), AccessMode: nstructs.CSIVolumeAccessModeMultiNodeReader, }}, }, @@ -232,7 +232,7 @@ func TestCSIController_ValidateVolume(t *testing.T) { VolumeID: "1234-4321-1234-4321", VolumeCapabilities: []*nstructs.CSIVolumeCapability{{ AttachmentMode: nstructs.CSIVolumeAttachmentModeFilesystem, - AccessMode: nstructs.VolumeAccessMode("foo"), + AccessMode: nstructs.CSIVolumeAccessMode("foo"), }}, }, ExpectedErr: errors.New("CSI.ControllerValidateVolume: unknown volume access mode: foo"), @@ -395,7 +395,7 @@ func TestCSIController_CreateVolume(t *testing.T) { VolumeCapabilities: []*nstructs.CSIVolumeCapability{ { AttachmentMode: nstructs.CSIVolumeAttachmentModeFilesystem, - AccessMode: nstructs.VolumeAccessMode("foo"), + AccessMode: nstructs.CSIVolumeAccessMode("foo"), }, }, }, @@ -411,7 +411,7 @@ func TestCSIController_CreateVolume(t *testing.T) { VolumeCapabilities: []*nstructs.CSIVolumeCapability{ { AccessMode: nstructs.CSIVolumeAccessModeMultiNodeReader, - AttachmentMode: nstructs.VolumeAttachmentMode("bar"), + AttachmentMode: nstructs.CSIVolumeAttachmentMode("bar"), }, }, }, diff --git a/client/fingerprint/dynamic_host_volumes.go b/client/fingerprint/dynamic_host_volumes.go deleted file mode 100644 index 290733ce81d..00000000000 --- a/client/fingerprint/dynamic_host_volumes.go +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -package fingerprint - -import ( - "context" - "os" - "strings" - "sync" - "time" - - "github.com/hashicorp/go-hclog" - hvm "github.com/hashicorp/nomad/client/hostvolumemanager" - "github.com/hashicorp/nomad/helper" -) - -func NewPluginsHostVolumeFingerprint(logger hclog.Logger) Fingerprint { - return &DynamicHostVolumePluginFingerprint{ - logger: logger.Named("host_volume_plugins"), - } -} - -var _ ReloadableFingerprint = &DynamicHostVolumePluginFingerprint{} - -type DynamicHostVolumePluginFingerprint struct { - logger hclog.Logger -} - -func (h *DynamicHostVolumePluginFingerprint) Reload() { - // host volume plugins are re-detected on agent reload -} - -func (h *DynamicHostVolumePluginFingerprint) Fingerprint(request *FingerprintRequest, response *FingerprintResponse) error { - // always add "mkdir" plugin - h.logger.Debug("detected plugin built-in", - "plugin_id", hvm.HostVolumePluginMkdirID, "version", hvm.HostVolumePluginMkdirVersion) - defer response.AddAttribute("plugins.host_volume."+hvm.HostVolumePluginMkdirID+".version", hvm.HostVolumePluginMkdirVersion) - response.Detected = true - - // this config value will be empty in -dev mode - pluginDir := request.Config.HostVolumePluginDir - if pluginDir == "" { - return nil - } - - plugins, err := GetHostVolumePluginVersions(h.logger, pluginDir, request.Node.NodePool) - if err != nil { - if os.IsNotExist(err) { - h.logger.Debug("plugin dir does not exist", "dir", pluginDir) - } else { - h.logger.Warn("error finding plugins", "dir", pluginDir, "error", err) - } - return nil // don't halt agent start - } - - // if this was a reload, wipe what was there before - for k := range request.Node.Attributes { - if strings.HasPrefix(k, "plugins.host_volume.") { - response.RemoveAttribute(k) - } - } - - // set the attribute(s) - for plugin, version := range plugins { - h.logger.Debug("detected plugin", "plugin_id", plugin, "version", version) - response.AddAttribute("plugins.host_volume."+plugin+".version", version) - } - - return nil -} - -func (h *DynamicHostVolumePluginFingerprint) Periodic() (bool, time.Duration) { - return false, 0 -} - -// GetHostVolumePluginVersions finds all the executable files on disk that -// respond to a `fingerprint` call. The return map's keys are plugin IDs, -// and the values are version strings. 
-func GetHostVolumePluginVersions(log hclog.Logger, pluginDir, nodePool string) (map[string]string, error) { - files, err := helper.FindExecutableFiles(pluginDir) - if err != nil { - return nil, err - } - - plugins := make(map[string]string) - mut := sync.Mutex{} - var wg sync.WaitGroup - - for file := range files { - wg.Add(1) - go func(file string) { - defer wg.Done() - // really should take way less than a second - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - - log := log.With("plugin_id", file) - - p, err := hvm.NewHostVolumePluginExternal(log, pluginDir, file, "", nodePool) - if err != nil { - log.Warn("error getting plugin", "error", err) - return - } - - fprint, err := p.Fingerprint(ctx) - if err != nil { - log.Debug("failed to get version from plugin", "error", err) - return - } - - mut.Lock() - plugins[file] = fprint.Version.String() - mut.Unlock() - }(file) - } - - wg.Wait() - return plugins, nil -} diff --git a/client/fingerprint/dynamic_host_volumes_test.go b/client/fingerprint/dynamic_host_volumes_test.go deleted file mode 100644 index 28b331bcfc1..00000000000 --- a/client/fingerprint/dynamic_host_volumes_test.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package fingerprint - -import ( - "os" - "path/filepath" - "runtime" - "testing" - - "github.com/hashicorp/nomad/client/config" - hvm "github.com/hashicorp/nomad/client/hostvolumemanager" - "github.com/hashicorp/nomad/helper/testlog" - "github.com/hashicorp/nomad/nomad/structs" - "github.com/shoenig/test/must" -) - -// this is more of a full integration test of: -// fingerprint <- find plugins <- find executables -func TestPluginsHostVolumeFingerprint(t *testing.T) { - cfg := &config.Config{HostVolumePluginDir: ""} - node := &structs.Node{Attributes: map[string]string{}} - req := &FingerprintRequest{Config: cfg, Node: node} - fp := NewPluginsHostVolumeFingerprint(testlog.HCLogger(t)) - - // this fingerprint is not mandatory, so no error should be returned - for name, path := range map[string]string{ - "empty": "", - "non-existent": "/nowhere", - "impossible": "dynamic_host_volumes_test.go", - } { - t.Run(name, func(t *testing.T) { - resp := FingerprintResponse{} - cfg.HostVolumePluginDir = path - err := fp.Fingerprint(req, &resp) - must.NoError(t, err) - must.True(t, resp.Detected) // always true due to "mkdir" built-in - }) - } - - if runtime.GOOS == "windows" { - t.Skip("test scripts not built for windows") // db TODO(1.10.0) - } - - // happy path: dir exists. this one will contain a single valid plugin. 
- tmp := t.TempDir() - cfg.HostVolumePluginDir = tmp - - files := []struct { - name string - contents string - perm os.FileMode - }{ - // only this first one should be detected as a valid plugin - {"happy-plugin", "#!/usr/bin/env sh\necho '{\"version\": \"0.0.1\"}'", 0700}, - {"not-a-plugin", "#!/usr/bin/env sh\necho 'not a version'", 0700}, - {"unhappy-plugin", "#!/usr/bin/env sh\necho 'sad plugin is sad'; exit 1", 0700}, - {"not-executable", "do not execute me", 0400}, - } - for _, f := range files { - must.NoError(t, os.WriteFile(filepath.Join(tmp, f.name), []byte(f.contents), f.perm)) - } - // directories should be ignored - must.NoError(t, os.Mkdir(filepath.Join(tmp, "a-directory"), 0700)) - - // do the fingerprint - resp := FingerprintResponse{} - err := fp.Fingerprint(req, &resp) - must.NoError(t, err) - must.Eq(t, map[string]string{ - "plugins.host_volume.mkdir.version": hvm.HostVolumePluginMkdirVersion, // built-in - "plugins.host_volume.happy-plugin.version": "0.0.1", - }, resp.Attributes) - - // do it again after deleting our one good plugin. - // repeat runs should wipe attributes, so nothing should remain. - node.Attributes = resp.Attributes - must.NoError(t, os.Remove(filepath.Join(tmp, "happy-plugin"))) - - resp = FingerprintResponse{} - err = fp.Fingerprint(req, &resp) - must.NoError(t, err) - must.Eq(t, map[string]string{ - "plugins.host_volume.happy-plugin.version": "", // empty value means removed - - "plugins.host_volume.mkdir.version": hvm.HostVolumePluginMkdirVersion, // built-in - }, resp.Attributes) -} diff --git a/client/fingerprint/fingerprint.go b/client/fingerprint/fingerprint.go index 9e721ffe730..9d8ff07cf57 100644 --- a/client/fingerprint/fingerprint.go +++ b/client/fingerprint/fingerprint.go @@ -32,20 +32,19 @@ var ( // hostFingerprinters contains the host fingerprints which are available for a // given platform. hostFingerprinters = map[string]Factory{ - "arch": NewArchFingerprint, - "consul": NewConsulFingerprint, - "cni": NewCNIFingerprint, // networks - "cpu": NewCPUFingerprint, - "host": NewHostFingerprint, - "landlock": NewLandlockFingerprint, - "memory": NewMemoryFingerprint, - "network": NewNetworkFingerprint, - "nomad": NewNomadFingerprint, - "plugins_cni": NewPluginsCNIFingerprint, - "host_volume_plugins": NewPluginsHostVolumeFingerprint, - "signal": NewSignalFingerprint, - "storage": NewStorageFingerprint, - "vault": NewVaultFingerprint, + "arch": NewArchFingerprint, + "consul": NewConsulFingerprint, + "cni": NewCNIFingerprint, // networks + "cpu": NewCPUFingerprint, + "host": NewHostFingerprint, + "landlock": NewLandlockFingerprint, + "memory": NewMemoryFingerprint, + "network": NewNetworkFingerprint, + "nomad": NewNomadFingerprint, + "plugins_cni": NewPluginsCNIFingerprint, + "signal": NewSignalFingerprint, + "storage": NewStorageFingerprint, + "vault": NewVaultFingerprint, } // envFingerprinters contains the fingerprints that are environment specific. 
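The fingerprint code removed above expected external host volume plugins to answer a `fingerprint` call with JSON such as {"version": "0.0.1"}, unmarshaled into a PluginFingerprint via hashicorp/go-version. A standalone sketch of that parsing step, mirroring the deleted code rather than any API that survives this backport:

package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/hashicorp/go-version"
)

// PluginFingerprint mirrors the struct deleted in this patch: plugins are
// expected to respond to 'fingerprint' with JSON in this shape.
type PluginFingerprint struct {
	Version *version.Version `json:"version"`
}

func main() {
	// Example stdout captured from a plugin's fingerprint call.
	stdout := []byte(`{"version": "0.0.1"}`)

	fp := &PluginFingerprint{}
	if err := json.Unmarshal(stdout, fp); err != nil {
		log.Fatalf("error parsing fingerprint output as json: %v", err)
	}
	fmt.Println("plugin version:", fp.Version.String())
}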
diff --git a/client/fingerprint/zstorage_windows.go b/client/fingerprint/zstorage_windows.go index adf8de77aaa..21b34771732 100644 --- a/client/fingerprint/zstorage_windows.go +++ b/client/fingerprint/zstorage_windows.go @@ -5,10 +5,8 @@ package fingerprint -import ( - "syscall" - "unsafe" -) +import "unsafe" +import "syscall" var _ unsafe.Pointer diff --git a/client/fs_endpoint.go b/client/fs_endpoint.go index 693dd4d981a..33f0cdb085b 100644 --- a/client/fs_endpoint.go +++ b/client/fs_endpoint.go @@ -18,7 +18,7 @@ import ( "syscall" "time" - metrics "github.com/hashicorp/go-metrics/compat" + metrics "github.com/armon/go-metrics" "github.com/hashicorp/go-msgpack/v2/codec" "github.com/hpcloud/tail/watch" diff --git a/client/host_volume_endpoint.go b/client/host_volume_endpoint.go deleted file mode 100644 index 690d28d2654..00000000000 --- a/client/host_volume_endpoint.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package client - -import ( - "context" - "time" - - metrics "github.com/hashicorp/go-metrics/compat" - cstructs "github.com/hashicorp/nomad/client/structs" -) - -type HostVolume struct { - c *Client -} - -func newHostVolumesEndpoint(c *Client) *HostVolume { - v := &HostVolume{c: c} - return v -} - -var hostVolumeRequestTimeout = time.Minute - -func (v *HostVolume) Create( - req *cstructs.ClientHostVolumeCreateRequest, - resp *cstructs.ClientHostVolumeCreateResponse) error { - - defer metrics.MeasureSince([]string{"client", "host_volume", "create"}, time.Now()) - ctx, cancelFn := v.requestContext() - defer cancelFn() - - cresp, err := v.c.hostVolumeManager.Create(ctx, req) - if err != nil { - v.c.logger.Error("failed to create host volume", "name", req.Name, "error", err) - return err - } - - resp.CapacityBytes = cresp.CapacityBytes - resp.HostPath = cresp.HostPath - - v.c.logger.Info("created host volume", "id", req.ID, "path", resp.HostPath) - return nil -} - -func (v *HostVolume) Register( - req *cstructs.ClientHostVolumeRegisterRequest, - resp *cstructs.ClientHostVolumeRegisterResponse) error { - - defer metrics.MeasureSince([]string{"client", "host_volume", "register"}, time.Now()) - ctx, cancelFn := v.requestContext() - defer cancelFn() - - err := v.c.hostVolumeManager.Register(ctx, req) - if err != nil { - v.c.logger.Error("failed to register host volume", "name", req.Name, "error", err) - return err - } - - v.c.logger.Info("registered host volume", "id", req.ID, "path", req.HostPath) - return nil -} - -func (v *HostVolume) Delete( - req *cstructs.ClientHostVolumeDeleteRequest, - resp *cstructs.ClientHostVolumeDeleteResponse) error { - defer metrics.MeasureSince([]string{"client", "host_volume", "create"}, time.Now()) - ctx, cancelFn := v.requestContext() - defer cancelFn() - - _, err := v.c.hostVolumeManager.Delete(ctx, req) - if err != nil { - v.c.logger.Error("failed to delete host volume", "ID", req.ID, "error", err) - return err - } - - v.c.logger.Info("deleted host volume", "id", req.ID, "path", req.HostPath) - return nil -} - -func (v *HostVolume) requestContext() (context.Context, context.CancelFunc) { - return context.WithTimeout(context.Background(), hostVolumeRequestTimeout) -} diff --git a/client/host_volume_endpoint_test.go b/client/host_volume_endpoint_test.go deleted file mode 100644 index a1005e210d1..00000000000 --- a/client/host_volume_endpoint_test.go +++ /dev/null @@ -1,200 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -package client - -import ( - "path/filepath" - "sort" - "testing" - - "github.com/hashicorp/nomad/ci" - hvm "github.com/hashicorp/nomad/client/hostvolumemanager" - "github.com/hashicorp/nomad/client/state" - cstructs "github.com/hashicorp/nomad/client/structs" - "github.com/hashicorp/nomad/helper/testlog" - "github.com/hashicorp/nomad/helper/uuid" - "github.com/shoenig/test/must" -) - -func TestHostVolume(t *testing.T) { - ci.Parallel(t) - - client, cleanup := TestClient(t, nil) - defer cleanup() - - memdb := state.NewMemDB(testlog.HCLogger(t)) - client.stateDB = memdb - - tmp := t.TempDir() - manager := hvm.NewHostVolumeManager(testlog.HCLogger(t), hvm.Config{ - StateMgr: client.stateDB, - UpdateNodeVols: client.updateNodeFromHostVol, - PluginDir: "/no/ext/plugins", - VolumesDir: tmp, - }) - client.hostVolumeManager = manager - hostPathCreate := filepath.Join(tmp, "test-vol-id-1") - hostPathRegister := t.TempDir() - - t.Run("happy", func(t *testing.T) { - - /* create */ - - req := &cstructs.ClientHostVolumeCreateRequest{ - Name: "created-volume", - ID: "test-vol-id-1", - PluginID: "mkdir", // real plugin really makes a dir - } - var resp cstructs.ClientHostVolumeCreateResponse - err := client.ClientRPC("HostVolume.Create", req, &resp) - must.NoError(t, err) - must.Eq(t, cstructs.ClientHostVolumeCreateResponse{ - HostPath: hostPathCreate, - CapacityBytes: 0, // "mkdir" always returns zero - }, resp) - // technically this is testing "mkdir" more than the RPC - must.DirExists(t, hostPathCreate) - // ensure we saved to client state - vols, err := memdb.GetDynamicHostVolumes() - must.NoError(t, err) - must.Len(t, 1, vols) - expectState := &cstructs.HostVolumeState{ - ID: req.ID, - CreateReq: req, - HostPath: hostPathCreate, - } - must.Eq(t, expectState, vols[0]) - // and should be fingerprinted - must.Eq(t, hvm.VolumeMap{ - req.Name: { - ID: req.ID, - Name: req.Name, - Path: hostPathCreate, - }, - }, client.Node().HostVolumes) - - /* register */ - - regReq := &cstructs.ClientHostVolumeRegisterRequest{ - ID: "test-vol-id-2", - Name: "registered-volume", - NodeID: uuid.Generate(), - HostPath: hostPathRegister, - CapacityBytes: 1000, - } - var regResp cstructs.ClientHostVolumeRegisterResponse - err = client.ClientRPC("HostVolume.Register", regReq, ®Resp) - must.NoError(t, err) - - // ensure we saved to client state - vols, err = memdb.GetDynamicHostVolumes() - must.NoError(t, err) - must.Len(t, 2, vols) - sort.Slice(vols, func(i, j int) bool { return vols[i].ID < vols[j].ID }) - expectState = &cstructs.HostVolumeState{ - ID: regReq.ID, - HostPath: hostPathRegister, - CreateReq: &cstructs.ClientHostVolumeCreateRequest{ - ID: regReq.ID, - Name: regReq.Name, - NodeID: regReq.NodeID, - }, - } - must.Eq(t, expectState, vols[1]) - // and should be fingerprinted - must.Eq(t, hvm.VolumeMap{ - req.Name: { - ID: req.ID, - Name: req.Name, - Path: hostPathCreate, - }, - regReq.Name: { - ID: regReq.ID, - Name: regReq.Name, - Path: hostPathRegister, - }, - }, client.Node().HostVolumes) - - /* delete */ - - delReq := &cstructs.ClientHostVolumeDeleteRequest{ - Name: "created-volume", - ID: "test-vol-id-1", - PluginID: "mkdir", - HostPath: hostPathCreate, - } - var delResp cstructs.ClientHostVolumeDeleteResponse - err = client.ClientRPC("HostVolume.Delete", delReq, &delResp) - must.NoError(t, err) - must.NotNil(t, delResp) - // again, actually testing the "mkdir" plugin - must.DirNotExists(t, hostPathCreate) - // client state should be deleted - vols, err = 
memdb.GetDynamicHostVolumes() - must.NoError(t, err) - must.Len(t, 1, vols) - // and the fingerprint, too - must.Eq(t, hvm.VolumeMap{ - regReq.Name: { - ID: regReq.ID, - Name: regReq.Name, - Path: hostPathRegister, - }, - }, client.Node().HostVolumes) - - delReq.Name = "registered-volume" - delReq.ID = "test-vol-id-2" - err = client.ClientRPC("HostVolume.Delete", delReq, &delResp) - must.NoError(t, err) - must.NotNil(t, delResp) - - vols, err = memdb.GetDynamicHostVolumes() - must.NoError(t, err) - must.Len(t, 0, vols) - must.Eq(t, hvm.VolumeMap{}, client.Node().HostVolumes) - }) - - t.Run("missing plugin", func(t *testing.T) { - req := &cstructs.ClientHostVolumeCreateRequest{ - PluginID: "non-existent", - } - var resp cstructs.ClientHostVolumeCreateResponse - err := client.ClientRPC("HostVolume.Create", req, &resp) - must.EqError(t, err, `no such plugin: "non-existent"`) - - delReq := &cstructs.ClientHostVolumeDeleteRequest{ - PluginID: "non-existent", - } - var delResp cstructs.ClientHostVolumeDeleteResponse - err = client.ClientRPC("HostVolume.Delete", delReq, &delResp) - must.EqError(t, err, `no such plugin: "non-existent"`) - }) - - t.Run("error from plugin", func(t *testing.T) { - // "mkdir" plugin can't create a directory within a file - client.hostVolumeManager = hvm.NewHostVolumeManager(testlog.HCLogger(t), hvm.Config{ - StateMgr: client.stateDB, - UpdateNodeVols: client.updateNodeFromHostVol, - PluginDir: "/no/ext/plugins", - VolumesDir: "host_volume_endpoint_test.go", - }) - - req := &cstructs.ClientHostVolumeCreateRequest{ - ID: "test-vol-id-1", - Name: "created-volume", - PluginID: "mkdir", - } - var resp cstructs.ClientHostVolumeCreateResponse - err := client.ClientRPC("HostVolume.Create", req, &resp) - must.ErrorContains(t, err, "host_volume_endpoint_test.go/test-vol-id-1: not a directory") - - delReq := &cstructs.ClientHostVolumeDeleteRequest{ - ID: "test-vol-id-1", - PluginID: "mkdir", - } - var delResp cstructs.ClientHostVolumeDeleteResponse - err = client.ClientRPC("HostVolume.Delete", delReq, &delResp) - must.ErrorContains(t, err, "host_volume_endpoint_test.go/test-vol-id-1: not a directory") - }) -} diff --git a/client/hostvolumemanager/host_volume_plugin.go b/client/hostvolumemanager/host_volume_plugin.go deleted file mode 100644 index c51336b416e..00000000000 --- a/client/hostvolumemanager/host_volume_plugin.go +++ /dev/null @@ -1,362 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package hostvolumemanager - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "os" - "os/exec" - "path/filepath" - "time" - - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/go-version" - cstructs "github.com/hashicorp/nomad/client/structs" - "github.com/hashicorp/nomad/helper" -) - -const ( - // environment variables for external plugins - - EnvOperation = "DHV_OPERATION" - EnvVolumesDir = "DHV_VOLUMES_DIR" - EnvPluginDir = "DHV_PLUGIN_DIR" - EnvCreatedPath = "DHV_CREATED_PATH" - EnvNamespace = "DHV_NAMESPACE" - EnvVolumeName = "DHV_VOLUME_NAME" - EnvVolumeID = "DHV_VOLUME_ID" - EnvNodeID = "DHV_NODE_ID" - EnvNodePool = "DHV_NODE_POOL" - EnvCapacityMin = "DHV_CAPACITY_MIN_BYTES" - EnvCapacityMax = "DHV_CAPACITY_MAX_BYTES" - EnvParameters = "DHV_PARAMETERS" -) - -// HostVolumePlugin manages the lifecycle of volumes. 
-type HostVolumePlugin interface { - Fingerprint(ctx context.Context) (*PluginFingerprint, error) - Create(ctx context.Context, req *cstructs.ClientHostVolumeCreateRequest) (*HostVolumePluginCreateResponse, error) - Delete(ctx context.Context, req *cstructs.ClientHostVolumeDeleteRequest) error -} - -// PluginFingerprint gets set on the node for volume scheduling. -// Plugins are expected to respond to 'fingerprint' calls with json that -// unmarshals to this struct. -type PluginFingerprint struct { - Version *version.Version `json:"version"` -} - -// HostVolumePluginCreateResponse gets stored on the volume in server state. -// Plugins are expected to respond to 'create' calls with json that -// unmarshals to this struct. -type HostVolumePluginCreateResponse struct { - Path string `json:"path"` - SizeBytes int64 `json:"bytes"` -} - -const HostVolumePluginMkdirID = "mkdir" -const HostVolumePluginMkdirVersion = "0.0.1" - -var _ HostVolumePlugin = &HostVolumePluginMkdir{} - -// HostVolumePluginMkdir is a plugin that creates a directory within the -// specified VolumesDir. It is built-in to Nomad, so is always available. -type HostVolumePluginMkdir struct { - ID string - VolumesDir string - - log hclog.Logger -} - -func (p *HostVolumePluginMkdir) Fingerprint(_ context.Context) (*PluginFingerprint, error) { - v, err := version.NewVersion(HostVolumePluginMkdirVersion) - return &PluginFingerprint{ - Version: v, - }, err -} - -func (p *HostVolumePluginMkdir) Create(_ context.Context, - req *cstructs.ClientHostVolumeCreateRequest) (*HostVolumePluginCreateResponse, error) { - - path := filepath.Join(p.VolumesDir, req.ID) - log := p.log.With( - "operation", "create", - "volume_id", req.ID, - "path", path) - log.Debug("running plugin") - - resp := &HostVolumePluginCreateResponse{ - Path: path, - // "mkdir" volumes, being simple directories, have unrestricted size - SizeBytes: 0, - } - - if _, err := os.Stat(path); err == nil { - // already exists - return resp, nil - } else if !os.IsNotExist(err) { - // doesn't exist, but some other path error - log.Debug("error with plugin", "error", err) - return nil, err - } - - err := os.MkdirAll(path, 0o700) - if err != nil { - log.Debug("error with plugin", "error", err) - return nil, err - } - - log.Debug("plugin ran successfully") - return resp, nil -} - -func (p *HostVolumePluginMkdir) Delete(_ context.Context, req *cstructs.ClientHostVolumeDeleteRequest) error { - path := filepath.Join(p.VolumesDir, req.ID) - log := p.log.With( - "operation", "delete", - "volume_id", req.ID, - "path", path) - log.Debug("running plugin") - - err := os.RemoveAll(path) - if err != nil { - log.Debug("error with plugin", "error", err) - return err - } - - log.Debug("plugin ran successfully") - return nil -} - -var _ HostVolumePlugin = &HostVolumePluginExternal{} - -// NewHostVolumePluginExternal returns an external host volume plugin -// if the specified executable exists on disk. -func NewHostVolumePluginExternal(log hclog.Logger, - pluginDir, filename, volumesDir, nodePool string) (*HostVolumePluginExternal, error) { - // this should only be called with already-detected executables, - // but we'll double-check it anyway, so we can provide a tidy error message - // if it has changed between fingerprinting and execution. 
- executable := filepath.Join(pluginDir, filename) - f, err := os.Stat(executable) - if err != nil { - if os.IsNotExist(err) { - return nil, fmt.Errorf("%w: %q", ErrPluginNotExists, filename) - } - return nil, err - } - if !helper.IsExecutable(f) { - return nil, fmt.Errorf("%w: %q", ErrPluginNotExecutable, filename) - } - return &HostVolumePluginExternal{ - ID: filename, - Executable: executable, - VolumesDir: volumesDir, - PluginDir: pluginDir, - NodePool: nodePool, - log: log, - }, nil -} - -// HostVolumePluginExternal calls an executable on disk. All operations -// *must* be idempotent, and safe to be called concurrently per volume. -// For each call, the executable's stdout and stderr may be logged, so plugin -// authors should not include any sensitive information in their plugin outputs. -type HostVolumePluginExternal struct { - ID string - Executable string - VolumesDir string - PluginDir string - NodePool string - - log hclog.Logger -} - -// Fingerprint calls the executable with the following parameters: -// arguments: $1=fingerprint -// environment: -// - DHV_OPERATION=fingerprint -// -// Response should be valid JSON on stdout, with a "version" key, e.g.: -// {"version": "0.0.1"} -// The version value should be a valid version number as allowed by -// version.NewVersion() -// -// Must complete within 5 seconds -func (p *HostVolumePluginExternal) Fingerprint(ctx context.Context) (*PluginFingerprint, error) { - ctx, cancel := context.WithTimeout(ctx, 5*time.Second) - defer cancel() - cmd := exec.CommandContext(ctx, p.Executable, "fingerprint") - cmd.Env = []string{EnvOperation + "=fingerprint"} - stdout, stderr, err := runCommand(cmd) - if err != nil { - p.log.Debug("error with plugin", - "operation", "version", - "stdout", string(stdout), - "stderr", string(stderr), - "error", err) - return nil, fmt.Errorf("error getting version from plugin %q: %w", p.ID, err) - } - fprint := &PluginFingerprint{} - if err := json.Unmarshal(stdout, fprint); err != nil { - return nil, fmt.Errorf("error parsing fingerprint output as json: %w", err) - } - return fprint, nil -} - -// Create calls the executable with the following parameters: -// arguments: $1=create -// environment: -// - DHV_OPERATION=create -// - DHV_VOLUMES_DIR={directory to put the volume in} -// - DHV_PLUGIN_DIR={path to directory containing plugins} -// - DHV_NAMESPACE={volume namespace} -// - DHV_VOLUME_NAME={name from the volume specification} -// - DHV_VOLUME_ID={volume ID generated by Nomad} -// - DHV_NODE_ID={Nomad node ID} -// - DHV_NODE_POOL={Nomad node pool} -// - DHV_CAPACITY_MIN_BYTES={capacity_min from the volume spec, expressed in bytes} -// - DHV_CAPACITY_MAX_BYTES={capacity_max from the volume spec, expressed in bytes} -// - DHV_PARAMETERS={stringified json of parameters from the volume spec} -// -// Response should be valid JSON on stdout with "path" and "bytes", e.g.: -// {"path": "/path/that/was/created", "bytes": 50000000} -// "path" must be provided to confirm the requested path is what was -// created by the plugin. "bytes" is the actual size of the volume created -// by the plugin; if excluded, it will default to 0. 
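
The environment contract above is the entire interface an external plugin needs to honor: read the DHV_* variables, do the work, and print JSON on stdout for the fingerprint and create operations. The deleted test fixture later in this patch implements the contract in bash; purely as an illustration, a standalone Go binary could satisfy it along the following lines. The file layout, permissions, and error handling here are assumptions; only the variable names and response shapes come from the comments above.

    package main

    import (
        "encoding/json"
        "fmt"
        "os"
        "path/filepath"
    )

    func main() {
        switch op := os.Getenv("DHV_OPERATION"); op {
        case "fingerprint":
            // advertise a version so the node fingerprint can pick up the plugin
            fmt.Println(`{"version": "0.0.1"}`)
        case "create":
            // place the volume under the directory Nomad provides; MkdirAll is
            // idempotent, which matters because create is re-run on client restore
            path := filepath.Join(os.Getenv("DHV_VOLUMES_DIR"), os.Getenv("DHV_VOLUME_ID"))
            if err := os.MkdirAll(path, 0o700); err != nil {
                fmt.Fprintln(os.Stderr, err)
                os.Exit(1)
            }
            // report the created path (and size, if known) back to Nomad on stdout
            out, _ := json.Marshal(map[string]any{"path": path, "bytes": 0})
            fmt.Println(string(out))
        case "delete":
            // DHV_CREATED_PATH carries the path that create previously returned
            if err := os.RemoveAll(os.Getenv("DHV_CREATED_PATH")); err != nil {
                fmt.Fprintln(os.Stderr, err)
                os.Exit(1)
            }
        default:
            fmt.Fprintf(os.Stderr, "unknown operation %q\n", op)
            os.Exit(1)
        }
    }

Placed in the agent's plugin directory with the executable bit set, a binary like this would be detected the same way the bash fixture is and invoked through HostVolumePluginExternal.
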
-// -// Must complete within 60 seconds (timeout on RPC) -func (p *HostVolumePluginExternal) Create(ctx context.Context, - req *cstructs.ClientHostVolumeCreateRequest) (*HostVolumePluginCreateResponse, error) { - - params, err := json.Marshal(req.Parameters) - if err != nil { - // should never happen; req.Parameters is a simple map[string]string - return nil, fmt.Errorf("error marshaling volume pramaters: %w", err) - } - envVars := []string{ - fmt.Sprintf("%s=%s", EnvOperation, "create"), - fmt.Sprintf("%s=%s", EnvVolumesDir, p.VolumesDir), - fmt.Sprintf("%s=%s", EnvPluginDir, p.PluginDir), - fmt.Sprintf("%s=%s", EnvNodePool, p.NodePool), - // values from volume spec - fmt.Sprintf("%s=%s", EnvNamespace, req.Namespace), - fmt.Sprintf("%s=%s", EnvVolumeName, req.Name), - fmt.Sprintf("%s=%s", EnvVolumeID, req.ID), - fmt.Sprintf("%s=%d", EnvCapacityMin, req.RequestedCapacityMinBytes), - fmt.Sprintf("%s=%d", EnvCapacityMax, req.RequestedCapacityMaxBytes), - fmt.Sprintf("%s=%s", EnvNodeID, req.NodeID), - fmt.Sprintf("%s=%s", EnvParameters, params), - } - - log := p.log.With("volume_name", req.Name, "volume_id", req.ID) - stdout, _, err := p.runPlugin(ctx, log, "create", envVars) - if err != nil { - return nil, fmt.Errorf("error creating volume %q with plugin %q: %w", req.ID, p.ID, err) - } - - var pluginResp HostVolumePluginCreateResponse - err = json.Unmarshal(stdout, &pluginResp) - if err != nil { - // note: if a plugin does not return valid json, a volume may be - // created without any respective state in Nomad, since we return - // an error here after the plugin has done who-knows-what. - return nil, err - } - return &pluginResp, nil -} - -// Delete calls the executable with the following parameters: -// arguments: $1=delete -// environment: -// - DHV_OPERATION=delete -// - DHV_CREATED_PATH={path that `create` returned} -// - DHV_VOLUMES_DIR={directory that volumes should be put in} -// - DHV_PLUGIN_DIR={path to directory containing plugins} -// - DHV_NAMESPACE={volume namespace} -// - DHV_VOLUME_NAME={name from the volume specification} -// - DHV_VOLUME_ID={volume ID generated by Nomad} -// - DHV_NODE_ID={Nomad node ID} -// - DHV_NODE_POOL={Nomad node pool} -// - DHV_PARAMETERS={stringified json of parameters from the volume spec} -// -// Response on stdout is discarded. -// -// Must complete within 60 seconds (timeout on RPC) -func (p *HostVolumePluginExternal) Delete(ctx context.Context, - req *cstructs.ClientHostVolumeDeleteRequest) error { - - params, err := json.Marshal(req.Parameters) - if err != nil { - // should never happen; req.Parameters is a simple map[string]string - return fmt.Errorf("error marshaling volume pramaters: %w", err) - } - envVars := []string{ - fmt.Sprintf("%s=%s", EnvOperation, "delete"), - fmt.Sprintf("%s=%s", EnvVolumesDir, p.VolumesDir), - fmt.Sprintf("%s=%s", EnvPluginDir, p.PluginDir), - fmt.Sprintf("%s=%s", EnvNodePool, p.NodePool), - // from create response - fmt.Sprintf("%s=%s", EnvCreatedPath, req.HostPath), - // values from volume spec - fmt.Sprintf("%s=%s", EnvNamespace, req.Namespace), - fmt.Sprintf("%s=%s", EnvVolumeName, req.Name), - fmt.Sprintf("%s=%s", EnvVolumeID, req.ID), - fmt.Sprintf("%s=%s", EnvNodeID, req.NodeID), - fmt.Sprintf("%s=%s", EnvParameters, params), - } - - log := p.log.With("volume_name", req.Name, "volume_id", req.ID) - _, _, err = p.runPlugin(ctx, log, "delete", envVars) - if err != nil { - return fmt.Errorf("error deleting volume %q with plugin %q: %w", req.ID, p.ID, err) - } - return nil -} - -// runPlugin executes the... 
executable -func (p *HostVolumePluginExternal) runPlugin(ctx context.Context, log hclog.Logger, - op string, env []string) (stdout, stderr []byte, err error) { - - log = log.With("operation", op) - log.Debug("running plugin") - - // set up plugin execution - cmd := exec.CommandContext(ctx, p.Executable, op) - cmd.Env = env - - stdout, stderr, err = runCommand(cmd) - - log = log.With( - "stdout", string(stdout), - "stderr", string(stderr), - ) - if err != nil { - log.Debug("error with plugin", "error", err) - return stdout, stderr, err - } - log.Debug("plugin ran successfully") - return stdout, stderr, nil -} - -// runCommand executes the provided Cmd and captures stdout and stderr. -func runCommand(cmd *exec.Cmd) (stdout, stderr []byte, err error) { - var errBuf bytes.Buffer - cmd.Stderr = io.Writer(&errBuf) - mErr := &multierror.Error{} - stdout, err = cmd.Output() - if err != nil { - mErr = multierror.Append(mErr, err) - } - stderr, err = io.ReadAll(&errBuf) - if err != nil { - mErr = multierror.Append(mErr, err) - } - return stdout, stderr, helper.FlattenMultierror(mErr.ErrorOrNil()) -} diff --git a/client/hostvolumemanager/host_volume_plugin_test.go b/client/hostvolumemanager/host_volume_plugin_test.go deleted file mode 100644 index 826f7b943bd..00000000000 --- a/client/hostvolumemanager/host_volume_plugin_test.go +++ /dev/null @@ -1,209 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package hostvolumemanager - -import ( - "path/filepath" - "runtime" - "testing" - - "github.com/hashicorp/go-version" - cstructs "github.com/hashicorp/nomad/client/structs" - "github.com/hashicorp/nomad/helper/testlog" - "github.com/shoenig/test/must" -) - -func TestHostVolumePluginMkdir(t *testing.T) { - volID := "test-vol-id" - tmp := t.TempDir() - target := filepath.Join(tmp, volID) - - plug := &HostVolumePluginMkdir{ - ID: "test-mkdir-plugin", - VolumesDir: tmp, - log: testlog.HCLogger(t), - } - - // contexts don't matter here, since they're thrown away by this plugin, - // but sending timeout contexts anyway, in case the plugin changes later. 
- _, err := plug.Fingerprint(timeout(t)) - must.NoError(t, err) - - t.Run("happy", func(t *testing.T) { - // run multiple times, should be idempotent - for range 2 { - resp, err := plug.Create(timeout(t), - &cstructs.ClientHostVolumeCreateRequest{ - ID: volID, // minimum required by this plugin - }) - must.NoError(t, err) - must.Eq(t, &HostVolumePluginCreateResponse{ - Path: target, - SizeBytes: 0, - }, resp) - must.DirExists(t, target) - } - - // delete should be idempotent, too - for range 2 { - err = plug.Delete(timeout(t), - &cstructs.ClientHostVolumeDeleteRequest{ - ID: volID, - }) - must.NoError(t, err) - must.DirNotExists(t, target) - } - }) - - t.Run("sad", func(t *testing.T) { - // can't mkdir inside a file - plug.VolumesDir = "host_volume_plugin_test.go" - - resp, err := plug.Create(timeout(t), - &cstructs.ClientHostVolumeCreateRequest{ - ID: volID, // minimum required by this plugin - }) - must.ErrorContains(t, err, "host_volume_plugin_test.go/test-vol-id: not a directory") - must.Nil(t, resp) - - err = plug.Delete(timeout(t), - &cstructs.ClientHostVolumeDeleteRequest{ - ID: volID, - }) - must.ErrorContains(t, err, "host_volume_plugin_test.go/test-vol-id: not a directory") - }) -} - -func TestNewHostVolumePluginExternal(t *testing.T) { - log := testlog.HCLogger(t) - var err error - - _, err = NewHostVolumePluginExternal(log, ".", "non-existent", "target", "") - must.ErrorIs(t, err, ErrPluginNotExists) - - _, err = NewHostVolumePluginExternal(log, ".", "host_volume_plugin_test.go", "target", "") - must.ErrorIs(t, err, ErrPluginNotExecutable) - - t.Run("unix", func(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skip("skipped because windows") // db TODO(1.10.0) - } - p, err := NewHostVolumePluginExternal(log, - "./test_fixtures", "test_plugin.sh", "test-target", "test-pool") - must.NoError(t, err) - must.Eq(t, &HostVolumePluginExternal{ - ID: "test_plugin.sh", - Executable: "test_fixtures/test_plugin.sh", - VolumesDir: "test-target", - PluginDir: "./test_fixtures", - NodePool: "test-pool", - log: log, - }, p) - }) -} - -func TestHostVolumePluginExternal(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skip("skipped because windows") // db TODO(1.10.0) - } - - volID := "test-vol-id" - tmp := t.TempDir() - target := filepath.Join(tmp, volID) - - expectVersion, err := version.NewVersion("0.0.2") - must.NoError(t, err) - - t.Run("happy", func(t *testing.T) { - - log, getLogs := logRecorder(t) - plug, err := NewHostVolumePluginExternal(log, - "./test_fixtures", "test_plugin.sh", tmp, "test-node-pool") - must.NoError(t, err) - - // fingerprint - v, err := plug.Fingerprint(timeout(t)) - logged := getLogs() - must.NoError(t, err, must.Sprintf("logs: %s", logged)) - must.Eq(t, expectVersion, v.Version, must.Sprintf("logs: %s", logged)) - - // create - resp, err := plug.Create(timeout(t), - &cstructs.ClientHostVolumeCreateRequest{ - Name: "test-vol-name", - ID: volID, - Namespace: "test-namespace", - NodeID: "test-node", - RequestedCapacityMinBytes: 5, - RequestedCapacityMaxBytes: 10, - Parameters: map[string]string{"key": "val"}, - }) - logged = getLogs() - must.NoError(t, err, must.Sprintf("logs: %s", logged)) - - must.Eq(t, &HostVolumePluginCreateResponse{ - Path: target, - SizeBytes: 5, - }, resp) - must.DirExists(t, target) - must.StrContains(t, logged, "OPERATION=create") // stderr from `env` - must.StrContains(t, logged, `stdout="{`) // stdout from printf - - // delete - err = plug.Delete(timeout(t), - &cstructs.ClientHostVolumeDeleteRequest{ - Name: "test-vol-name", - ID: 
volID, - HostPath: resp.Path, - Namespace: "test-namespace", - NodeID: "test-node", - Parameters: map[string]string{"key": "val"}, - }) - logged = getLogs() - must.NoError(t, err, must.Sprintf("logs: %s", logged)) - must.DirNotExists(t, target) - must.StrContains(t, logged, "OPERATION=delete") // stderr from `env` - must.StrContains(t, logged, "removed directory") // stdout from `rm -v` - }) - - t.Run("sad", func(t *testing.T) { - - log, getLogs := logRecorder(t) - plug, err := NewHostVolumePluginExternal(log, "./test_fixtures", "test_plugin_sad.sh", tmp, "") - must.NoError(t, err) - - v, err := plug.Fingerprint(timeout(t)) - must.EqError(t, err, `error getting version from plugin "test_plugin_sad.sh": exit status 1`) - must.Nil(t, v) - logged := getLogs() - must.StrContains(t, logged, "fingerprint: sad plugin is sad") - must.StrContains(t, logged, "fingerprint: it tells you all about it in stderr") - - // reset logger - log, getLogs = logRecorder(t) - plug.log = log - - resp, err := plug.Create(timeout(t), - &cstructs.ClientHostVolumeCreateRequest{ - ID: volID, - }) - must.EqError(t, err, `error creating volume "test-vol-id" with plugin "test_plugin_sad.sh": exit status 1`) - must.Nil(t, resp) - logged = getLogs() - must.StrContains(t, logged, "create: sad plugin is sad") - must.StrContains(t, logged, "create: it tells you all about it in stderr") - - log, getLogs = logRecorder(t) - plug.log = log - - err = plug.Delete(timeout(t), - &cstructs.ClientHostVolumeDeleteRequest{ - ID: volID, - }) - must.EqError(t, err, `error deleting volume "test-vol-id" with plugin "test_plugin_sad.sh": exit status 1`) - logged = getLogs() - must.StrContains(t, logged, "delete: sad plugin is sad") - must.StrContains(t, logged, "delete: it tells you all about it in stderr") - }) -} diff --git a/client/hostvolumemanager/host_volumes.go b/client/hostvolumemanager/host_volumes.go deleted file mode 100644 index 7b4ce33d992..00000000000 --- a/client/hostvolumemanager/host_volumes.go +++ /dev/null @@ -1,372 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package hostvolumemanager - -import ( - "context" - "errors" - "fmt" - "os" - "sync" - - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-multierror" - cstructs "github.com/hashicorp/nomad/client/structs" - "github.com/hashicorp/nomad/helper" - "github.com/hashicorp/nomad/nomad/structs" -) - -var ( - ErrPluginNotExists = errors.New("no such plugin") - ErrPluginNotExecutable = errors.New("plugin not executable") - ErrVolumeNameExists = errors.New("volume name already exists on this node") -) - -// HostVolumeStateManager manages the lifecycle of volumes in client state. -type HostVolumeStateManager interface { - PutDynamicHostVolume(*cstructs.HostVolumeState) error - GetDynamicHostVolumes() ([]*cstructs.HostVolumeState, error) - DeleteDynamicHostVolume(string) error -} - -// Config is used to configure a HostVolumeManager. -type Config struct { - // PluginDir is where external plugins may be found. - PluginDir string - - // VolumesDir is where plugins should place the directory - // that will later become a volume's HostPath - VolumesDir string - - // NodePool is passed into external plugin execution environment. - NodePool string - - // StateMgr manages client state to restore on agent restarts. - StateMgr HostVolumeStateManager - - // UpdateNodeVols is run to update the node when a volume is created - // or deleted. 
- UpdateNodeVols HostVolumeNodeUpdater -} - -// HostVolumeManager executes plugins, manages volume metadata in client state, -// and registers volumes with the client node. -type HostVolumeManager struct { - pluginDir string - volumesDir string - nodePool string - stateMgr HostVolumeStateManager - updateNodeVols HostVolumeNodeUpdater - builtIns map[string]HostVolumePlugin - locker *volLocker - log hclog.Logger -} - -// NewHostVolumeManager includes default builtin plugins. -func NewHostVolumeManager(logger hclog.Logger, config Config) *HostVolumeManager { - logger = logger.Named("host_volume_manager") - return &HostVolumeManager{ - pluginDir: config.PluginDir, - volumesDir: config.VolumesDir, - nodePool: config.NodePool, - stateMgr: config.StateMgr, - updateNodeVols: config.UpdateNodeVols, - builtIns: map[string]HostVolumePlugin{ - HostVolumePluginMkdirID: &HostVolumePluginMkdir{ - ID: HostVolumePluginMkdirID, - VolumesDir: config.VolumesDir, - log: logger.With("plugin_id", HostVolumePluginMkdirID), - }, - }, - locker: &volLocker{}, - log: logger, - } -} - -// Create runs the appropriate plugin for the given request, saves the request -// to state, and updates the node with the volume. -func (hvm *HostVolumeManager) Create(ctx context.Context, - req *cstructs.ClientHostVolumeCreateRequest) (*cstructs.ClientHostVolumeCreateResponse, error) { - - log := hvm.log.With("volume_name", req.Name, "volume_id", req.ID) - - plug, err := hvm.getPlugin(req.PluginID) - if err != nil { - return nil, err - } - - // can't have two of the same volume name w/ different IDs per client node - isNewVolume, err := hvm.locker.lock(req.Name, req.ID) - if err != nil { - return nil, err - } - - pluginResp, err := plug.Create(ctx, req) - if err != nil { - hvm.locker.release(req.Name) - return nil, err - } - - volState := &cstructs.HostVolumeState{ - ID: req.ID, - HostPath: pluginResp.Path, - CreateReq: req, - } - if err := hvm.stateMgr.PutDynamicHostVolume(volState); err != nil { - // if we fail to write to state on initial create, - // delete the volume so it isn't left lying around - // without Nomad knowing about it. - log.Error("failed to save volume in client state", "error", err) - if isNewVolume { - log.Error("initial create detected, running delete") - delErr := plug.Delete(ctx, &cstructs.ClientHostVolumeDeleteRequest{ - ID: req.ID, - PluginID: req.PluginID, - NodeID: req.NodeID, - HostPath: hvm.volumesDir, - Parameters: req.Parameters, - }) - if delErr != nil { - log.Warn("error deleting volume after state store failure", "error", delErr) - err = multierror.Append(err, delErr) - } - // free up the volume name whether delete succeeded or not. - hvm.locker.release(req.Name) - } - return nil, err - } - - hvm.updateNodeVols(req.Name, genVolConfig(req, pluginResp.Path)) - - resp := &cstructs.ClientHostVolumeCreateResponse{ - VolumeName: req.Name, - VolumeID: req.ID, - HostPath: pluginResp.Path, - CapacityBytes: pluginResp.SizeBytes, - } - - return resp, nil -} - -// Register saves the request to state, and updates the node with the volume. 
-func (hvm *HostVolumeManager) Register(ctx context.Context, - req *cstructs.ClientHostVolumeRegisterRequest) error { - - // can't have two of the same volume name w/ different IDs per client node - if _, err := hvm.locker.lock(req.Name, req.ID); err != nil { - return err - } - - _, err := os.Stat(req.HostPath) - if err != nil { - hvm.locker.release(req.Name) - return fmt.Errorf("could not verify host path for %q: %w", req.Name, err) - } - - // generate a stub create request and plugin response for the fingerprint - // and client state - creq := &cstructs.ClientHostVolumeCreateRequest{ - ID: req.ID, - Name: req.Name, - NodeID: req.NodeID, - Parameters: req.Parameters, - } - volState := &cstructs.HostVolumeState{ - ID: req.ID, - CreateReq: creq, - HostPath: req.HostPath, - } - if err := hvm.stateMgr.PutDynamicHostVolume(volState); err != nil { - hvm.log.Error("failed to save volume in state", "volume_id", req.ID, "error", err) - hvm.locker.release(req.Name) - return err - } - - hvm.updateNodeVols(req.Name, genVolConfig(creq, req.HostPath)) - return nil -} - -// Delete runs the appropriate plugin for the given request, removes it from -// state, and updates the node to remove the volume. -func (hvm *HostVolumeManager) Delete(ctx context.Context, - req *cstructs.ClientHostVolumeDeleteRequest) (*cstructs.ClientHostVolumeDeleteResponse, error) { - - if req.PluginID != "" { - plug, err := hvm.getPlugin(req.PluginID) - if err != nil { - return nil, err - } - - err = plug.Delete(ctx, req) - if err != nil { - return nil, err - } - } - - if err := hvm.stateMgr.DeleteDynamicHostVolume(req.ID); err != nil { - hvm.log.Error("failed to delete volume in state", "volume_id", req.ID, "error", err) - return nil, err // bail so a user may retry - } - - // free up volume name for reuse - hvm.locker.release(req.Name) - - hvm.updateNodeVols(req.Name, nil) - - resp := &cstructs.ClientHostVolumeDeleteResponse{ - VolumeName: req.Name, - VolumeID: req.ID, - } - - return resp, nil -} - -// getPlugin finds either a built-in plugin or an external plugin. -func (hvm *HostVolumeManager) getPlugin(id string) (HostVolumePlugin, error) { - if plug, ok := hvm.builtIns[id]; ok { - return plug, nil - } - log := hvm.log.With("plugin_id", id) - return NewHostVolumePluginExternal(log, hvm.pluginDir, id, hvm.volumesDir, hvm.nodePool) -} - -// restoreFromState loads all volumes from client state and runs Create for -// each one, so volumes are restored upon agent restart or host reboot. -func (hvm *HostVolumeManager) restoreFromState(ctx context.Context) (VolumeMap, error) { - vols, err := hvm.stateMgr.GetDynamicHostVolumes() - if err != nil { - return nil, err - } - - volumes := make(VolumeMap) - var mut sync.Mutex - - if len(vols) == 0 { - return volumes, nil // nothing to do - } - - group := multierror.Group{} - for _, vol := range vols { - group.Go(func() error { - var volCfg *structs.ClientHostVolumeConfig - var err error - if vol.CreateReq.PluginID == "" { - volCfg, err = hvm.restoreForRegister(vol) - } else { - volCfg, err = hvm.restoreForCreate(ctx, vol) - } - if err != nil { - return err - } - - mut.Lock() - volumes[vol.CreateReq.Name] = volCfg - mut.Unlock() - return nil - }) - } - mErr := group.Wait() - return volumes, helper.FlattenMultierror(mErr.ErrorOrNil()) -} - -// restoreForCreate restores a single volume that was previously created by -// Create, by "recreating" the volumes. Plugins have the best knowledge of their -// side effects, and they must be idempotent. 
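
Because restore re-runs create for every volume still in client state, idempotency is not optional for plugin authors. A hypothetical test helper (the name and shape are made up, not part of this package) states the property directly:

    // assertIdempotentCreate is a sketch: creating the same volume twice must
    // succeed both times and converge on the same path and size.
    func assertIdempotentCreate(t *testing.T, plug HostVolumePlugin,
        req *cstructs.ClientHostVolumeCreateRequest) {

        t.Helper()

        first, err := plug.Create(context.Background(), req)
        must.NoError(t, err)

        second, err := plug.Create(context.Background(), req)
        must.NoError(t, err)
        must.Eq(t, first, second)
    }

The deleted TestHostVolumePluginMkdir earlier in this patch checks the same property by looping over Create and Delete.
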
-func (hvm *HostVolumeManager) restoreForCreate(ctx context.Context, vol *cstructs.HostVolumeState) (*structs.ClientHostVolumeConfig, error) { - // missing plugins with associated volumes in state are considered - // client-stopping errors. they need to be fixed by cluster admins. - plug, err := hvm.getPlugin(vol.CreateReq.PluginID) - if err != nil { - return nil, err - } - - // lock the name so future creates can't produce duplicates. - _, err = hvm.locker.lock(vol.CreateReq.Name, vol.CreateReq.ID) - // state should never have duplicate vol names, and restore happens - // prior to node registration, so new creates shouldn't come in - // concurrently, but check for error just in case. - if err != nil { - hvm.log.Error("error during restore", - "volume_name", vol.CreateReq.Name, - "volume_id", vol.CreateReq.ID, - "error", err) - // don't stop the world if it does happen, because an admin - // couldn't do anything about it short of wiping client state. - return nil, nil - } - - resp, err := plug.Create(ctx, vol.CreateReq) - if err != nil { - // plugin execution errors are only logged - hvm.log.Error("failed to restore", - "plugin_id", vol.CreateReq.PluginID, "volume_id", vol.ID, "error", err) - return nil, nil - } - - return genVolConfig(vol.CreateReq, resp.Path), nil -} - -// restoreForRegister restores a single volume that was previously created by -// Register, by converting the stored struct. It otherwise behaves the same as -// restoreForCreate. -func (hvm *HostVolumeManager) restoreForRegister(vol *cstructs.HostVolumeState) (*structs.ClientHostVolumeConfig, error) { - _, err := hvm.locker.lock(vol.CreateReq.Name, vol.CreateReq.ID) - if err != nil { - hvm.log.Error("error during restore", - "volume_name", vol.CreateReq.Name, - "volume_id", vol.CreateReq.ID, - "error", err) - return nil, nil - } - - _, err = os.Stat(vol.HostPath) - if err != nil { - hvm.log.Error("failed to restore: could not verify host path", - "volume_id", vol.ID, "error", err, "path", vol.HostPath) - return nil, nil - } - - return genVolConfig(vol.CreateReq, vol.HostPath), nil -} - -// genVolConfig generates the host volume config for the node to report as -// available to the servers for job scheduling. -func genVolConfig(req *cstructs.ClientHostVolumeCreateRequest, hostPath string) *structs.ClientHostVolumeConfig { - return &structs.ClientHostVolumeConfig{ - Name: req.Name, - ID: req.ID, - Path: hostPath, - - // dynamic volumes, like CSI, have more robust `capabilities`, - // so we always set ReadOnly to false, and let the scheduler - // decide when to ignore this and check capabilities instead. - ReadOnly: false, - } -} - -// volLocker is used to ensure that volumes on each node are unique by name. -// The volume scheduler will prevent this too, but only after node fingerprint, -// so we need to protect against concurrent duplicate creates. -type volLocker struct { - locks sync.Map -} - -// lock the provided name, return true if it was not already locked, -// and error if it was already locked with a different ID. 
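
Concretely, the locking contract described above reads as follows; the example function is illustrative only and assumes the errors and fmt packages are imported alongside this code.

    func Example_volLocker() {
        locker := &volLocker{}

        isNew, err := locker.lock("shared-data", "vol-id-1")
        fmt.Println(isNew, err) // first lock of a name: true <nil>

        isNew, err = locker.lock("shared-data", "vol-id-1")
        fmt.Println(isNew, err) // same name and ID is a no-op: false <nil>

        _, err = locker.lock("shared-data", "vol-id-2")
        fmt.Println(errors.Is(err, ErrVolumeNameExists)) // same name, new ID: true

        locker.release("shared-data") // the name may now be reused

        // Output:
        // true <nil>
        // false <nil>
        // true
    }
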
-func (l *volLocker) lock(name, id string) (bool, error) { - current, exists := l.locks.LoadOrStore(name, id) - if exists && id != current.(string) { - return false, ErrVolumeNameExists - } - return !exists, nil -} - -func (l *volLocker) release(name string) { - l.locks.Delete(name) -} - -func (l *volLocker) isLocked(name string) bool { - _, locked := l.locks.Load(name) - return locked -} diff --git a/client/hostvolumemanager/host_volumes_test.go b/client/hostvolumemanager/host_volumes_test.go deleted file mode 100644 index 35a3e95b327..00000000000 --- a/client/hostvolumemanager/host_volumes_test.go +++ /dev/null @@ -1,441 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package hostvolumemanager - -import ( - "bytes" - "context" - "errors" - "io" - "path/filepath" - "sort" - "testing" - "time" - - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-version" - cstate "github.com/hashicorp/nomad/client/state" - cstructs "github.com/hashicorp/nomad/client/structs" - "github.com/hashicorp/nomad/helper/testlog" - "github.com/hashicorp/nomad/nomad/structs" - "github.com/shoenig/test" - "github.com/shoenig/test/must" -) - -func TestHostVolumeManager(t *testing.T) { - log := testlog.HCLogger(t) - errDB := &cstate.ErrDB{} - memDB := cstate.NewMemDB(log) - node := newFakeNode(t) - - volumesDirCreate := t.TempDir() - volumesDirRegister := t.TempDir() - - hvm := NewHostVolumeManager(log, Config{ - PluginDir: "./test_fixtures", - VolumesDir: volumesDirCreate, - StateMgr: errDB, - UpdateNodeVols: node.updateVol, - }) - - plug := &fakePlugin{volsDir: volumesDirCreate} - hvm.builtIns["test-plugin"] = plug - - ctx := timeout(t) - - t.Run("create", func(t *testing.T) { - // plugin doesn't exist - name := "created-volume" - req := &cstructs.ClientHostVolumeCreateRequest{ - Name: name, - ID: "vol-id-1", - PluginID: "nope", - - RequestedCapacityMinBytes: 5, - } - _, err := hvm.Create(ctx, req) - must.ErrorIs(t, err, ErrPluginNotExists) - - // error from plugin - req.PluginID = "test-plugin" - plug.createErr = errors.New("sad create") - _, err = hvm.Create(ctx, req) - must.ErrorIs(t, err, plug.createErr) - assertNotLocked(t, hvm, name) - plug.reset() - - // error saving state, then error from cleanup attempt - plug.deleteErr = errors.New("sad delete") - _, err = hvm.Create(ctx, req) - must.ErrorIs(t, err, cstate.ErrDBError) - must.ErrorIs(t, err, plug.deleteErr) - assertNotLocked(t, hvm, name) - plug.reset() - - // error saving state, successful cleanup - _, err = hvm.Create(ctx, req) - must.ErrorIs(t, err, cstate.ErrDBError) - must.Eq(t, "vol-id-1", plug.deleted) - assertNotLocked(t, hvm, name) - plug.reset() - - // happy path - hvm.stateMgr = memDB - resp, err := hvm.Create(ctx, req) - must.NoError(t, err) - expectResp := &cstructs.ClientHostVolumeCreateResponse{ - VolumeName: "created-volume", - VolumeID: "vol-id-1", - HostPath: filepath.Join(volumesDirCreate, "vol-id-1"), - CapacityBytes: 5, - } - must.Eq(t, expectResp, resp) - stateDBs, err := memDB.GetDynamicHostVolumes() - must.NoError(t, err) - // should be saved to state - must.Len(t, 1, stateDBs) - must.Eq(t, "vol-id-1", stateDBs[0].ID) - must.Eq(t, "vol-id-1", stateDBs[0].CreateReq.ID) - // should be registered with node - must.MapContainsKey(t, node.vols, name, must.Sprintf("no %q in %+v", name, node.vols)) - assertLocked(t, hvm, name) - - // repeat create with same ID but different size may update the volume - req.RequestedCapacityMinBytes = 10 - expectResp.CapacityBytes = 10 - resp, err = hvm.Create(ctx, 
req) - must.NoError(t, err) - must.Eq(t, expectResp, resp) - - // error saving state on restore/update should not run delete - hvm.stateMgr = errDB - resp, err = hvm.Create(ctx, req) - must.ErrorIs(t, err, cstate.ErrDBError) - must.Nil(t, resp) - must.Eq(t, "", plug.deleted) - plug.reset() - hvm.stateMgr = memDB - - // duplicate create with the same vol name but different ID should fail - _, err = hvm.Create(ctx, &cstructs.ClientHostVolumeCreateRequest{ - Name: name, - ID: "different-vol-id", - PluginID: "test-plugin", - }) - must.ErrorIs(t, err, ErrVolumeNameExists) - }) - - t.Run("register", func(t *testing.T) { - name := "registered-volume" - req := &cstructs.ClientHostVolumeRegisterRequest{ - ID: "vol-id-2", - Name: name, - HostPath: volumesDirRegister, - CapacityBytes: 1000, - } - err := hvm.Register(ctx, req) - must.NoError(t, err) - - // should be saved to state and registered with node - stateDBs, err := memDB.GetDynamicHostVolumes() - must.NoError(t, err) - must.Len(t, 2, stateDBs) - sort.Slice(stateDBs, func(i, j int) bool { return stateDBs[i].ID < stateDBs[j].ID }) - must.Eq(t, "vol-id-2", stateDBs[1].ID) - must.Eq(t, "vol-id-2", stateDBs[1].CreateReq.ID) - must.MapContainsKey(t, node.vols, name, must.Sprintf("no %q in %+v", name, node.vols)) - assertLocked(t, hvm, name) - }) - - // despite being a subtest, this needs to run after "create" and "register" - t.Run("delete", func(t *testing.T) { - name := "created-volume" - // should be locked from "create" above - assertLocked(t, hvm, name) - - // plugin doesn't exist - req := &cstructs.ClientHostVolumeDeleteRequest{ - Name: name, - ID: "vol-id-1", - PluginID: "nope", - } - _, err := hvm.Delete(ctx, req) - must.ErrorIs(t, err, ErrPluginNotExists) - assertLocked(t, hvm, name) - - // error from plugin - req.PluginID = "test-plugin" - plug.deleteErr = errors.New("sad delete") - _, err = hvm.Delete(ctx, req) - must.ErrorIs(t, err, plug.deleteErr) - assertLocked(t, hvm, name) - plug.reset() - - // error saving state - hvm.stateMgr = errDB - _, err = hvm.Delete(ctx, req) - must.ErrorIs(t, err, cstate.ErrDBError) - assertLocked(t, hvm, name) - - // happy path - hvm.stateMgr = memDB - - // and delete it - resp, err := hvm.Delete(ctx, req) - must.NoError(t, err) - must.Eq(t, &cstructs.ClientHostVolumeDeleteResponse{ - VolumeName: "created-volume", - VolumeID: "vol-id-1", - }, resp) - must.Eq(t, VolumeMap{ - "registered-volume": &structs.ClientHostVolumeConfig{ - Name: "registered-volume", - Path: volumesDirRegister, - ID: "vol-id-2", - }, - }, node.vols, must.Sprint("created-volume should be deleted from node")) - stateVols, err := memDB.GetDynamicHostVolumes() - must.NoError(t, err) - must.Len(t, 1, stateVols, must.Sprint("only one volume should be deleted")) - - assertNotLocked(t, hvm, name) - assertLocked(t, hvm, "registered-volume") - - req.Name = "registered-volume" - req.ID = "vol-id-2" - req.PluginID = "" - resp, err = hvm.Delete(ctx, req) - must.NoError(t, err) - - must.Eq(t, VolumeMap{}, node.vols, must.Sprint("all volumes should be deleted from node")) - stateVols, err = memDB.GetDynamicHostVolumes() - must.NoError(t, err) - must.Nil(t, stateVols, must.Sprint("all volumes should be deleted")) - assertNotLocked(t, hvm, "registered-volume") - }) -} - -type fakePlugin struct { - volsDir string - created string - deleted string - fingerprintErr error - createErr error - deleteErr error -} - -func (p *fakePlugin) reset() { - p.deleted, p.fingerprintErr, p.createErr, p.deleteErr = "", nil, nil, nil -} - -func (p *fakePlugin) 
Fingerprint(_ context.Context) (*PluginFingerprint, error) { - if p.fingerprintErr != nil { - return nil, p.fingerprintErr - } - v, err := version.NewVersion("0.0.1") - return &PluginFingerprint{ - Version: v, - }, err -} - -func (p *fakePlugin) Create(_ context.Context, req *cstructs.ClientHostVolumeCreateRequest) (*HostVolumePluginCreateResponse, error) { - if p.createErr != nil { - return nil, p.createErr - } - p.created = req.ID - return &HostVolumePluginCreateResponse{ - Path: filepath.Join(p.volsDir, req.ID), - SizeBytes: req.RequestedCapacityMinBytes, - }, nil -} - -func (p *fakePlugin) Delete(_ context.Context, req *cstructs.ClientHostVolumeDeleteRequest) error { - if p.deleteErr != nil { - return p.deleteErr - } - p.deleted = req.ID - return nil -} - -func assertLocked(t *testing.T, hvm *HostVolumeManager, name string) { - t.Helper() - must.True(t, hvm.locker.isLocked(name), must.Sprintf("vol name %q should be locked", name)) -} - -func assertNotLocked(t *testing.T, hvm *HostVolumeManager, name string) { - t.Helper() - must.False(t, hvm.locker.isLocked(name), must.Sprintf("vol name %q should not be locked", name)) -} - -func TestHostVolumeManager_restoreFromState(t *testing.T) { - log := testlog.HCLogger(t) - hostPath := t.TempDir() - - vol1 := &cstructs.HostVolumeState{ - ID: "test-vol-id-1", - CreateReq: &cstructs.ClientHostVolumeCreateRequest{ - Name: "created-volume", - ID: "test-vol-id-1", - PluginID: "mkdir", - }, - } - vol2 := &cstructs.HostVolumeState{ - ID: "test-vol-id-2", - HostPath: hostPath, - CreateReq: &cstructs.ClientHostVolumeCreateRequest{ - Name: "registered-volume", - ID: "test-vol-id-2", - PluginID: "", // this signifies a Register operation - }, - } - - node := newFakeNode(t) - - t.Run("no vols", func(t *testing.T) { - state := cstate.NewMemDB(log) - hvm := NewHostVolumeManager(log, Config{ - StateMgr: state, - // no other fields are necessary when there are zero volumes - }) - vols, err := hvm.restoreFromState(timeout(t)) - must.NoError(t, err) - must.Eq(t, VolumeMap{}, vols) - }) - - t.Run("happy", func(t *testing.T) { - // put our volume in state - state := cstate.NewMemDB(log) - must.NoError(t, state.PutDynamicHostVolume(vol1)) - must.NoError(t, state.PutDynamicHostVolume(vol2)) - - // new volume manager should load it from state and run Create, - // resulting in a volume directory in this mountDir. 
- volsDir := t.TempDir() - volPath := filepath.Join(volsDir, vol1.ID) - - hvm := NewHostVolumeManager(log, Config{ - StateMgr: state, - UpdateNodeVols: node.updateVol, - PluginDir: "/wherever", - VolumesDir: volsDir, - }) - - vols, err := hvm.restoreFromState(timeout(t)) - must.NoError(t, err) - - expect := map[string]*structs.ClientHostVolumeConfig{ - "created-volume": { - Name: "created-volume", - ID: "test-vol-id-1", - Path: volPath, - ReadOnly: false, - }, - "registered-volume": { - Name: "registered-volume", - ID: "test-vol-id-2", - Path: hostPath, - ReadOnly: false, - }, - } - must.Eq(t, expect, vols) - - must.DirExists(t, volPath) - - assertLocked(t, hvm, "created-volume") - assertLocked(t, hvm, "registered-volume") - }) - - t.Run("state error", func(t *testing.T) { - state := &cstate.ErrDB{} - hvm := NewHostVolumeManager(log, Config{StateMgr: state}) - vols, err := hvm.restoreFromState(timeout(t)) - must.ErrorIs(t, err, cstate.ErrDBError) - must.Nil(t, vols) - }) - - t.Run("plugin missing", func(t *testing.T) { - state := cstate.NewMemDB(log) - vol := &cstructs.HostVolumeState{ - CreateReq: &cstructs.ClientHostVolumeCreateRequest{ - PluginID: "nonexistent-plugin", - }, - } - must.NoError(t, state.PutDynamicHostVolume(vol)) - - hvm := NewHostVolumeManager(log, Config{StateMgr: state}) - vols, err := hvm.restoreFromState(timeout(t)) - must.ErrorIs(t, err, ErrPluginNotExists) - must.MapEmpty(t, vols) - }) - - t.Run("plugin error", func(t *testing.T) { - state := cstate.NewMemDB(log) - vol := &cstructs.HostVolumeState{ - ID: "test-volume", - CreateReq: &cstructs.ClientHostVolumeCreateRequest{ - PluginID: "test-plugin", - }, - } - must.NoError(t, state.PutDynamicHostVolume(vol)) - - log, getLogs := logRecorder(t) - hvm := NewHostVolumeManager(log, Config{StateMgr: state}) - plug := &fakePlugin{ - createErr: errors.New("sad create"), - } - hvm.builtIns["test-plugin"] = plug - - vols, err := hvm.restoreFromState(timeout(t)) - // error during restore should not halt the whole client - must.NoError(t, err) - must.NotNil(t, vols) - // but it should log - logs := getLogs() - must.StrContains(t, logs, "[ERROR]") - must.StrContains(t, logs, `failed to restore: plugin_id=test-plugin volume_id=test-volume error="sad create"`) - }) -} - -type fakeNode struct { - vols VolumeMap - log hclog.Logger -} - -func (n *fakeNode) updateVol(name string, volume *structs.ClientHostVolumeConfig) { - UpdateVolumeMap(n.log, n.vols, name, volume) -} - -func newFakeNode(t *testing.T) *fakeNode { - return &fakeNode{ - vols: make(VolumeMap), - log: testlog.HCLogger(t), - } -} - -// timeout provides a context that times out in 1 second -func timeout(t *testing.T) context.Context { - t.Helper() - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - t.Cleanup(cancel) - return ctx -} - -// logRecorder is here so we can assert that stdout/stderr appear in logs -func logRecorder(t *testing.T) (hclog.Logger, func() string) { - t.Helper() - buf := &bytes.Buffer{} - logger := hclog.New(&hclog.LoggerOptions{ - Name: "log-recorder", - Output: buf, - Level: hclog.Debug, - IncludeLocation: true, - DisableTime: true, - }) - return logger, func() string { - bts, err := io.ReadAll(buf) - test.NoError(t, err) - buf.Reset() - return string(bts) - } -} diff --git a/client/hostvolumemanager/test_fixtures/test_plugin.sh b/client/hostvolumemanager/test_fixtures/test_plugin.sh deleted file mode 100755 index 115cfede839..00000000000 --- a/client/hostvolumemanager/test_fixtures/test_plugin.sh +++ /dev/null @@ -1,50 +0,0 @@ 
-#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -# plugin for host_volume_plugin_test.go -set -xeuo pipefail - -env 1>&2 - -test "$1" == "$DHV_OPERATION" - -echo 'all operations should ignore stderr' 1>&2 - -case $1 in - fingerprint) - echo '{"version": "0.0.2"}' ;; - create) - test "$DHV_VOLUME_NAME" == 'test-vol-name' - test "$DHV_VOLUME_ID" == 'test-vol-id' - test "$DHV_NAMESPACE" == 'test-namespace' - test "$DHV_CAPACITY_MIN_BYTES" -eq 5 - test "$DHV_CAPACITY_MAX_BYTES" -eq 10 - test "$DHV_NODE_ID" == 'test-node' - test "$DHV_NODE_POOL" == 'test-node-pool' - test "$DHV_PARAMETERS" == '{"key":"val"}' - test "$DHV_PLUGIN_DIR" == './test_fixtures' - test -d "$DHV_VOLUMES_DIR" - target="$DHV_VOLUMES_DIR/$DHV_VOLUME_ID" - test "$target" != '/' - mkdir -p "$target" - printf '{"path": "%s", "bytes": 5}' "$target" - ;; - delete) - test "$DHV_NODE_ID" == 'test-node' - test "$DHV_NODE_POOL" == 'test-node-pool' - test "$DHV_NAMESPACE" == 'test-namespace' - test "$DHV_VOLUME_NAME" == 'test-vol-name' - test "$DHV_VOLUME_ID" == 'test-vol-id' - test "$DHV_PARAMETERS" == '{"key":"val"}' - test "$DHV_PLUGIN_DIR" == './test_fixtures' - test -d "$DHV_VOLUMES_DIR" - target="$DHV_VOLUMES_DIR/$DHV_VOLUME_ID" - test "$target" != '/' - test "$DHV_CREATED_PATH" == "$target" - rm -rfv "$target" - ;; - *) - echo "unknown operation $1" - exit 1 ;; -esac diff --git a/client/hostvolumemanager/test_fixtures/test_plugin_sad.sh b/client/hostvolumemanager/test_fixtures/test_plugin_sad.sh deleted file mode 100755 index 6f883297a98..00000000000 --- a/client/hostvolumemanager/test_fixtures/test_plugin_sad.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -echo "$1: sad plugin is sad" -echo "$1: it tells you all about it in stderr" 1>&2 -exit 1 diff --git a/client/hostvolumemanager/volume_fingerprint.go b/client/hostvolumemanager/volume_fingerprint.go deleted file mode 100644 index dff87a77749..00000000000 --- a/client/hostvolumemanager/volume_fingerprint.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package hostvolumemanager - -import ( - "context" - - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/nomad/nomad/structs" -) - -// this file is for fingerprinting *volumes* -// *plugins* are detected in client/fingerprint/dynamic_host_volumes.go - -// HostVolumeNodeUpdater is used to add or remove volumes from the Node. -type HostVolumeNodeUpdater func(name string, volume *structs.ClientHostVolumeConfig) - -// VolumeMap keys are volume `name`s, identical to Node.HostVolumes. -type VolumeMap map[string]*structs.ClientHostVolumeConfig - -// UpdateVolumeMap returns true if it changes the provided `volumes` map. -// If `vol` is nil, key `name` will be removed from the map, if present. -// If it is not nil, `name: vol` will be set on the map, if different. -// -// Since it may mutate the map, the caller should make a copy -// or acquire a lock as appropriate for their context. -func UpdateVolumeMap(log hclog.Logger, volumes VolumeMap, name string, vol *structs.ClientHostVolumeConfig) (changed bool) { - current, exists := volumes[name] - if vol == nil { - if exists { - delete(volumes, name) - changed = true - } - } else { - // if the volume already exists with no ID, it will be because it was - // added to client agent config after having been previously created - // as a dynamic vol. dynamic takes precedence, but log a warning. 
- if exists && current.ID == "" { - log.Warn("overriding static host volume with dynamic", "name", name, "id", vol.ID) - } - if !exists || !vol.Equal(current) { - volumes[name] = vol - changed = true - } - } - return changed -} - -// WaitForFirstFingerprint implements client.FingerprintingPluginManager -// so any existing volumes are added to the client node on agent start. -func (hvm *HostVolumeManager) WaitForFirstFingerprint(ctx context.Context) <-chan struct{} { - // the fingerprint manager puts batchFirstFingerprintsTimeout (50 seconds) - // on the context that it sends to us here so we don't need another - // timeout. we just need to cancel to report when we are done. - ctx, cancel := context.WithCancel(ctx) - defer cancel() - volumes, err := hvm.restoreFromState(ctx) - if err != nil { - hvm.log.Error("failed to restore state", "error", err) - return ctx.Done() - } - for name, vol := range volumes { - hvm.updateNodeVols(name, vol) // => batchNodeUpdates.updateNodeFromHostVolume() - } - return ctx.Done() -} -func (hvm *HostVolumeManager) Run() {} -func (hvm *HostVolumeManager) Shutdown() {} -func (hvm *HostVolumeManager) PluginType() string { - // "Plugin"Type is misleading, because this is for *volumes* but ok. - return "dynamic_host_volume" -} diff --git a/client/hostvolumemanager/volume_fingerprint_test.go b/client/hostvolumemanager/volume_fingerprint_test.go deleted file mode 100644 index 06c8c80f175..00000000000 --- a/client/hostvolumemanager/volume_fingerprint_test.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package hostvolumemanager - -import ( - "path/filepath" - "testing" - - "github.com/hashicorp/nomad/client/state" - cstructs "github.com/hashicorp/nomad/client/structs" - "github.com/hashicorp/nomad/helper/testlog" - "github.com/hashicorp/nomad/nomad/structs" - "github.com/shoenig/test/must" -) - -func TestUpdateVolumeMap(t *testing.T) { - cases := []struct { - name string - - vols VolumeMap - volName string - vol *structs.ClientHostVolumeConfig - - expectMap VolumeMap - expectChange bool - - expectLog string - }{ - { - name: "delete absent", - vols: VolumeMap{}, - volName: "anything", - vol: nil, - expectMap: VolumeMap{}, - expectChange: false, - }, - { - name: "delete present", - vols: VolumeMap{"deleteme": {}}, - volName: "deleteme", - vol: nil, - expectMap: VolumeMap{}, - expectChange: true, - }, - { - name: "add absent", - vols: VolumeMap{}, - volName: "addme", - vol: &structs.ClientHostVolumeConfig{}, - expectMap: VolumeMap{"addme": {}}, - expectChange: true, - }, - { - name: "add present", - vols: VolumeMap{"ignoreme": {}}, - volName: "ignoreme", - vol: &structs.ClientHostVolumeConfig{}, - expectMap: VolumeMap{"ignoreme": {}}, - expectChange: false, - }, - { - // this should not happen with dynamic vols, but test anyway - name: "change present", - vols: VolumeMap{"changeme": {ID: "before"}}, - volName: "changeme", - vol: &structs.ClientHostVolumeConfig{ID: "after"}, - expectMap: VolumeMap{"changeme": {ID: "after"}}, - expectChange: true, - }, - { - // this should only happen during agent start, if a static vol has - // been added to config after a previous dynamic vol was created - // with the same name. 
- name: "override static", - vols: VolumeMap{"overrideme": {ID: ""}}, // static vols have no ID - volName: "overrideme", - vol: &structs.ClientHostVolumeConfig{ID: "dynamic-vol-id"}, - expectMap: VolumeMap{"overrideme": {ID: "dynamic-vol-id"}}, - expectChange: true, - expectLog: "overriding static host volume with dynamic: name=overrideme id=dynamic-vol-id", - }, - } - - for _, tc := range cases { - t.Run(tc.name, func(t *testing.T) { - log, getLogs := logRecorder(t) - - changed := UpdateVolumeMap(log, tc.vols, tc.volName, tc.vol) - must.Eq(t, tc.expectMap, tc.vols) - - if tc.expectChange { - must.True(t, changed, must.Sprint("expect volume to have been changed")) - } else { - must.False(t, changed, must.Sprint("expect volume not to have been changed")) - } - - must.StrContains(t, getLogs(), tc.expectLog) - }) - } -} - -func TestWaitForFirstFingerprint(t *testing.T) { - log := testlog.HCLogger(t) - tmp := t.TempDir() - memDB := state.NewMemDB(log) - node := newFakeNode(t) - hvm := NewHostVolumeManager(log, Config{ - PluginDir: "", - VolumesDir: tmp, - StateMgr: memDB, - UpdateNodeVols: node.updateVol, - }) - plug := &fakePlugin{volsDir: tmp} - hvm.builtIns = map[string]HostVolumePlugin{ - "test-plugin": plug, - } - must.NoError(t, memDB.PutDynamicHostVolume(&cstructs.HostVolumeState{ - ID: "vol-id", - CreateReq: &cstructs.ClientHostVolumeCreateRequest{ - ID: "vol-id", - Name: "vol-name", - PluginID: "test-plugin", - }, - })) - - ctx := timeout(t) - done := hvm.WaitForFirstFingerprint(ctx) - select { - case <-ctx.Done(): - t.Fatal("fingerprint timed out") - case <-done: - } - - must.Eq(t, "vol-id", plug.created) - must.Eq(t, VolumeMap{ - "vol-name": &structs.ClientHostVolumeConfig{ - Name: "vol-name", - ID: "vol-id", - Path: filepath.Join(tmp, "vol-id"), - ReadOnly: false, - }, - }, node.vols) -} diff --git a/client/lib/numalib/detect_linux.go b/client/lib/numalib/detect_linux.go index dafdd9c3556..db3f935d0ce 100644 --- a/client/lib/numalib/detect_linux.go +++ b/client/lib/numalib/detect_linux.go @@ -32,18 +32,16 @@ func PlatformScanners(cpuDisableDmidecode bool) []SystemScanner { } const ( - sysRoot = "/sys/devices/system" - nodeOnline = sysRoot + "/node/online" - cpuOnline = sysRoot + "/cpu/online" - distanceFile = sysRoot + "/node/node%d/distance" - cpulistFile = sysRoot + "/node/node%d/cpulist" - cpuDriverFile = sysRoot + "/cpu/cpu%d/cpufreq/scaling_driver" - cpuMaxFile = sysRoot + "/cpu/cpu%d/cpufreq/cpuinfo_max_freq" - cpuCpccNominalFile = sysRoot + "/cpu/cpu%d/acpi_cppc/nominal_freq" - cpuIntelBaseFile = sysRoot + "/cpu/cpu%d/cpufreq/base_frequency" - cpuSocketFile = sysRoot + "/cpu/cpu%d/topology/physical_package_id" - cpuSiblingFile = sysRoot + "/cpu/cpu%d/topology/thread_siblings_list" - deviceFiles = "/sys/bus/pci/devices" + sysRoot = "/sys/devices/system" + nodeOnline = sysRoot + "/node/online" + cpuOnline = sysRoot + "/cpu/online" + distanceFile = sysRoot + "/node/node%d/distance" + cpulistFile = sysRoot + "/node/node%d/cpulist" + cpuMaxFile = sysRoot + "/cpu/cpu%d/cpufreq/cpuinfo_max_freq" + cpuBaseFile = sysRoot + "/cpu/cpu%d/cpufreq/base_frequency" + cpuSocketFile = sysRoot + "/cpu/cpu%d/topology/physical_package_id" + cpuSiblingFile = sysRoot + "/cpu/cpu%d/topology/thread_siblings_list" + deviceFiles = "/sys/bus/pci/devices" ) // pathReaderFn is a path reader function, injected into all value getters to @@ -135,8 +133,8 @@ func (*Sysfs) discoverCores(st *Topology, readerFunc pathReaderFn) { st.nodeIDs = idset.From[hw.NodeID]([]hw.NodeID{0}) const node = 0 const socket = 0 
- - base, cpuMax := discoverCoreSpeeds(core, readerFunc) + cpuMax, _ := getNumeric[hw.KHz](cpuMaxFile, 64, readerFunc, core) + base, _ := getNumeric[hw.KHz](cpuBaseFile, 64, readerFunc, core) st.insert(node, socket, core, Performance, cpuMax, base) st.Nodes = st.nodeIDs.Slice() return nil @@ -153,8 +151,9 @@ func (*Sysfs) discoverCores(st *Topology, readerFunc pathReaderFn) { _ = cores.ForEach(func(core hw.CoreID) error { // best effort, zero values are defaults socket, _ := getNumeric[hw.SocketID](cpuSocketFile, 8, readerFunc, core) + cpuMax, _ := getNumeric[hw.KHz](cpuMaxFile, 64, readerFunc, core) + base, _ := getNumeric[hw.KHz](cpuBaseFile, 64, readerFunc, core) siblings, _ := getIDSet[hw.CoreID](cpuSiblingFile, readerFunc, core) - base, cpuMax := discoverCoreSpeeds(core, readerFunc) // if we get an incorrect core number, this means we're not getting the right // data from SysFS. In this case we bail and set default values. @@ -170,28 +169,6 @@ func (*Sysfs) discoverCores(st *Topology, readerFunc pathReaderFn) { } } -func discoverCoreSpeeds(core hw.CoreID, readerFunc pathReaderFn) (hw.KHz, hw.KHz) { - baseSpeed := hw.KHz(0) - maxSpeed := hw.KHz(0) - - driver, _ := getString(cpuDriverFile, readerFunc, core) - - switch driver { - case "acpi-cpufreq": - // Indicates the highest sustained performance level of the processor - baseSpeedMHz, _ := getNumeric[hw.MHz](cpuCpccNominalFile, 64, readerFunc, core) - baseSpeed = baseSpeedMHz.KHz() - default: - // COMPAT(1.9.x): while the `base_frequency` file is specific to the `intel_pstate` scaling driver, we should - // preserve the default while we may uncover more scaling driver specific implementations. - baseSpeed, _ = getNumeric[hw.KHz](cpuIntelBaseFile, 64, readerFunc, core) - } - - maxSpeed, _ = getNumeric[hw.KHz](cpuMaxFile, 64, readerFunc, core) - - return baseSpeed, maxSpeed -} - func getIDSet[T idset.ID](path string, readerFunc pathReaderFn, args ...any) (*idset.Set[T], error) { path = fmt.Sprintf(path, args...) 
s, err := readerFunc(path) diff --git a/client/lib/numalib/detect_linux_test.go b/client/lib/numalib/detect_linux_test.go index ceffd02c5c2..e253cacb622 100644 --- a/client/lib/numalib/detect_linux_test.go +++ b/client/lib/numalib/detect_linux_test.go @@ -68,37 +68,6 @@ func goodSysData(path string) ([]byte, error) { }[path], nil } -func goodSysDataAMD(path string) ([]byte, error) { - return map[string][]byte{ - "/sys/devices/system/node/online": []byte("0-1"), - "/sys/devices/system/cpu/online": []byte("0-3"), - "/sys/devices/system/node/node0/distance": []byte("10"), - "/sys/devices/system/node/node0/cpulist": []byte("0-3"), - "/sys/devices/system/node/node1/distance": []byte("10"), - "/sys/devices/system/node/node1/cpulist": []byte("0-3"), - "/sys/devices/system/cpu/cpu0/acpi_cppc/nominal_freq": []byte("2450"), - "/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq": []byte("3500000"), - "/sys/devices/system/cpu/cpu0/cpufreq/scaling_driver": []byte("acpi-cpufreq"), - "/sys/devices/system/cpu/cpu0/topology/physical_package_id": []byte("0"), - "/sys/devices/system/cpu/cpu0/topology/thread_siblings_list": []byte("0,2"), - "/sys/devices/system/cpu/cpu1/acpi_cppc/nominal_freq": []byte("2450"), - "/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_max_freq": []byte("3500000"), - "/sys/devices/system/cpu/cpu1/cpufreq/scaling_driver": []byte("acpi-cpufreq"), - "/sys/devices/system/cpu/cpu1/topology/physical_package_id": []byte("0"), - "/sys/devices/system/cpu/cpu1/topology/thread_siblings_list": []byte("1,3"), - "/sys/devices/system/cpu/cpu2/acpi_cppc/nominal_freq": []byte("2450"), - "/sys/devices/system/cpu/cpu2/cpufreq/cpuinfo_max_freq": []byte("3500000"), - "/sys/devices/system/cpu/cpu2/cpufreq/scaling_driver": []byte("acpi-cpufreq"), - "/sys/devices/system/cpu/cpu2/topology/physical_package_id": []byte("0"), - "/sys/devices/system/cpu/cpu2/topology/thread_siblings_list": []byte("0,2"), - "/sys/devices/system/cpu/cpu3/acpi_cppc/nominal_freq": []byte("2450"), - "/sys/devices/system/cpu/cpu3/cpufreq/cpuinfo_max_freq": []byte("3500000"), - "/sys/devices/system/cpu/cpu3/cpufreq/scaling_driver": []byte("acpi-cpufreq"), - "/sys/devices/system/cpu/cpu3/topology/physical_package_id": []byte("0"), - "/sys/devices/system/cpu/cpu3/topology/thread_siblings_list": []byte("1,3"), - }[path], nil -} - func TestSysfs_discoverOnline(t *testing.T) { st := MockTopology(&idset.Set[hw.NodeID]{}, SLIT{}, []Core{}) goodIDSet := idset.From[hw.NodeID]([]uint8{0, 1}) @@ -226,44 +195,6 @@ func TestSysfs_discoverCores(t *testing.T) { }, }, }}, - {"two nodes and good sys AMD data", twoNodes, goodSysDataAMD, &Topology{ - nodeIDs: twoNodes, - Nodes: twoNodes.Slice(), - Cores: []Core{ - { - SocketID: 1, - NodeID: 0, - ID: 0, - Grade: Performance, - BaseSpeed: 2450, - MaxSpeed: 3500, - }, - { - SocketID: 1, - NodeID: 0, - ID: 1, - Grade: Performance, - BaseSpeed: 2450, - MaxSpeed: 3500, - }, - { - SocketID: 1, - NodeID: 0, - ID: 2, - Grade: Performance, - BaseSpeed: 2450, - MaxSpeed: 3500, - }, - { - SocketID: 1, - NodeID: 0, - ID: 3, - Grade: Performance, - BaseSpeed: 2450, - MaxSpeed: 3500, - }, - }, - }}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/client/lib/numalib/hw/speeds.go b/client/lib/numalib/hw/speeds.go index c3b1a029253..23bd010a86c 100644 --- a/client/lib/numalib/hw/speeds.go +++ b/client/lib/numalib/hw/speeds.go @@ -16,10 +16,6 @@ func (khz KHz) MHz() MHz { return MHz(khz / 1000) } -func (mhz MHz) KHz() KHz { - return KHz(mhz * 1000) -} - func (khz KHz) String() string { return 
strconv.FormatUint(uint64(khz.MHz()), 10) } diff --git a/client/meta_endpoint.go b/client/meta_endpoint.go index 5eb0f48daed..85b4dfa10fd 100644 --- a/client/meta_endpoint.go +++ b/client/meta_endpoint.go @@ -8,7 +8,7 @@ import ( "net/http" "time" - metrics "github.com/hashicorp/go-metrics/compat" + "github.com/armon/go-metrics" "github.com/hashicorp/nomad/nomad/structs" ) diff --git a/client/node_updater.go b/client/node_updater.go index 36de5663a94..6fe51cdf56e 100644 --- a/client/node_updater.go +++ b/client/node_updater.go @@ -9,9 +9,7 @@ import ( "sync" "time" - "github.com/hashicorp/go-hclog" "github.com/hashicorp/nomad/client/devicemanager" - hvm "github.com/hashicorp/nomad/client/hostvolumemanager" "github.com/hashicorp/nomad/client/pluginmanager/csimanager" "github.com/hashicorp/nomad/client/pluginmanager/drivermanager" "github.com/hashicorp/nomad/nomad/structs" @@ -32,7 +30,7 @@ func (c *Client) batchFirstFingerprints() { ch, err := c.pluginManagers.WaitForFirstFingerprint(ctx) if err != nil { - c.logger.Warn("failed to batch initial fingerprint updates, switching to incremental updates") + c.logger.Warn("failed to batch initial fingerprint updates, switching to incemental updates") goto SEND_BATCH } @@ -48,13 +46,6 @@ SEND_BATCH: newConfig := c.config.Copy() - // host volume updates - var hostVolChanged bool - c.batchNodeUpdates.batchHostVolumeUpdates(func(name string, vol *structs.ClientHostVolumeConfig) { - hostVolChanged = hvm.UpdateVolumeMap(c.logger.Named("node_updater").With("method", "batchFirstFingerprint"), - newConfig.Node.HostVolumes, name, vol) - }) - // csi updates var csiChanged bool c.batchNodeUpdates.batchCSIUpdates(func(name string, info *structs.CSIInfo) { @@ -94,7 +85,7 @@ SEND_BATCH: }) // only update the node if changes occurred - if driverChanged || devicesChanged || csiChanged || hostVolChanged { + if driverChanged || devicesChanged || csiChanged { c.config = newConfig c.updateNode() } @@ -132,24 +123,6 @@ func (c *Client) updateNodeFromCSI(name string, info *structs.CSIInfo) { } } -func (c *Client) updateNodeFromHostVol(name string, vol *structs.ClientHostVolumeConfig) { - c.configLock.Lock() - defer c.configLock.Unlock() - - newConfig := c.config.Copy() - - if newConfig.Node.HostVolumes == nil { - newConfig.Node.HostVolumes = make(map[string]*structs.ClientHostVolumeConfig) - } - - changed := hvm.UpdateVolumeMap(c.logger.Named("node_updater").With("method", "updateNodeFromHostVol"), - newConfig.Node.HostVolumes, name, vol) - if changed { - c.config = newConfig - c.updateNode() - } -} - // updateNodeFromCSIControllerLocked makes the changes to the node from a csi // update but does not send the update to the server. c.configLock must be held // before calling this func. 
@@ -345,8 +318,6 @@ func (c *Client) updateNodeFromDevicesLocked(devices []*structs.NodeDeviceResour // Once ready, the batches can be flushed and toggled to stop batching and forward // all updates to a configured callback to be performed incrementally type batchNodeUpdates struct { - logger hclog.Logger - // access to driver fields must hold driversMu lock drivers map[string]*structs.DriverInfo driversBatched bool @@ -365,22 +336,14 @@ type batchNodeUpdates struct { csiBatched bool csiCB csimanager.UpdateNodeCSIInfoFunc csiMu sync.Mutex - - hostVolumes hvm.VolumeMap - hostVolumesBatched bool - hostVolumeCB hvm.HostVolumeNodeUpdater - hostVolumeMu sync.Mutex } func newBatchNodeUpdates( - logger hclog.Logger, driverCB drivermanager.UpdateNodeDriverInfoFn, devicesCB devicemanager.UpdateNodeDevicesFn, - csiCB csimanager.UpdateNodeCSIInfoFunc, - hostVolumeCB hvm.HostVolumeNodeUpdater) *batchNodeUpdates { + csiCB csimanager.UpdateNodeCSIInfoFunc) *batchNodeUpdates { return &batchNodeUpdates{ - logger: logger, drivers: make(map[string]*structs.DriverInfo), driverCB: driverCB, devices: []*structs.NodeDeviceResource{}, @@ -388,37 +351,9 @@ func newBatchNodeUpdates( csiNodePlugins: make(map[string]*structs.CSIInfo), csiControllerPlugins: make(map[string]*structs.CSIInfo), csiCB: csiCB, - hostVolumes: make(hvm.VolumeMap), - hostVolumeCB: hostVolumeCB, } } -// this is the one that the volume manager runs -func (b *batchNodeUpdates) updateNodeFromHostVolume(name string, vol *structs.ClientHostVolumeConfig) { - b.hostVolumeMu.Lock() - defer b.hostVolumeMu.Unlock() - if b.hostVolumesBatched { - b.hostVolumeCB(name, vol) // => Client.updateNodeFromHostVol() - return - } - hvm.UpdateVolumeMap(b.logger.Named("node_updater").With("method", "updateNodeFromHostVolume"), - b.hostVolumes, name, vol) -} - -// this one runs on client start -func (b *batchNodeUpdates) batchHostVolumeUpdates(f hvm.HostVolumeNodeUpdater) error { - b.hostVolumeMu.Lock() - defer b.hostVolumeMu.Unlock() - if b.hostVolumesBatched { - return fmt.Errorf("host volume updates already batched") - } - b.hostVolumesBatched = true - for name, vol := range b.hostVolumes { - f(name, vol) // => c.batchNodeUpdates.batchHostVolumeUpdates(FUNC - } - return nil -} - // updateNodeFromCSI implements csimanager.UpdateNodeCSIInfoFunc and is used in // the csi manager to send csi fingerprints to the server. 
func (b *batchNodeUpdates) updateNodeFromCSI(plugin string, info *structs.CSIInfo) { diff --git a/client/pluginmanager/csimanager/interface.go b/client/pluginmanager/csimanager/interface.go index 31dc8165ef0..85f7e78ae97 100644 --- a/client/pluginmanager/csimanager/interface.go +++ b/client/pluginmanager/csimanager/interface.go @@ -29,8 +29,8 @@ func (mi *MountInfo) Copy() *MountInfo { type UsageOptions struct { ReadOnly bool - AttachmentMode structs.VolumeAttachmentMode - AccessMode structs.VolumeAccessMode + AttachmentMode structs.CSIVolumeAttachmentMode + AccessMode structs.CSIVolumeAccessMode MountOptions *structs.CSIMountOptions } diff --git a/client/rpc.go b/client/rpc.go index 9d344111945..aeba5ee62c7 100644 --- a/client/rpc.go +++ b/client/rpc.go @@ -11,7 +11,7 @@ import ( "strings" "time" - metrics "github.com/hashicorp/go-metrics/compat" + metrics "github.com/armon/go-metrics" "github.com/hashicorp/go-msgpack/v2/codec" "github.com/hashicorp/nomad/client/servers" "github.com/hashicorp/nomad/helper" @@ -28,7 +28,6 @@ type rpcEndpoints struct { Allocations *Allocations Agent *Agent NodeMeta *NodeMeta - HostVolume *HostVolume } // ClientRPC is used to make a local, client only RPC call @@ -302,7 +301,6 @@ func (c *Client) setupClientRpc(rpcs map[string]interface{}) { c.endpoints.Allocations = NewAllocationsEndpoint(c) c.endpoints.Agent = NewAgentEndpoint(c) c.endpoints.NodeMeta = newNodeMetaEndpoint(c) - c.endpoints.HostVolume = newHostVolumesEndpoint(c) c.setupClientRpcServer(c.rpcServer) } @@ -318,7 +316,6 @@ func (c *Client) setupClientRpcServer(server *rpc.Server) { server.Register(c.endpoints.Allocations) server.Register(c.endpoints.Agent) server.Register(c.endpoints.NodeMeta) - server.Register(c.endpoints.HostVolume) } // rpcConnListener is a long lived function that listens for new connections diff --git a/client/state/db_bolt.go b/client/state/db_bolt.go index bef111f6e9a..2471cda3d14 100644 --- a/client/state/db_bolt.go +++ b/client/state/db_bolt.go @@ -138,8 +138,6 @@ var ( // nodeRegistrationKey is the key at which node registration data is stored. nodeRegistrationKey = []byte("node_registration") - - hostVolBucket = []byte("host_volumes_to_create") ) // taskBucketName returns the bucket name for the given task name. @@ -1050,45 +1048,6 @@ func (s *BoltStateDB) GetNodeRegistration() (*cstructs.NodeRegistration, error) return ®, err } -func (s *BoltStateDB) PutDynamicHostVolume(vol *cstructs.HostVolumeState) error { - return s.db.Update(func(tx *boltdd.Tx) error { - b, err := tx.CreateBucketIfNotExists(hostVolBucket) - if err != nil { - return err - } - return b.Put([]byte(vol.ID), vol) - }) -} - -func (s *BoltStateDB) GetDynamicHostVolumes() ([]*cstructs.HostVolumeState, error) { - var vols []*cstructs.HostVolumeState - err := s.db.View(func(tx *boltdd.Tx) error { - b := tx.Bucket(hostVolBucket) - if b == nil { - return nil - } - return b.BoltBucket().ForEach(func(k, v []byte) error { - var vol cstructs.HostVolumeState - err := b.Get(k, &vol) - if err != nil { - return err - } - vols = append(vols, &vol) - return nil - }) - }) - if boltdd.IsErrNotFound(err) { - return nil, nil - } - return vols, err -} - -func (s *BoltStateDB) DeleteDynamicHostVolume(id string) error { - return s.db.Update(func(tx *boltdd.Tx) error { - return tx.Bucket(hostVolBucket).Delete([]byte(id)) - }) -} - // init initializes metadata entries in a newly created state database. 
func (s *BoltStateDB) init() error { return s.db.Update(func(tx *boltdd.Tx) error { diff --git a/client/state/db_error.go b/client/state/db_error.go index 6c99defa2ad..78ef01b7850 100644 --- a/client/state/db_error.go +++ b/client/state/db_error.go @@ -4,7 +4,6 @@ package state import ( - "errors" "fmt" arstate "github.com/hashicorp/nomad/client/allocrunner/state" @@ -17,10 +16,6 @@ import ( "github.com/hashicorp/nomad/nomad/structs" ) -var _ StateDB = &ErrDB{} - -var ErrDBError = errors.New("Error!") - // ErrDB implements a StateDB that returns errors on restore methods, used for testing type ErrDB struct { // Allocs is a preset slice of allocations used in GetAllAllocations @@ -159,16 +154,6 @@ func (m *ErrDB) GetNodeRegistration() (*cstructs.NodeRegistration, error) { return nil, fmt.Errorf("Error!") } -func (m *ErrDB) PutDynamicHostVolume(_ *cstructs.HostVolumeState) error { - return ErrDBError -} -func (m *ErrDB) GetDynamicHostVolumes() ([]*cstructs.HostVolumeState, error) { - return nil, ErrDBError -} -func (m *ErrDB) DeleteDynamicHostVolume(_ string) error { - return ErrDBError -} - func (m *ErrDB) Close() error { return fmt.Errorf("Error!") } diff --git a/client/state/db_mem.go b/client/state/db_mem.go index 32abd883e77..91e6481b4c9 100644 --- a/client/state/db_mem.go +++ b/client/state/db_mem.go @@ -60,8 +60,6 @@ type MemDB struct { nodeRegistration *cstructs.NodeRegistration - dynamicHostVolumes map[string]*cstructs.HostVolumeState - logger hclog.Logger mu sync.RWMutex @@ -70,16 +68,15 @@ type MemDB struct { func NewMemDB(logger hclog.Logger) *MemDB { logger = logger.Named("memdb") return &MemDB{ - allocs: make(map[string]*structs.Allocation), - deployStatus: make(map[string]*structs.AllocDeploymentStatus), - networkStatus: make(map[string]*structs.AllocNetworkStatus), - acknowledgedState: make(map[string]*arstate.State), - localTaskState: make(map[string]map[string]*state.LocalState), - taskState: make(map[string]map[string]*structs.TaskState), - checks: make(checks.ClientResults), - identities: make(map[string][]*structs.SignedWorkloadIdentity), - dynamicHostVolumes: make(map[string]*cstructs.HostVolumeState), - logger: logger, + allocs: make(map[string]*structs.Allocation), + deployStatus: make(map[string]*structs.AllocDeploymentStatus), + networkStatus: make(map[string]*structs.AllocNetworkStatus), + acknowledgedState: make(map[string]*arstate.State), + localTaskState: make(map[string]map[string]*state.LocalState), + taskState: make(map[string]map[string]*structs.TaskState), + checks: make(checks.ClientResults), + identities: make(map[string][]*structs.SignedWorkloadIdentity), + logger: logger, } } @@ -357,28 +354,6 @@ func (m *MemDB) GetNodeRegistration() (*cstructs.NodeRegistration, error) { return m.nodeRegistration, nil } -func (m *MemDB) PutDynamicHostVolume(vol *cstructs.HostVolumeState) error { - m.mu.Lock() - defer m.mu.Unlock() - m.dynamicHostVolumes[vol.ID] = vol - return nil -} -func (m *MemDB) GetDynamicHostVolumes() ([]*cstructs.HostVolumeState, error) { - m.mu.Lock() - defer m.mu.Unlock() - var vols []*cstructs.HostVolumeState - for _, vol := range m.dynamicHostVolumes { - vols = append(vols, vol) - } - return vols, nil -} -func (m *MemDB) DeleteDynamicHostVolume(s string) error { - m.mu.Lock() - defer m.mu.Unlock() - delete(m.dynamicHostVolumes, s) - return nil -} - func (m *MemDB) Close() error { m.mu.Lock() defer m.mu.Unlock() diff --git a/client/state/db_noop.go b/client/state/db_noop.go index 09488c181a1..345025a4d52 100644 --- a/client/state/db_noop.go +++ 
b/client/state/db_noop.go @@ -14,8 +14,6 @@ import ( "github.com/hashicorp/nomad/nomad/structs" ) -var _ StateDB = &NoopDB{} - // NoopDB implements a StateDB that does not persist any data. type NoopDB struct{} @@ -147,16 +145,6 @@ func (n NoopDB) GetNodeRegistration() (*cstructs.NodeRegistration, error) { return nil, nil } -func (n NoopDB) PutDynamicHostVolume(_ *cstructs.HostVolumeState) error { - return nil -} -func (n NoopDB) GetDynamicHostVolumes() ([]*cstructs.HostVolumeState, error) { - return nil, nil -} -func (n NoopDB) DeleteDynamicHostVolume(_ string) error { - return nil -} - func (n NoopDB) Close() error { return nil } diff --git a/client/state/db_test.go b/client/state/db_test.go index 3a03cf3a2cc..d13431a6207 100644 --- a/client/state/db_test.go +++ b/client/state/db_test.go @@ -15,7 +15,6 @@ import ( dmstate "github.com/hashicorp/nomad/client/devicemanager/state" "github.com/hashicorp/nomad/client/dynamicplugins" driverstate "github.com/hashicorp/nomad/client/pluginmanager/drivermanager/state" - cstructs "github.com/hashicorp/nomad/client/structs" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -385,41 +384,6 @@ func TestStateDB_DynamicRegistry(t *testing.T) { }) } -// TestStateDB_HostVolumes asserts the behavior of dynamic host volume state. -func TestStateDB_HostVolumes(t *testing.T) { - ci.Parallel(t) - - testDB(t, func(t *testing.T, db StateDB) { - vols, err := db.GetDynamicHostVolumes() - must.NoError(t, err) - must.Len(t, 0, vols) - - vol := &cstructs.HostVolumeState{ - ID: "test-vol-id", - CreateReq: &cstructs.ClientHostVolumeCreateRequest{ - ID: "test-vol-id", - Name: "test-vol-name", - PluginID: "test-plugin-id", - NodeID: "test-node-id", - RequestedCapacityMinBytes: 5, - RequestedCapacityMaxBytes: 10, - Parameters: map[string]string{"test": "ing"}, - }, - } - - must.NoError(t, db.PutDynamicHostVolume(vol)) - vols, err = db.GetDynamicHostVolumes() - must.NoError(t, err) - must.Len(t, 1, vols) - must.Eq(t, vol, vols[0]) - - must.NoError(t, db.DeleteDynamicHostVolume(vol.ID)) - vols, err = db.GetDynamicHostVolumes() - must.NoError(t, err) - must.Len(t, 0, vols) - }) -} - func TestStateDB_CheckResult_keyForCheck(t *testing.T) { ci.Parallel(t) diff --git a/client/state/interface.go b/client/state/interface.go index 0460a75e20f..a9cd4845038 100644 --- a/client/state/interface.go +++ b/client/state/interface.go @@ -137,10 +137,6 @@ type StateDB interface { PutNodeRegistration(*cstructs.NodeRegistration) error GetNodeRegistration() (*cstructs.NodeRegistration, error) - PutDynamicHostVolume(*cstructs.HostVolumeState) error - GetDynamicHostVolumes() ([]*cstructs.HostVolumeState, error) - DeleteDynamicHostVolume(string) error - // Close the database. Unsafe for further use after calling regardless // of return value. Close() error diff --git a/client/structs/csi.go b/client/structs/csi.go index 10b74a23c71..4078e0ac949 100644 --- a/client/structs/csi.go +++ b/client/structs/csi.go @@ -65,8 +65,8 @@ type ClientCSIControllerValidateVolumeRequest struct { // COMPAT(1.1.1): the AttachmentMode and AccessMode fields are deprecated // and replaced by the VolumeCapabilities field above - AttachmentMode structs.VolumeAttachmentMode - AccessMode structs.VolumeAccessMode + AttachmentMode structs.CSIVolumeAttachmentMode + AccessMode structs.CSIVolumeAccessMode // Parameters as returned by storage provider in CreateVolumeResponse. // This field is optional. 
@@ -117,10 +117,10 @@ type ClientCSIControllerAttachVolumeRequest struct { // AttachmentMode indicates how the volume should be attached and mounted into // a task. - AttachmentMode structs.VolumeAttachmentMode + AttachmentMode structs.CSIVolumeAttachmentMode // AccessMode indicates the desired concurrent access model for the volume - AccessMode structs.VolumeAccessMode + AccessMode structs.CSIVolumeAccessMode // MountOptions is an optional field that contains additional configuration // when providing an AttachmentMode of CSIVolumeAttachmentModeFilesystem @@ -449,8 +449,8 @@ type ClientCSINodeDetachVolumeRequest struct { // These fields should match the original volume request so that // we can find the mount points on the client - AttachmentMode structs.VolumeAttachmentMode - AccessMode structs.VolumeAccessMode + AttachmentMode structs.CSIVolumeAttachmentMode + AccessMode structs.CSIVolumeAccessMode ReadOnly bool } diff --git a/client/structs/host_volumes.go b/client/structs/host_volumes.go deleted file mode 100644 index 3e97e9c99bb..00000000000 --- a/client/structs/host_volumes.go +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package structs - -type HostVolumeState struct { - ID string - HostPath string - CreateReq *ClientHostVolumeCreateRequest -} - -type ClientHostVolumeCreateRequest struct { - // ID is a UUID-like string generated by the server. - ID string - - // Name is the name that group.volume will use to identify the volume - // source. Not expected to be unique. - Name string - - // PluginID is the name of the host volume plugin on the client that will be - // used for creating the volume. If omitted, the client will use its default - // built-in plugin. - PluginID string - - // Namespace is the Nomad namespace for the volume. - // It's in the client RPC to be included in plugin execution environment. - Namespace string - - // NodeID is the node where the volume is placed. It's included in the - // client RPC request so that the server can route the request to the - // correct node. - NodeID string - - // Because storage may allow only specific intervals of size, we accept a - // min and max and return the actual capacity when the volume is created or - // updated on the client - RequestedCapacityMinBytes int64 - RequestedCapacityMaxBytes int64 - - // Parameters are an opaque map of parameters for the host volume plugin. - Parameters map[string]string -} - -type ClientHostVolumeCreateResponse struct { - VolumeName string - VolumeID string - - // HostPath is the host path where the volume's mount point was created. We - // send this back to the server to make debugging easier. - HostPath string - - // Capacity is the size in bytes that was actually provisioned by the host - // volume plugin. - CapacityBytes int64 -} - -type ClientHostVolumeRegisterRequest struct { - // ID is a UUID-like string generated by the server. - ID string - - // Name is the name that group.volume will use to identify the volume - // source. Not expected to be unique cluster-wide, but must be unique per - // node. - Name string - - // NodeID is the node where the volume is placed. It's included in the - // client RPC request so that the server can route the request to the - // correct node. - NodeID string - - // HostPath is the host path where the volume's mount point was created - // out-of-band. - HostPath string - - // Capacity is the size in bytes that was provisioned out-of-band. 
- CapacityBytes int64 - - // Parameters are an opaque map of parameters for the host volume plugin. - Parameters map[string]string -} - -type ClientHostVolumeRegisterResponse struct{} - -type ClientHostVolumeDeleteRequest struct { - // ID is a UUID-like string generated by the server. - ID string - - Name string - - // PluginID is the name of the host volume plugin on the client that will be - // used for deleting the volume. If omitted, the client will use its default - // built-in plugin. - PluginID string - - // Namespace is the Nomad namespace for the volume. - // It's in the client RPC to be included in plugin execution environment. - Namespace string - - // NodeID is the node where the volume is placed. It's included in the - // client RPC request so that the server can route the request to the - // correct node. - NodeID string - - // HostPath is the host path where the volume's mount point was created. - // We send this from the server to allow verification by plugins. - HostPath string - - // Parameters are an opaque map of parameters for the host volume plugin. - Parameters map[string]string -} - -type ClientHostVolumeDeleteResponse struct { - VolumeName string - VolumeID string -} diff --git a/client/vaultclient/vaultclient.go b/client/vaultclient/vaultclient.go index f83bf13c2dd..577b0648e33 100644 --- a/client/vaultclient/vaultclient.go +++ b/client/vaultclient/vaultclient.go @@ -13,8 +13,8 @@ import ( "sync" "time" + metrics "github.com/armon/go-metrics" hclog "github.com/hashicorp/go-hclog" - metrics "github.com/hashicorp/go-metrics/compat" "github.com/hashicorp/nomad/helper/useragent" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/nomad/structs/config" diff --git a/command/agent/agent.go b/command/agent/agent.go index d9bbe78c0d3..82be670c4e0 100644 --- a/command/agent/agent.go +++ b/command/agent/agent.go @@ -16,10 +16,10 @@ import ( "sync" "time" + metrics "github.com/armon/go-metrics" "github.com/dustin/go-humanize" consulapi "github.com/hashicorp/consul/api" log "github.com/hashicorp/go-hclog" - metrics "github.com/hashicorp/go-metrics/compat" uuidparse "github.com/hashicorp/go-uuid" "github.com/hashicorp/nomad/client" clientconfig "github.com/hashicorp/nomad/client/config" @@ -724,8 +724,6 @@ func convertClientConfig(agentConfig *Config) (*clientconfig.Config, error) { if agentConfig.DataDir != "" { conf.StateDir = filepath.Join(agentConfig.DataDir, "client") conf.AllocDir = filepath.Join(agentConfig.DataDir, "alloc") - conf.HostVolumesDir = filepath.Join(agentConfig.DataDir, "host_volumes") - conf.HostVolumePluginDir = filepath.Join(agentConfig.DataDir, "host_volume_plugins") dataParent := filepath.Dir(agentConfig.DataDir) conf.AllocMountsDir = filepath.Join(dataParent, "alloc_mounts") } @@ -738,12 +736,6 @@ func convertClientConfig(agentConfig *Config) (*clientconfig.Config, error) { if agentConfig.Client.AllocMountsDir != "" { conf.AllocMountsDir = agentConfig.Client.AllocMountsDir } - if agentConfig.Client.HostVolumePluginDir != "" { - conf.HostVolumePluginDir = agentConfig.Client.HostVolumePluginDir - } - if agentConfig.Client.HostVolumesDir != "" { - conf.HostVolumesDir = agentConfig.Client.HostVolumesDir - } if agentConfig.Client.NetworkInterface != "" { conf.NetworkInterface = agentConfig.Client.NetworkInterface } diff --git a/command/agent/bindata_assetfs.go b/command/agent/bindata_assetfs.go index 866050bcf53..b6838516864 100644 --- a/command/agent/bindata_assetfs.go +++ b/command/agent/bindata_assetfs.go @@ -31,12 +31,12 @@ // 
ui/dist/robots.txt // DO NOT EDIT! -//go:build ui // +build ui package agent import ( + "github.com/elazarl/go-bindata-assetfs" "bytes" "compress/gzip" "fmt" @@ -46,8 +46,6 @@ import ( "path/filepath" "strings" "time" - - assetfs "github.com/elazarl/go-bindata-assetfs" ) func bindataRead(data []byte, name string) ([]byte, error) { @@ -733,8 +731,8 @@ func AssetNames() []string { // _bindata is a table, holding each asset generator, mapped to its name. var _bindata = map[string]func() (*asset, error){ - "dist/assets/chunk.265.d2d6721dbed577d51f9b.js": distAssetsChunk265D2d6721dbed577d51f9bJs, - "dist/assets/chunk.280.f72db67a6c61f41fec51.js": distAssetsChunk280F72db67a6c61f41fec51Js, + "dist/assets/chunk.265.d2d6721dbed577d51f9b.js": distAssetsChunk265D2d6721dbed577d51f9bJs, + "dist/assets/chunk.280.f72db67a6c61f41fec51.js": distAssetsChunk280F72db67a6c61f41fec51Js, "dist/assets/chunk.280.f72db67a6c61f41fec51.js.LICENSE.txt": distAssetsChunk280F72db67a6c61f41fec51JsLicenseTxt, "dist/assets/chunk.50.30654845e67b2aa5686e.js": distAssetsChunk5030654845e67b2aa5686eJs, "dist/assets/chunk.50.30654845e67b2aa5686e.js.LICENSE.txt": distAssetsChunk5030654845e67b2aa5686eJsLicenseTxt, @@ -768,13 +766,11 @@ var _bindata = map[string]func() (*asset, error){ // directory embedded in the file by go-bindata. // For example if you run go-bindata on data/... and data contains the // following hierarchy: -// -// data/ -// foo.txt -// img/ -// a.png -// b.png -// +// data/ +// foo.txt +// img/ +// a.png +// b.png // then AssetDir("data") would return []string{"foo.txt", "img"} // AssetDir("data/img") would return []string{"a.png", "b.png"} // AssetDir("foo.txt") and AssetDir("notexist") would return an error @@ -805,12 +801,11 @@ type bintree struct { Func func() (*asset, error) Children map[string]*bintree } - var _bintree = &bintree{nil, map[string]*bintree{ "dist": &bintree{nil, map[string]*bintree{ "assets": &bintree{nil, map[string]*bintree{ - "chunk.265.d2d6721dbed577d51f9b.js": &bintree{distAssetsChunk265D2d6721dbed577d51f9bJs, map[string]*bintree{}}, - "chunk.280.f72db67a6c61f41fec51.js": &bintree{distAssetsChunk280F72db67a6c61f41fec51Js, map[string]*bintree{}}, + "chunk.265.d2d6721dbed577d51f9b.js": &bintree{distAssetsChunk265D2d6721dbed577d51f9bJs, map[string]*bintree{}}, + "chunk.280.f72db67a6c61f41fec51.js": &bintree{distAssetsChunk280F72db67a6c61f41fec51Js, map[string]*bintree{}}, "chunk.280.f72db67a6c61f41fec51.js.LICENSE.txt": &bintree{distAssetsChunk280F72db67a6c61f41fec51JsLicenseTxt, map[string]*bintree{}}, "chunk.50.30654845e67b2aa5686e.js": &bintree{distAssetsChunk5030654845e67b2aa5686eJs, map[string]*bintree{}}, "chunk.50.30654845e67b2aa5686e.js.LICENSE.txt": &bintree{distAssetsChunk5030654845e67b2aa5686eJsLicenseTxt, map[string]*bintree{}}, @@ -825,20 +820,20 @@ var _bintree = &bintree{nil, map[string]*bintree{ "vendor-f7a0120fbbcca50536685e8ba157894a.js": &bintree{distAssetsVendorF7a0120fbbcca50536685e8ba157894aJs, map[string]*bintree{}}, }}, "crossdomain.xml": &bintree{distCrossdomainXml, map[string]*bintree{}}, - "favicon.ico": &bintree{distFaviconIco, map[string]*bintree{}}, + "favicon.ico": &bintree{distFaviconIco, map[string]*bintree{}}, "images": &bintree{nil, map[string]*bintree{ "icons": &bintree{nil, map[string]*bintree{ - "boot.svg": &bintree{distImagesIconsBootSvg, map[string]*bintree{}}, - "box.svg": &bintree{distImagesIconsBoxSvg, map[string]*bintree{}}, - "cancel.svg": &bintree{distImagesIconsCancelSvg, map[string]*bintree{}}, - "clock.svg": &bintree{distImagesIconsClockSvg, 
map[string]*bintree{}}, - "console.svg": &bintree{distImagesIconsConsoleSvg, map[string]*bintree{}}, - "history.svg": &bintree{distImagesIconsHistorySvg, map[string]*bintree{}}, - "media-pause.svg": &bintree{distImagesIconsMediaPauseSvg, map[string]*bintree{}}, - "media-play.svg": &bintree{distImagesIconsMediaPlaySvg, map[string]*bintree{}}, + "boot.svg": &bintree{distImagesIconsBootSvg, map[string]*bintree{}}, + "box.svg": &bintree{distImagesIconsBoxSvg, map[string]*bintree{}}, + "cancel.svg": &bintree{distImagesIconsCancelSvg, map[string]*bintree{}}, + "clock.svg": &bintree{distImagesIconsClockSvg, map[string]*bintree{}}, + "console.svg": &bintree{distImagesIconsConsoleSvg, map[string]*bintree{}}, + "history.svg": &bintree{distImagesIconsHistorySvg, map[string]*bintree{}}, + "media-pause.svg": &bintree{distImagesIconsMediaPauseSvg, map[string]*bintree{}}, + "media-play.svg": &bintree{distImagesIconsMediaPlaySvg, map[string]*bintree{}}, "node-init-circle-fill.svg": &bintree{distImagesIconsNodeInitCircleFillSvg, map[string]*bintree{}}, - "nomad-logo-n.svg": &bintree{distImagesIconsNomadLogoNSvg, map[string]*bintree{}}, - "search.svg": &bintree{distImagesIconsSearchSvg, map[string]*bintree{}}, + "nomad-logo-n.svg": &bintree{distImagesIconsNomadLogoNSvg, map[string]*bintree{}}, + "search.svg": &bintree{distImagesIconsSearchSvg, map[string]*bintree{}}, }}, }}, "index.html": &bintree{distIndexHtml, map[string]*bintree{}}, @@ -893,6 +888,7 @@ func _filePath(dir, name string) string { return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...) } + func assetFS() *assetfs.AssetFS { assetInfo := func(path string) (os.FileInfo, error) { return os.Stat(path) diff --git a/command/agent/command.go b/command/agent/command.go index d0f354edce2..6889ce0cb4e 100644 --- a/command/agent/command.go +++ b/command/agent/command.go @@ -19,14 +19,14 @@ import ( "syscall" "time" + metrics "github.com/armon/go-metrics" + "github.com/armon/go-metrics/circonus" + "github.com/armon/go-metrics/datadog" + "github.com/armon/go-metrics/prometheus" "github.com/hashicorp/cli" checkpoint "github.com/hashicorp/go-checkpoint" discover "github.com/hashicorp/go-discover" hclog "github.com/hashicorp/go-hclog" - metrics "github.com/hashicorp/go-metrics/compat" - "github.com/hashicorp/go-metrics/compat/circonus" - "github.com/hashicorp/go-metrics/compat/datadog" - "github.com/hashicorp/go-metrics/compat/prometheus" gsyslog "github.com/hashicorp/go-syslog" "github.com/hashicorp/nomad/helper" flaghelper "github.com/hashicorp/nomad/helper/flags" @@ -108,8 +108,6 @@ func (c *Command) readConfig() *Config { flags.StringVar(&cmdConfig.Client.StateDir, "state-dir", "", "") flags.StringVar(&cmdConfig.Client.AllocDir, "alloc-dir", "", "") flags.StringVar(&cmdConfig.Client.AllocMountsDir, "alloc-mounts-dir", "", "") - flags.StringVar(&cmdConfig.Client.HostVolumesDir, "host-volumes-dir", "", "") - flags.StringVar(&cmdConfig.Client.HostVolumePluginDir, "host-volume-plugin-dir", "", "") flags.StringVar(&cmdConfig.Client.NodeClass, "node-class", "", "") flags.StringVar(&cmdConfig.Client.NodePool, "node-pool", "", "") flags.StringVar(&servers, "servers", "", "") @@ -383,13 +381,11 @@ func (c *Command) IsValidConfig(config, cmdConfig *Config) bool { // Verify the paths are absolute. 
dirs := map[string]string{ - "data-dir": config.DataDir, - "plugin-dir": config.PluginDir, - "alloc-dir": config.Client.AllocDir, - "alloc-mounts-dir": config.Client.AllocMountsDir, - "host-volumes-dir": config.Client.HostVolumesDir, - "host-volume-plugin-dir": config.Client.HostVolumePluginDir, - "state-dir": config.Client.StateDir, + "data-dir": config.DataDir, + "plugin-dir": config.PluginDir, + "alloc-dir": config.Client.AllocDir, + "alloc-mounts-dir": config.Client.AllocMountsDir, + "state-dir": config.Client.StateDir, } for k, dir := range dirs { if dir == "" { @@ -734,7 +730,6 @@ func (c *Command) AutocompleteFlags() complete.Flags { "-region": complete.PredictAnything, "-data-dir": complete.PredictDirs("*"), "-plugin-dir": complete.PredictDirs("*"), - "-host-volume-plugin-dir": complete.PredictDirs("*"), "-dc": complete.PredictAnything, "-log-level": complete.PredictAnything, "-json-logs": complete.PredictNothing, @@ -1565,14 +1560,6 @@ Client Options: The default speed for network interfaces in MBits if the link speed can not be determined dynamically. - -host-volumes-dir - Directory wherein host volume plugins should place volumes. The default is - /host_volumes. - - -host-volume-plugin-dir - Directory containing dynamic host volume plugins. The default is - /host_volume_plugins. - ACL Options: -acl-enabled diff --git a/command/agent/config.go b/command/agent/config.go index 1e2dbb3c57c..5b949cc68d3 100644 --- a/command/agent/config.go +++ b/command/agent/config.go @@ -229,13 +229,6 @@ type ClientConfig struct { // AllocMountsDir is the directory for storing mounts into allocation data AllocMountsDir string `hcl:"alloc_mounts_dir"` - // HostVolumesDir is the suggested directory for plugins to put volumes. - // Volume plugins may ignore this suggestion, but we provide this default. - HostVolumesDir string `hcl:"host_volumes_dir"` - - // HostVolumePluginDir directory contains dynamic host volume plugins - HostVolumePluginDir string `hcl:"host_volume_plugin_dir"` - // Servers is a list of known server addresses. These are as "host:port" Servers []string `hcl:"servers"` @@ -2326,12 +2319,6 @@ func (a *ClientConfig) Merge(b *ClientConfig) *ClientConfig { if b.AllocMountsDir != "" { result.AllocMountsDir = b.AllocMountsDir } - if b.HostVolumesDir != "" { - result.HostVolumesDir = b.HostVolumesDir - } - if b.HostVolumePluginDir != "" { - result.HostVolumePluginDir = b.HostVolumePluginDir - } if b.NodeClass != "" { result.NodeClass = b.NodeClass } diff --git a/command/agent/consul/service_client.go b/command/agent/consul/service_client.go index 6589a6d4116..f5c18e452b9 100644 --- a/command/agent/consul/service_client.go +++ b/command/agent/consul/service_client.go @@ -19,9 +19,9 @@ import ( "sync/atomic" "time" + "github.com/armon/go-metrics" "github.com/hashicorp/consul/api" "github.com/hashicorp/go-hclog" - metrics "github.com/hashicorp/go-metrics/compat" "github.com/hashicorp/go-multierror" "github.com/hashicorp/go-set/v3" "github.com/hashicorp/nomad/client/serviceregistration" diff --git a/command/agent/csi_endpoint.go b/command/agent/csi_endpoint.go index 325ce5bb6d0..2f57aef8865 100644 --- a/command/agent/csi_endpoint.go +++ b/command/agent/csi_endpoint.go @@ -20,12 +20,22 @@ func (s *HTTPServer) CSIVolumesRequest(resp http.ResponseWriter, req *http.Reque return nil, CodedError(405, ErrInvalidMethod) } + // Type filters volume lists to a specific type. 
When support for non-CSI volumes is + // introduced, we'll need to dispatch here + query := req.URL.Query() + qtype, ok := query["type"] + if !ok { + return []*structs.CSIVolListStub{}, nil + } + if qtype[0] != "csi" { + return nil, nil + } + args := structs.CSIVolumeListRequest{} if s.parse(resp, req, &args.Region, &args.QueryOptions) { return nil, nil } - query := req.URL.Query() args.Prefix = query.Get("prefix") args.PluginID = query.Get("plugin_id") args.NodeID = query.Get("node_id") diff --git a/command/agent/host_volume_endpoint.go b/command/agent/host_volume_endpoint.go deleted file mode 100644 index db12cca929f..00000000000 --- a/command/agent/host_volume_endpoint.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package agent - -import ( - "net/http" - "strings" - - "github.com/hashicorp/nomad/nomad/structs" -) - -func (s *HTTPServer) HostVolumesListRequest(resp http.ResponseWriter, req *http.Request) (any, error) { - args := structs.HostVolumeListRequest{} - if s.parse(resp, req, &args.Region, &args.QueryOptions) { - return nil, nil - } - - query := req.URL.Query() - args.Prefix = query.Get("prefix") - args.NodePool = query.Get("node_pool") - args.NodeID = query.Get("node_id") - - var out structs.HostVolumeListResponse - if err := s.agent.RPC("HostVolume.List", &args, &out); err != nil { - return nil, err - } - - setMeta(resp, &out.QueryMeta) - return out.Volumes, nil -} - -// HostVolumeSpecificRequest dispatches GET and PUT -func (s *HTTPServer) HostVolumeSpecificRequest(resp http.ResponseWriter, req *http.Request) (any, error) { - // Tokenize the suffix of the path to get the volume id, tolerating a - // present or missing trailing slash - reqSuffix := strings.TrimPrefix(req.URL.Path, "/v1/volume/host/") - tokens := strings.FieldsFunc(reqSuffix, func(c rune) bool { return c == '/' }) - - if len(tokens) == 0 { - return nil, CodedError(404, resourceNotFoundErr) - } - - switch req.Method { - - // PUT /v1/volume/host/create - // POST /v1/volume/host/create - // PUT /v1/volume/host/register - // POST /v1/volume/host/register - case http.MethodPut, http.MethodPost: - switch tokens[0] { - case "create", "": - return s.hostVolumeCreate(resp, req) - case "register": - return s.hostVolumeRegister(resp, req) - default: - return nil, CodedError(404, resourceNotFoundErr) - } - - // DELETE /v1/volume/host/:id - case http.MethodDelete: - return s.hostVolumeDelete(tokens[0], resp, req) - - // GET /v1/volume/host/:id - case http.MethodGet: - return s.hostVolumeGet(tokens[0], resp, req) - } - - return nil, CodedError(404, resourceNotFoundErr) -} - -func (s *HTTPServer) hostVolumeGet(id string, resp http.ResponseWriter, req *http.Request) (any, error) { - args := structs.HostVolumeGetRequest{ - ID: id, - } - if s.parse(resp, req, &args.Region, &args.QueryOptions) { - return nil, nil - } - - var out structs.HostVolumeGetResponse - if err := s.agent.RPC("HostVolume.Get", &args, &out); err != nil { - return nil, err - } - - setMeta(resp, &out.QueryMeta) - if out.Volume == nil { - return nil, CodedError(404, "volume not found") - } - - return out.Volume, nil -} - -func (s *HTTPServer) hostVolumeRegister(resp http.ResponseWriter, req *http.Request) (any, error) { - - args := structs.HostVolumeRegisterRequest{} - if err := decodeBody(req, &args); err != nil { - return err, CodedError(400, err.Error()) - } - s.parseWriteRequest(req, &args.WriteRequest) - - var out structs.HostVolumeRegisterResponse - if err := s.agent.RPC("HostVolume.Register", 
&args, &out); err != nil { - return nil, err - } - - setIndex(resp, out.Index) - - return &out, nil -} - -func (s *HTTPServer) hostVolumeCreate(resp http.ResponseWriter, req *http.Request) (any, error) { - - args := structs.HostVolumeCreateRequest{} - if err := decodeBody(req, &args); err != nil { - return err, CodedError(400, err.Error()) - } - s.parseWriteRequest(req, &args.WriteRequest) - - var out structs.HostVolumeCreateResponse - if err := s.agent.RPC("HostVolume.Create", &args, &out); err != nil { - return nil, err - } - - setIndex(resp, out.Index) - - return &out, nil -} - -func (s *HTTPServer) hostVolumeDelete(id string, resp http.ResponseWriter, req *http.Request) (any, error) { - // HTTP API only supports deleting a single ID because of compatibility with - // the existing HTTP routes for CSI - args := structs.HostVolumeDeleteRequest{VolumeID: id} - s.parseWriteRequest(req, &args.WriteRequest) - - var out structs.HostVolumeDeleteResponse - if err := s.agent.RPC("HostVolume.Delete", &args, &out); err != nil { - return nil, err - } - - setIndex(resp, out.Index) - - return nil, nil -} diff --git a/command/agent/host_volume_endpoint_test.go b/command/agent/host_volume_endpoint_test.go deleted file mode 100644 index ddff7a33fbb..00000000000 --- a/command/agent/host_volume_endpoint_test.go +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package agent - -import ( - "fmt" - "net/http" - "net/http/httptest" - "net/url" - "testing" - - "github.com/hashicorp/nomad/nomad/mock" - "github.com/hashicorp/nomad/nomad/structs" - "github.com/shoenig/test/must" -) - -func TestHostVolumeEndpoint_CRUD(t *testing.T) { - httpTest(t, nil, func(s *TestAgent) { - - // Create a volume on the test node - - vol := mock.HostVolumeRequest(structs.DefaultNamespace) - vol.NodePool = "" - vol.Constraints = nil - reqBody := struct { - Volume *structs.HostVolume - }{Volume: vol} - buf := encodeReq(reqBody) - req, err := http.NewRequest(http.MethodPut, "/v1/volume/host/create", buf) - must.NoError(t, err) - respW := httptest.NewRecorder() - - // Make the request and verify we got a valid volume back - - obj, err := s.Server.HostVolumeSpecificRequest(respW, req) - must.NoError(t, err) - must.NotNil(t, obj) - resp := obj.(*structs.HostVolumeCreateResponse) - must.NotNil(t, resp.Volume) - must.Eq(t, vol.Name, resp.Volume.Name) - must.Eq(t, s.client.NodeID(), resp.Volume.NodeID) - must.NotEq(t, "", respW.Result().Header.Get("X-Nomad-Index")) - - volID := resp.Volume.ID - - // Verify volume was created - - path, err := url.JoinPath("/v1/volume/host/", volID) - must.NoError(t, err) - req, err = http.NewRequest(http.MethodGet, path, nil) - must.NoError(t, err) - obj, err = s.Server.HostVolumeSpecificRequest(respW, req) - must.NoError(t, err) - must.NotNil(t, obj) - respVol := obj.(*structs.HostVolume) - must.Eq(t, s.client.NodeID(), respVol.NodeID) - - // Update the volume (note: this doesn't update the volume on the client) - - vol = respVol.Copy() - vol.Parameters = map[string]string{"bar": "foo"} // swaps key and value - reqBody = struct { - Volume *structs.HostVolume - }{Volume: vol} - buf = encodeReq(reqBody) - req, err = http.NewRequest(http.MethodPut, "/v1/volume/host/register", buf) - must.NoError(t, err) - obj, err = s.Server.HostVolumeSpecificRequest(respW, req) - must.NoError(t, err) - must.NotNil(t, obj) - regResp := obj.(*structs.HostVolumeRegisterResponse) - must.NotNil(t, regResp.Volume) - must.Eq(t, map[string]string{"bar": "foo"}, 
regResp.Volume.Parameters) - - // Verify volume was updated - - path = fmt.Sprintf("/v1/volumes?type=host&node_id=%s", s.client.NodeID()) - req, err = http.NewRequest(http.MethodGet, path, nil) - must.NoError(t, err) - obj, err = s.Server.HostVolumesListRequest(respW, req) - must.NoError(t, err) - vols := obj.([]*structs.HostVolumeStub) - must.Len(t, 1, vols) - - // Delete the volume - - req, err = http.NewRequest(http.MethodDelete, fmt.Sprintf("/v1/volume/host/%s", volID), nil) - must.NoError(t, err) - _, err = s.Server.HostVolumeSpecificRequest(respW, req) - must.NoError(t, err) - - // Verify volume was deleted - - path, err = url.JoinPath("/v1/volume/host/", volID) - must.NoError(t, err) - req, err = http.NewRequest(http.MethodGet, path, nil) - must.NoError(t, err) - obj, err = s.Server.HostVolumeSpecificRequest(respW, req) - must.EqError(t, err, "volume not found") - must.Nil(t, obj) - }) -} diff --git a/command/agent/http.go b/command/agent/http.go index 03bd7ea7b50..3f4db49d65c 100644 --- a/command/agent/http.go +++ b/command/agent/http.go @@ -19,12 +19,12 @@ import ( "sync" "time" + "github.com/armon/go-metrics" assetfs "github.com/elazarl/go-bindata-assetfs" "github.com/gorilla/handlers" "github.com/gorilla/websocket" "github.com/hashicorp/go-connlimit" log "github.com/hashicorp/go-hclog" - metrics "github.com/hashicorp/go-metrics/compat" "github.com/hashicorp/go-msgpack/v2/codec" multierror "github.com/hashicorp/go-multierror" "github.com/rs/cors" @@ -404,14 +404,12 @@ func (s *HTTPServer) registerHandlers(enableDebug bool) { s.mux.HandleFunc("/v1/deployments", s.wrap(s.DeploymentsRequest)) s.mux.HandleFunc("/v1/deployment/", s.wrap(s.DeploymentSpecificRequest)) - s.mux.HandleFunc("GET /v1/volumes", s.wrap(s.ListVolumesRequest)) s.mux.HandleFunc("/v1/volumes", s.wrap(s.CSIVolumesRequest)) s.mux.HandleFunc("/v1/volumes/external", s.wrap(s.CSIExternalVolumesRequest)) s.mux.HandleFunc("/v1/volumes/snapshot", s.wrap(s.CSISnapshotsRequest)) s.mux.HandleFunc("/v1/volume/csi/", s.wrap(s.CSIVolumeSpecificRequest)) s.mux.HandleFunc("/v1/plugins", s.wrap(s.CSIPluginsRequest)) s.mux.HandleFunc("/v1/plugin/csi/", s.wrap(s.CSIPluginSpecificRequest)) - s.mux.HandleFunc("/v1/volume/host/", s.wrap(s.HostVolumeSpecificRequest)) s.mux.HandleFunc("/v1/acl/policies", s.wrap(s.ACLPoliciesRequest)) s.mux.HandleFunc("/v1/acl/policy/", s.wrap(s.ACLPolicySpecificRequest)) diff --git a/command/agent/job_endpoint.go b/command/agent/job_endpoint.go index 4bf7a52ba89..fd0c0c03501 100644 --- a/command/agent/job_endpoint.go +++ b/command/agent/job_endpoint.go @@ -1334,10 +1334,9 @@ func ApiTgToStructsTG(job *structs.Job, taskGroup *api.TaskGroup, tg *structs.Ta Name: v.Name, Type: v.Type, ReadOnly: v.ReadOnly, - Sticky: v.Sticky, Source: v.Source, - AttachmentMode: structs.VolumeAttachmentMode(v.AttachmentMode), - AccessMode: structs.VolumeAccessMode(v.AccessMode), + AttachmentMode: structs.CSIVolumeAttachmentMode(v.AttachmentMode), + AccessMode: structs.CSIVolumeAccessMode(v.AccessMode), PerAlloc: v.PerAlloc, } diff --git a/command/agent/metrics_endpoint_test.go b/command/agent/metrics_endpoint_test.go index b3c75b5aa64..fac79b2d8ee 100644 --- a/command/agent/metrics_endpoint_test.go +++ b/command/agent/metrics_endpoint_test.go @@ -10,7 +10,7 @@ import ( "testing" "time" - metrics "github.com/hashicorp/go-metrics/compat" + "github.com/armon/go-metrics" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" diff --git a/command/agent/testagent.go 
b/command/agent/testagent.go index ecff31f568e..702898ce0a6 100644 --- a/command/agent/testagent.go +++ b/command/agent/testagent.go @@ -14,8 +14,8 @@ import ( "testing" "time" + metrics "github.com/armon/go-metrics" "github.com/hashicorp/go-hclog" - metrics "github.com/hashicorp/go-metrics/compat" "github.com/hashicorp/nomad/api" "github.com/hashicorp/nomad/ci" client "github.com/hashicorp/nomad/client/config" diff --git a/command/agent/volumes_endpoint.go b/command/agent/volumes_endpoint.go deleted file mode 100644 index 3ee84eceb7f..00000000000 --- a/command/agent/volumes_endpoint.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package agent - -import ( - "net/http" - - "github.com/hashicorp/nomad/nomad/structs" -) - -// ListVolumesRequest dispatches requests for listing volumes to a specific type. -func (s *HTTPServer) ListVolumesRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) { - query := req.URL.Query() - qtype, ok := query["type"] - if !ok { - return []*structs.CSIVolListStub{}, nil - } - switch qtype[0] { - case "host": - return s.HostVolumesListRequest(resp, req) - case "csi": - return s.CSIVolumesRequest(resp, req) - default: - return nil, CodedError(404, resourceNotFoundErr) - } -} diff --git a/command/asset/asset.go b/command/asset/asset.go index 3570cb78b05..b6c1d9112f6 100644 --- a/command/asset/asset.go +++ b/command/asset/asset.go @@ -22,15 +22,3 @@ var NodePoolSpec []byte //go:embed pool.nomad.json var NodePoolSpecJSON []byte - -//go:embed volume.csi.hcl -var CSIVolumeSpecHCL []byte - -//go:embed volume.csi.json -var CSIVolumeSpecJSON []byte - -//go:embed volume.host.hcl -var HostVolumeSpecHCL []byte - -//go:embed volume.host.json -var HostVolumeSpecJSON []byte diff --git a/command/asset/volume.csi.hcl b/command/asset/volume.csi.hcl deleted file mode 100644 index c4714ab4a15..00000000000 --- a/command/asset/volume.csi.hcl +++ /dev/null @@ -1,70 +0,0 @@ -id = "ebs_prod_db1" -namespace = "default" -name = "database" -type = "csi" -plugin_id = "plugin_id" - -# For 'nomad volume register', provide the external ID from the storage -# provider. This field should be omitted when creating a volume with -# 'nomad volume create' -external_id = "vol-23452345" - -# For 'nomad volume create', specify a snapshot ID or volume to clone. You can -# specify only one of these two fields. -snapshot_id = "snap-12345" -# clone_id = "vol-abcdef" - -# Optional: for 'nomad volume create', specify a maximum and minimum capacity. -# Registering an existing volume will record but ignore these fields. -capacity_min = "10GiB" -capacity_max = "20G" - -# Required (at least one): for 'nomad volume create', specify one or more -# capabilities to validate. Registering an existing volume will record but -# ignore these fields. -capability { - access_mode = "single-node-writer" - attachment_mode = "file-system" -} - -capability { - access_mode = "single-node-reader-only" - attachment_mode = "block-device" -} - -# Optional: for 'nomad volume create', specify mount options to validate for -# 'attachment_mode = "file-system". Registering an existing volume will record -# but ignore these fields. -mount_options { - fs_type = "ext4" - mount_flags = ["ro"] -} - -# Optional: specify one or more locations where the volume must be accessible -# from. Refer to the plugin documentation for what segment values are supported. 
-topology_request { - preferred { - topology { segments { rack = "R1" } } - } - required { - topology { segments { rack = "R1" } } - topology { segments { rack = "R2", zone = "us-east-1a" } } - } -} - -# Optional: provide any secrets specified by the plugin. -secrets { - example_secret = "xyzzy" -} - -# Optional: provide a map of keys to string values expected by the plugin. -parameters { - skuname = "Premium_LRS" -} - -# Optional: for 'nomad volume register', provide a map of keys to string -# values expected by the plugin. This field will populated automatically by -# 'nomad volume create'. -context { - endpoint = "http://192.168.1.101:9425" -} diff --git a/command/asset/volume.csi.json b/command/asset/volume.csi.json deleted file mode 100644 index 2ad4bdd84de..00000000000 --- a/command/asset/volume.csi.json +++ /dev/null @@ -1,72 +0,0 @@ -{ - "id": "ebs_prod_db1", - "namespace": "default", - "name": "database", - "type": "csi", - "plugin_id": "plugin_id", - "external_id": "vol-23452345", - "snapshot_id": "snap-12345", - "capacity_min": "10GiB", - "capacity_max": "20G", - "capability": [ - { - "access_mode": "single-node-writer", - "attachment_mode": "file-system" - }, - { - "access_mode": "single-node-reader-only", - "attachment_mode": "block-device" - } - ], - "context": [ - { - "endpoint": "http://192.168.1.101:9425" - } - ], - "mount_options": [ - { - "fs_type": "ext4", - "mount_flags": [ - "ro" - ] - } - ], - "topology_request": { - "preferred": [ - { - "topology": { - "segments": { - "rack": "R1" - } - } - } - ], - "required": [ - { - "topology": { - "segments": { - "rack": "R1" - } - } - }, - { - "topology": { - "segments": { - "rack": "R2", - "zone": "us-east-1a" - } - } - } - ] - }, - "parameters": [ - { - "skuname": "Premium_LRS" - } - ], - "secrets": [ - { - "example_secret": "xyzzy" - } - ] -} diff --git a/command/asset/volume.host.hcl b/command/asset/volume.host.hcl deleted file mode 100644 index bf6d9877892..00000000000 --- a/command/asset/volume.host.hcl +++ /dev/null @@ -1,29 +0,0 @@ -id = "disk_prod_db1" -namespace = "default" -name = "database" -type = "host" -plugin_id = "plugin_id" - -# Optional: for 'nomad volume create', specify a maximum and minimum capacity. -# Registering an existing volume will record but ignore these fields. -capacity_min = "10GiB" -capacity_max = "20G" - -# Optional: for 'nomad volume create', specify one or more capabilities to -# validate. Registering an existing volume will record but ignore these fields. -# If omitted, the single-node-writer + file-system capability will be used as a -# default. -capability { - access_mode = "single-node-writer" - attachment_mode = "file-system" -} - -capability { - access_mode = "single-node-reader-only" - attachment_mode = "block-device" -} - -# Optional: provide a map of keys to string values expected by the plugin. 
-parameters { - skuname = "Premium_LRS" -} diff --git a/command/asset/volume.host.json b/command/asset/volume.host.json deleted file mode 100644 index 36f582bc2cb..00000000000 --- a/command/asset/volume.host.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "id": "disk_prod_db1", - "namespace": "default", - "name": "database", - "type": "host", - "plugin_id": "plugin_id", - "capacity_min": "10GiB", - "capacity_max": "20G", - "capability": [ - { - "access_mode": "single-node-writer", - "attachment_mode": "file-system" - }, - { - "access_mode": "single-node-reader-only", - "attachment_mode": "block-device" - } - ], - "parameters": [ - { - "skuname": "Premium_LRS" - } - ] -} diff --git a/command/node_status.go b/command/node_status.go index 9538e90622b..f7f7b587802 100644 --- a/command/node_status.go +++ b/command/node_status.go @@ -13,7 +13,6 @@ import ( "time" humanize "github.com/dustin/go-humanize" - "github.com/hashicorp/go-set/v3" "github.com/hashicorp/nomad/api" "github.com/hashicorp/nomad/api/contexts" "github.com/hashicorp/nomad/helper/pointer" @@ -130,12 +129,8 @@ func (c *NodeStatusCommand) AutocompleteFlags() complete.Flags { } func (c *NodeStatusCommand) AutocompleteArgs() complete.Predictor { - return nodePredictor(c.Client, nil) -} - -func nodePredictor(factory ApiClientFactory, filter *set.Set[string]) complete.Predictor { return complete.PredictFunc(func(a complete.Args) []string { - client, err := factory() + client, err := c.Meta.Client() if err != nil { return nil } diff --git a/command/plugin_status.go b/command/plugin_status.go index 92dbdc7f26b..02c61c65a9a 100644 --- a/command/plugin_status.go +++ b/command/plugin_status.go @@ -58,10 +58,21 @@ func (c *PluginStatusCommand) Synopsis() string { return "Display status information about a plugin" } +// predictVolumeType is also used in volume_status +var predictVolumeType = complete.PredictFunc(func(a complete.Args) []string { + types := []string{"csi"} + for _, t := range types { + if strings.Contains(t, a.Last) { + return []string{t} + } + } + return nil +}) + func (c *PluginStatusCommand) AutocompleteFlags() complete.Flags { return mergeAutocompleteFlags(c.Meta.AutocompleteFlags(FlagSetClient), complete.Flags{ - "-type": complete.PredictSet("csi"), + "-type": predictVolumeType, "-short": complete.PredictNothing, "-verbose": complete.PredictNothing, "-json": complete.PredictNothing, diff --git a/command/quota_apply.go b/command/quota_apply.go index 9a7466c32ad..84d23d931b4 100644 --- a/command/quota_apply.go +++ b/command/quota_apply.go @@ -6,13 +6,11 @@ package command import ( "bytes" "encoding/json" - "errors" "fmt" "io" "os" "strings" - humanize "github.com/dustin/go-humanize" multierror "github.com/hashicorp/go-multierror" "github.com/hashicorp/hcl" "github.com/hashicorp/hcl/hcl/ast" @@ -233,7 +231,7 @@ func parseQuotaLimits(result *[]*api.QuotaLimit, list *ast.ObjectList) error { // Parse limits if o := listVal.Filter("region_limit"); len(o.Items) > 0 { - limit.RegionLimit = new(api.QuotaResources) + limit.RegionLimit = new(api.Resources) if err := parseQuotaResource(limit.RegionLimit, o); err != nil { return multierror.Prefix(err, "region_limit ->") } @@ -246,7 +244,7 @@ func parseQuotaLimits(result *[]*api.QuotaLimit, list *ast.ObjectList) error { } // parseQuotaResource parses the region_limit resources -func parseQuotaResource(result *api.QuotaResources, list *ast.ObjectList) error { +func parseQuotaResource(result *api.Resources, list *ast.ObjectList) error { list = list.Elem() if len(list.Items) == 0 { return nil @@ 
-273,7 +271,6 @@ func parseQuotaResource(result *api.QuotaResources, list *ast.ObjectList) error "memory", "memory_max", "device", - "storage", } if err := helper.CheckHCLKeys(listVal, valid); err != nil { return multierror.Prefix(err, "resources ->") @@ -286,7 +283,6 @@ func parseQuotaResource(result *api.QuotaResources, list *ast.ObjectList) error // Manually parse delete(m, "device") - delete(m, "storage") if err := mapstructure.WeakDecode(m, result); err != nil { return err @@ -300,68 +296,9 @@ func parseQuotaResource(result *api.QuotaResources, list *ast.ObjectList) error } } - // Parse storage block - storageBlocks := listVal.Filter("storage") - storage, err := parseStorageResource(storageBlocks) - if err != nil { - return multierror.Prefix(err, "storage ->") - } - result.Storage = storage - return nil } -func parseStorageResource(storageBlocks *ast.ObjectList) (*api.QuotaStorageResources, error) { - switch len(storageBlocks.Items) { - case 0: - return nil, nil - case 1: - default: - return nil, errors.New("only one storage block is allowed") - } - block := storageBlocks.Items[0] - valid := []string{"variables", "host_volumes"} - if err := helper.CheckHCLKeys(block.Val, valid); err != nil { - return nil, err - } - - var m map[string]any - if err := hcl.DecodeObject(&m, block.Val); err != nil { - return nil, err - } - - variablesLimit, err := parseQuotaMegabytes(m["variables"]) - if err != nil { - return nil, fmt.Errorf("invalid variables limit: %v", err) - } - hostVolumesLimit, err := parseQuotaMegabytes(m["host_volumes"]) - if err != nil { - return nil, fmt.Errorf("invalid host_volumes limit: %v", err) - } - - return &api.QuotaStorageResources{ - VariablesMB: variablesLimit, - HostVolumesMB: hostVolumesLimit, - }, nil -} - -func parseQuotaMegabytes(raw any) (int, error) { - switch val := raw.(type) { - case string: - b, err := humanize.ParseBytes(val) - if err != nil { - return 0, fmt.Errorf("could not parse value as bytes: %v", err) - } - return int(b >> 20), nil - case int: - return val, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("invalid type %T", raw) - } -} - func parseDeviceResource(result *[]*api.RequestedDevice, list *ast.ObjectList) error { for idx, o := range list.Items { if l := len(o.Keys); l == 0 { diff --git a/command/quota_apply_test.go b/command/quota_apply_test.go index 7682662db73..c7955f27ccd 100644 --- a/command/quota_apply_test.go +++ b/command/quota_apply_test.go @@ -8,10 +8,7 @@ import ( "testing" "github.com/hashicorp/cli" - "github.com/hashicorp/nomad/api" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper/pointer" - "github.com/shoenig/test/must" ) func TestQuotaApplyCommand_Implements(t *testing.T) { @@ -41,53 +38,3 @@ func TestQuotaApplyCommand_Fails(t *testing.T) { } ui.ErrorWriter.Reset() } - -func TestQuotaParse(t *testing.T) { - - in := []byte(` -name = "default-quota" -description = "Limit the shared default namespace" - -limit { - region = "global" - region_limit { - cores = 0 - cpu = 2500 - memory = 1000 - memory_max = 1000 - device "nvidia/gpu/1080ti" { - count = 1 - } - storage { - variables = 1000 # in MB - host_volumes = "100 GiB" - } - } -} -`) - - spec, err := parseQuotaSpec(in) - must.NoError(t, err) - - must.Eq(t, &api.QuotaSpec{ - Name: "default-quota", - Description: "Limit the shared default namespace", - Limits: []*api.QuotaLimit{{ - Region: "global", - RegionLimit: &api.QuotaResources{ - CPU: pointer.Of(2500), - Cores: pointer.Of(0), - MemoryMB: pointer.Of(1000), - MemoryMaxMB: pointer.Of(1000), 
- Devices: []*api.RequestedDevice{{ - Name: "nvidia/gpu/1080ti", - Count: pointer.Of(uint64(1)), - }}, - Storage: &api.QuotaStorageResources{ - VariablesMB: 1000, - HostVolumesMB: 102_400, - }, - }, - }}, - }, spec) -} diff --git a/command/quota_delete_test.go b/command/quota_delete_test.go index 2cbb64d4e6a..1f087b8fdc9 100644 --- a/command/quota_delete_test.go +++ b/command/quota_delete_test.go @@ -101,7 +101,7 @@ func testQuotaSpec() *api.QuotaSpec { Limits: []*api.QuotaLimit{ { Region: "global", - RegionLimit: &api.QuotaResources{ + RegionLimit: &api.Resources{ CPU: pointer.Of(100), }, }, diff --git a/command/quota_init.go b/command/quota_init.go index 3cc8441815f..2a7c46ebd26 100644 --- a/command/quota_init.go +++ b/command/quota_init.go @@ -126,11 +126,8 @@ limit { device "nvidia/gpu/1080ti" { count = 1 } - storage { - variables = 1000 # in MB - host_volumes = 100000 # in MB - } } + variables_limit = 1000 } `) @@ -151,12 +148,9 @@ var defaultJsonQuotaSpec = strings.TrimSpace(` "Name": "nvidia/gpu/1080ti", "Count": 1 } - ], - "Storage": { - "Variables": 1000, - "HostVolumes": 100000 - } - } + ] + }, + "VariablesLimit": 1000 } ] } diff --git a/command/quota_init_test.go b/command/quota_init_test.go index daa97927ce3..c83f7ba2dbb 100644 --- a/command/quota_init_test.go +++ b/command/quota_init_test.go @@ -18,6 +18,7 @@ func TestQuotaInitCommand_Implements(t *testing.T) { } func TestQuotaInitCommand_Run_HCL(t *testing.T) { + ci.Parallel(t) ui := cli.NewMockUi() cmd := &QuotaInitCommand{Meta: Meta{Ui: ui}} @@ -30,7 +31,7 @@ func TestQuotaInitCommand_Run_HCL(t *testing.T) { // Ensure we change the cwd back origDir, err := os.Getwd() must.NoError(t, err) - t.Cleanup(func() { os.Chdir(origDir) }) + defer os.Chdir(origDir) // Create a temp dir and change into it dir := t.TempDir() @@ -64,6 +65,7 @@ func TestQuotaInitCommand_Run_HCL(t *testing.T) { } func TestQuotaInitCommand_Run_JSON(t *testing.T) { + ci.Parallel(t) ui := cli.NewMockUi() cmd := &QuotaInitCommand{Meta: Meta{Ui: ui}} @@ -76,7 +78,7 @@ func TestQuotaInitCommand_Run_JSON(t *testing.T) { // Ensure we change the cwd back origDir, err := os.Getwd() must.NoError(t, err) - t.Cleanup(func() { os.Chdir(origDir) }) + defer os.Chdir(origDir) // Create a temp dir and change into it dir := t.TempDir() diff --git a/command/sentinel_apply.go b/command/sentinel_apply.go index 7db40022bd7..7d43c0e6c88 100644 --- a/command/sentinel_apply.go +++ b/command/sentinel_apply.go @@ -37,9 +37,8 @@ Apply Options: -description Sets a human readable description for the policy. - -scope - Sets the scope of the policy and when it should be enforced. One of - "submit-job" or "submit-host-volume". + -scope (default: submit-job) + Sets the scope of the policy and when it should be enforced. -level (default: advisory) Sets the enforcement level of the policy. 
Must be one of advisory, @@ -74,7 +73,7 @@ func (c *SentinelApplyCommand) Run(args []string) int { flags := c.Meta.FlagSet(c.Name(), FlagSetClient) flags.Usage = func() { c.Ui.Output(c.Help()) } flags.StringVar(&description, "description", "", "") - flags.StringVar(&scope, "scope", "", "") + flags.StringVar(&scope, "scope", "submit-job", "") flags.StringVar(&enfLevel, "level", "advisory", "") if err := flags.Parse(args); err != nil { return 1 @@ -108,16 +107,6 @@ func (c *SentinelApplyCommand) Run(args []string) int { } } - switch scope { - case api.SentinelScopeSubmitJob, api.SentinelScopeSubmitHostVolume: - case "": - c.Ui.Error("-scope flag is required") - return 1 - default: - c.Ui.Error(fmt.Sprintf("Error: invalid -scope value: %q", scope)) - return 1 - } - // Construct the policy sp := &api.SentinelPolicy{ Name: policyName, diff --git a/command/setup_consul.go b/command/setup_consul.go index 258c7011db7..061b38574dd 100644 --- a/command/setup_consul.go +++ b/command/setup_consul.go @@ -63,6 +63,9 @@ Usage: nomad setup consul [options] environment variables as documented in https://developer.hashicorp.com/consul/commands#environment-variables + WARNING: This command is an experimental feature and may change its behavior + in future versions of Nomad. + Setup Consul options: -jwks-url diff --git a/command/setup_vault.go b/command/setup_vault.go index 72a6c24a5ee..2424f04b634 100644 --- a/command/setup_vault.go +++ b/command/setup_vault.go @@ -74,6 +74,9 @@ Usage: nomad setup vault [options] migrate to use Workload Identities with Vault. This option requires operator:read permission for Nomad. + WARNING: This command is an experimental feature and may change its behavior + in future versions of Nomad. + Setup Vault options: -jwks-url diff --git a/command/volume_create.go b/command/volume_create.go index cca272e727d..c7d32fbe808 100644 --- a/command/volume_create.go +++ b/command/volume_create.go @@ -25,47 +25,18 @@ Usage: nomad volume create [options] If the supplied path is "-" the volume file is read from stdin. Otherwise, it is read from the file at the supplied path. - When ACLs are enabled, this command requires a token with the appropriate - capability in the volume's namespace: the 'csi-write-volume' capability for - CSI volumes or 'host-volume-create' for dynamic host volumes. + When ACLs are enabled, this command requires a token with the + 'csi-write-volume' capability for the volume's namespace. General Options: - ` + generalOptionsUsage(usageOptsDefault) + ` - -Create Options: - - -detach - Return immediately instead of entering monitor mode for dynamic host - volumes. After creating a volume, the volume ID will be printed to the - screen, which can be used to examine the volume using the volume status - command. If -detach is omitted or false, the command will monitor the state - of the volume until it is ready to be scheduled. - - -id - Update a volume previously created with this ID prefix. Used for dynamic - host volumes only. - - -verbose - Display full information when monitoring volume state. Used for dynamic host - volumes only. - - -policy-override - Sets the flag to force override any soft mandatory Sentinel policies. Used - for dynamic host volumes only. 
-` + ` + generalOptionsUsage(usageOptsDefault) return strings.TrimSpace(helpText) } func (c *VolumeCreateCommand) AutocompleteFlags() complete.Flags { - return mergeAutocompleteFlags(c.Meta.AutocompleteFlags(FlagSetClient), - complete.Flags{ - "-detach": complete.PredictNothing, - "-verbose": complete.PredictNothing, - "-policy-override": complete.PredictNothing, - "-id": complete.PredictNothing, - }) + return c.Meta.AutocompleteFlags(FlagSetClient) } func (c *VolumeCreateCommand) AutocompleteArgs() complete.Predictor { @@ -79,13 +50,7 @@ func (c *VolumeCreateCommand) Synopsis() string { func (c *VolumeCreateCommand) Name() string { return "volume create" } func (c *VolumeCreateCommand) Run(args []string) int { - var detach, verbose, override bool - var volID string flags := c.Meta.FlagSet(c.Name(), FlagSetClient) - flags.BoolVar(&detach, "detach", false, "detach from monitor") - flags.BoolVar(&verbose, "verbose", false, "display full volume IDs") - flags.BoolVar(&override, "policy-override", false, "override soft mandatory Sentinel policies") - flags.StringVar(&volID, "id", "", "update an existing dynamic host volume") flags.Usage = func() { c.Ui.Output(c.Help()) } if err := flags.Parse(args); err != nil { @@ -134,9 +99,8 @@ func (c *VolumeCreateCommand) Run(args []string) int { switch strings.ToLower(volType) { case "csi": - return c.csiCreate(client, ast) - case "host": - return c.hostVolumeCreate(client, ast, detach, verbose, override, volID) + code := c.csiCreate(client, ast) + return code default: c.Ui.Error(fmt.Sprintf("Error unknown volume type: %s", volType)) return 1 diff --git a/command/volume_create_host.go b/command/volume_create_host.go deleted file mode 100644 index 3ef709ea163..00000000000 --- a/command/volume_create_host.go +++ /dev/null @@ -1,401 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -package command - -import ( - "context" - "fmt" - "strconv" - "time" - - "github.com/hashicorp/hcl" - "github.com/hashicorp/hcl/hcl/ast" - "github.com/hashicorp/nomad/api" - "github.com/hashicorp/nomad/helper" - "github.com/mitchellh/go-glint" - "github.com/mitchellh/go-glint/components" - "github.com/mitchellh/mapstructure" -) - -func (c *VolumeCreateCommand) hostVolumeCreate( - client *api.Client, ast *ast.File, detach, verbose, override bool, volID string) int { - - vol, err := decodeHostVolume(ast) - if err != nil { - c.Ui.Error(fmt.Sprintf("Error decoding the volume definition: %s", err)) - return 1 - } - if volID != "" { - ns := c.namespace - if vol.Namespace != "" { - ns = vol.Namespace - } - stub, possible, err := getHostVolumeByPrefix(client, volID, ns) - if err != nil { - c.Ui.Error(fmt.Sprintf("Could not update existing volume: %s", err)) - return 1 - } - if len(possible) > 0 { - out, err := formatHostVolumes(possible, formatOpts{short: true}) - if err != nil { - c.Ui.Error(fmt.Sprintf("Error formatting: %s", err)) - return 1 - } - c.Ui.Error(fmt.Sprintf("Prefix matched multiple volumes\n\n%s", out)) - return 1 - } - vol.ID = stub.ID - } - - req := &api.HostVolumeCreateRequest{ - Volume: vol, - PolicyOverride: override, - } - resp, _, err := client.HostVolumes().Create(req, nil) - if err != nil { - c.Ui.Error(fmt.Sprintf("Error creating volume: %s", err)) - return 1 - } - vol = resp.Volume - - if resp.Warnings != "" { - c.Ui.Output( - c.Colorize().Color( - fmt.Sprintf("[bold][yellow]Volume Warnings:\n%s[reset]\n", resp.Warnings))) - } - - var lastIndex uint64 - - if detach || vol.State == api.HostVolumeStateReady { - c.Ui.Output(fmt.Sprintf( - "Created host volume %s with ID %s", vol.Name, vol.ID)) - return 0 - } else { - c.Ui.Output(fmt.Sprintf( - "==> Created host volume %s with ID %s", vol.Name, vol.ID)) - volID = vol.ID - lastIndex = vol.ModifyIndex - } - - if vol.Namespace != "" { - client.SetNamespace(vol.Namespace) - } - - err = c.monitorHostVolume(client, volID, lastIndex, verbose) - if err != nil { - c.Ui.Error(fmt.Sprintf("==> %s: %v", formatTime(time.Now()), err.Error())) - return 1 - } - return 0 -} - -func (c *VolumeCreateCommand) monitorHostVolume(client *api.Client, id string, lastIndex uint64, verbose bool) error { - length := shortId - if verbose { - length = fullId - } - - opts := formatOpts{ - verbose: verbose, - short: !verbose, - length: length, - } - - if isStdoutTerminal() { - return c.ttyMonitor(client, id, lastIndex, opts) - } else { - return c.nottyMonitor(client, id, lastIndex, opts) - } -} - -func (c *VolumeCreateCommand) ttyMonitor(client *api.Client, id string, lastIndex uint64, opts formatOpts) error { - - gUi := glint.New() - spinner := glint.Layout( - components.Spinner(), - glint.Text(fmt.Sprintf(" Monitoring volume %q in progress...", limit(id, opts.length))), - ).Row().MarginLeft(2) - refreshRate := 100 * time.Millisecond - - gUi.SetRefreshRate(refreshRate) - gUi.Set(spinner) - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - go gUi.Render(ctx) - - qOpts := &api.QueryOptions{ - AllowStale: true, - WaitIndex: lastIndex, - WaitTime: time.Second * 5, - } - - var statusComponent *glint.LayoutComponent - var endSpinner *glint.LayoutComponent - -DONE: - for { - vol, meta, err := client.HostVolumes().Get(id, qOpts) - if err != nil { - return err - } - str, err := formatHostVolume(vol, opts) - if err != nil { - // should never happen b/c we don't pass json/template via opts here - 
return err - } - statusComponent = glint.Layout( - glint.Text(""), - glint.Text(formatTime(time.Now())), - glint.Text(c.Colorize().Color(str)), - ).MarginLeft(4) - - statusComponent = glint.Layout(statusComponent) - gUi.Set(spinner, statusComponent) - - endSpinner = glint.Layout( - components.Spinner(), - glint.Text(fmt.Sprintf(" Host volume %q %s", limit(id, opts.length), vol.State)), - ).Row().MarginLeft(2) - - switch vol.State { - case api.HostVolumeStateReady: - endSpinner = glint.Layout( - glint.Text(fmt.Sprintf("✓ Host volume %q %s", limit(id, opts.length), vol.State)), - ).Row().MarginLeft(2) - break DONE - - case api.HostVolumeStateUnavailable: - endSpinner = glint.Layout( - glint.Text(fmt.Sprintf("! Host volume %q %s", limit(id, opts.length), vol.State)), - ).Row().MarginLeft(2) - break DONE - - default: - qOpts.WaitIndex = meta.LastIndex - continue - } - - } - - // Render one final time with completion message - gUi.Set(endSpinner, statusComponent, glint.Text("")) - gUi.RenderFrame() - return nil -} - -func (c *VolumeCreateCommand) nottyMonitor(client *api.Client, id string, lastIndex uint64, opts formatOpts) error { - - c.Ui.Info(fmt.Sprintf("==> %s: Monitoring volume %q...", - formatTime(time.Now()), limit(id, opts.length))) - - for { - vol, _, err := client.HostVolumes().Get(id, &api.QueryOptions{ - WaitIndex: lastIndex, - WaitTime: time.Second * 5, - }) - if err != nil { - return err - } - if vol.State == api.HostVolumeStateReady { - c.Ui.Info(fmt.Sprintf("==> %s: Volume %q ready", - formatTime(time.Now()), limit(vol.Name, opts.length))) - return nil - } - } -} - -func decodeHostVolume(input *ast.File) (*api.HostVolume, error) { - var err error - vol := &api.HostVolume{} - - list, ok := input.Node.(*ast.ObjectList) - if !ok { - return nil, fmt.Errorf("error parsing: root should be an object") - } - - // Decode the full thing into a map[string]interface for ease - var m map[string]any - err = hcl.DecodeObject(&m, list) - if err != nil { - return nil, err - } - - // Need to manually parse these fields/blocks - delete(m, "capability") - delete(m, "constraint") - delete(m, "capacity") - delete(m, "capacity_max") - delete(m, "capacity_min") - delete(m, "type") - - // Decode the rest - err = mapstructure.WeakDecode(m, vol) - if err != nil { - return nil, err - } - - capacity, err := parseCapacityBytes(list.Filter("capacity")) - if err != nil { - return nil, fmt.Errorf("invalid capacity: %v", err) - } - vol.CapacityBytes = capacity - capacityMin, err := parseCapacityBytes(list.Filter("capacity_min")) - if err != nil { - return nil, fmt.Errorf("invalid capacity_min: %v", err) - } - vol.RequestedCapacityMinBytes = capacityMin - capacityMax, err := parseCapacityBytes(list.Filter("capacity_max")) - if err != nil { - return nil, fmt.Errorf("invalid capacity_max: %v", err) - } - vol.RequestedCapacityMaxBytes = capacityMax - - if o := list.Filter("constraint"); len(o.Items) > 0 { - if err := parseConstraints(&vol.Constraints, o); err != nil { - return nil, fmt.Errorf("invalid constraint: %v", err) - } - } - if o := list.Filter("capability"); len(o.Items) > 0 { - if err := parseHostVolumeCapabilities(&vol.RequestedCapabilities, o); err != nil { - return nil, fmt.Errorf("invalid capability: %v", err) - } - } - - return vol, nil -} - -func parseHostVolumeCapabilities(result *[]*api.HostVolumeCapability, list *ast.ObjectList) error { - for _, o := range list.Elem().Items { - valid := []string{"access_mode", "attachment_mode"} - if err := helper.CheckHCLKeys(o.Val, valid); err != nil { - return 
err - } - - ot, ok := o.Val.(*ast.ObjectType) - if !ok { - break - } - - var m map[string]any - if err := hcl.DecodeObject(&m, ot.List); err != nil { - return err - } - var cap *api.HostVolumeCapability - if err := mapstructure.WeakDecode(&m, &cap); err != nil { - return err - } - - *result = append(*result, cap) - } - - return nil -} - -func parseConstraints(result *[]*api.Constraint, list *ast.ObjectList) error { - for _, o := range list.Elem().Items { - valid := []string{ - "attribute", - "distinct_hosts", - "distinct_property", - "operator", - "regexp", - "set_contains", - "value", - "version", - "semver", - } - if err := helper.CheckHCLKeys(o.Val, valid); err != nil { - return err - } - - var m map[string]any - if err := hcl.DecodeObject(&m, o.Val); err != nil { - return err - } - - m["LTarget"] = m["attribute"] - m["RTarget"] = m["value"] - m["Operand"] = m["operator"] - - // If "version" is provided, set the operand - // to "version" and the value to the "RTarget" - if constraint, ok := m[api.ConstraintVersion]; ok { - m["Operand"] = api.ConstraintVersion - m["RTarget"] = constraint - } - - // If "semver" is provided, set the operand - // to "semver" and the value to the "RTarget" - if constraint, ok := m[api.ConstraintSemver]; ok { - m["Operand"] = api.ConstraintSemver - m["RTarget"] = constraint - } - - // If "regexp" is provided, set the operand - // to "regexp" and the value to the "RTarget" - if constraint, ok := m[api.ConstraintRegex]; ok { - m["Operand"] = api.ConstraintRegex - m["RTarget"] = constraint - } - - // If "set_contains" is provided, set the operand - // to "set_contains" and the value to the "RTarget" - if constraint, ok := m[api.ConstraintSetContains]; ok { - m["Operand"] = api.ConstraintSetContains - m["RTarget"] = constraint - } - - if value, ok := m[api.ConstraintDistinctHosts]; ok { - enabled, err := parseBool(value) - if err != nil { - return fmt.Errorf("distinct_hosts should be set to true or false; %v", err) - } - - // If it is not enabled, skip the constraint. - if !enabled { - continue - } - - m["Operand"] = api.ConstraintDistinctHosts - m["RTarget"] = strconv.FormatBool(enabled) - } - - if property, ok := m[api.ConstraintDistinctProperty]; ok { - m["Operand"] = api.ConstraintDistinctProperty - m["LTarget"] = property - } - - // Build the constraint - var c api.Constraint - if err := mapstructure.WeakDecode(m, &c); err != nil { - return err - } - if c.Operand == "" { - c.Operand = "=" - } - - *result = append(*result, &c) - } - - return nil -} - -// parseBool takes an interface value and tries to convert it to a boolean and -// returns an error if the type can't be converted. -func parseBool(value any) (bool, error) { - var enabled bool - var err error - switch data := value.(type) { - case string: - enabled, err = strconv.ParseBool(data) - case bool: - enabled = data - default: - err = fmt.Errorf("%v couldn't be converted to boolean value", value) - } - - return enabled, err -} diff --git a/command/volume_create_host_test.go b/command/volume_create_host_test.go deleted file mode 100644 index af1fb1b619d..00000000000 --- a/command/volume_create_host_test.go +++ /dev/null @@ -1,232 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -package command - -import ( - "os" - "strings" - "testing" - - "github.com/hashicorp/cli" - "github.com/hashicorp/hcl" - "github.com/hashicorp/nomad/api" - "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/command/agent" - "github.com/shoenig/test/must" -) - -func TestHostVolumeCreateCommand_Run(t *testing.T) { - ci.Parallel(t) - srv, client, url := testServer(t, true, func(c *agent.Config) { - c.Client.Meta = map[string]string{"rack": "foo"} - }) - t.Cleanup(srv.Shutdown) - - waitForNodes(t, client) - - _, err := client.Namespaces().Register(&api.Namespace{Name: "prod"}, nil) - must.NoError(t, err) - - ui := cli.NewMockUi() - cmd := &VolumeCreateCommand{Meta: Meta{Ui: ui}} - - hclTestFile := ` -namespace = "prod" -name = "database" -type = "host" -plugin_id = "mkdir" -node_pool = "default" - -capacity_min = "10GiB" -capacity_max = "20G" - -constraint { - attribute = "${meta.rack}" - value = "foo" -} - -capability { - access_mode = "single-node-writer" - attachment_mode = "file-system" -} - -capability { - access_mode = "single-node-reader-only" - attachment_mode = "block-device" -} - -parameters { - foo = "bar" -} -` - - file, err := os.CreateTemp(t.TempDir(), "volume-test-*.hcl") - must.NoError(t, err) - _, err = file.WriteString(hclTestFile) - must.NoError(t, err) - - args := []string{"-address", url, "-detach", file.Name()} - - code := cmd.Run(args) - must.Eq(t, 0, code, must.Sprintf("got error: %s", ui.ErrorWriter.String())) - - out := ui.OutputWriter.String() - must.StrContains(t, out, "Created host volume") - parts := strings.Split(out, " ") - id := strings.TrimSpace(parts[len(parts)-1]) - - // Verify volume was created - got, _, err := client.HostVolumes().Get(id, &api.QueryOptions{Namespace: "prod"}) - must.NoError(t, err) - must.NotNil(t, got) - - // Verify we can update the volume without changes - args = []string{"-address", url, "-detach", "-id", got.ID, file.Name()} - code = cmd.Run(args) - must.Eq(t, 0, code, must.Sprintf("got error: %s", ui.ErrorWriter.String())) - list, _, err := client.HostVolumes().List(nil, &api.QueryOptions{Namespace: "prod"}) - must.Len(t, 1, list, must.Sprintf("new volume should not be created on update")) -} - -func TestHostVolume_HCLDecode(t *testing.T) { - ci.Parallel(t) - - cases := []struct { - name string - hcl string - expected *api.HostVolume - errMsg string - }{ - { - name: "full spec", - hcl: ` -namespace = "prod" -name = "database" -type = "host" -plugin_id = "mkdir" -node_pool = "default" - -capacity_min = "10GiB" -capacity_max = "20G" - -constraint { - attribute = "${attr.kernel.name}" - value = "linux" -} - -constraint { - attribute = "${meta.rack}" - value = "foo" -} - -capability { - access_mode = "single-node-writer" - attachment_mode = "file-system" -} - -capability { - access_mode = "single-node-reader-only" - attachment_mode = "block-device" -} - -parameters { - foo = "bar" -} -`, - expected: &api.HostVolume{ - Namespace: "prod", - Name: "database", - PluginID: "mkdir", - NodePool: "default", - Constraints: []*api.Constraint{{ - LTarget: "${attr.kernel.name}", - RTarget: "linux", - Operand: "=", - }, { - LTarget: "${meta.rack}", - RTarget: "foo", - Operand: "=", - }}, - RequestedCapacityMinBytes: 10737418240, - RequestedCapacityMaxBytes: 20000000000, - RequestedCapabilities: []*api.HostVolumeCapability{ - { - AttachmentMode: api.HostVolumeAttachmentModeFilesystem, - AccessMode: api.HostVolumeAccessModeSingleNodeWriter, - }, - { - AttachmentMode: 
api.HostVolumeAttachmentModeBlockDevice, - AccessMode: api.HostVolumeAccessModeSingleNodeReader, - }, - }, - Parameters: map[string]string{"foo": "bar"}, - }, - }, - - { - name: "mostly empty spec", - hcl: ` -namespace = "prod" -name = "database" -type = "host" -plugin_id = "mkdir" -node_pool = "default" -`, - expected: &api.HostVolume{ - Namespace: "prod", - Name: "database", - PluginID: "mkdir", - NodePool: "default", - }, - }, - - { - name: "invalid capacity", - hcl: ` -namespace = "prod" -name = "database" -type = "host" -plugin_id = "mkdir" -node_pool = "default" - -capacity_min = "a" -`, - expected: nil, - errMsg: "invalid capacity_min: could not parse value as bytes: strconv.ParseFloat: parsing \"\": invalid syntax", - }, - - { - name: "invalid constraint", - hcl: ` -namespace = "prod" -name = "database" -type = "host" -plugin_id = "mkdir" -node_pool = "default" - -constraint { - distinct_hosts = "foo" -} - -`, - expected: nil, - errMsg: "invalid constraint: distinct_hosts should be set to true or false; strconv.ParseBool: parsing \"foo\": invalid syntax", - }, - } - - for _, tc := range cases { - t.Run(tc.name, func(t *testing.T) { - ast, err := hcl.ParseString(tc.hcl) - must.NoError(t, err) - vol, err := decodeHostVolume(ast) - if tc.errMsg == "" { - must.NoError(t, err) - } else { - must.EqError(t, err, tc.errMsg) - } - must.Eq(t, tc.expected, vol) - }) - } - -} diff --git a/command/volume_delete.go b/command/volume_delete.go index e23e02f6fae..7dc3df1e128 100644 --- a/command/volume_delete.go +++ b/command/volume_delete.go @@ -9,7 +9,6 @@ import ( "github.com/hashicorp/nomad/api" "github.com/hashicorp/nomad/api/contexts" - "github.com/hashicorp/nomad/helper" flaghelper "github.com/hashicorp/nomad/helper/flags" "github.com/posener/complete" ) @@ -41,20 +40,14 @@ Delete Options: -secret Secrets to pass to the plugin to delete the snapshot. Accepts multiple - flags in the form -secret key=value. Only available for CSI volumes. - - -type - Type of volume to delete. Must be one of "csi" or "host". Defaults to "csi". + flags in the form -secret key=value ` return strings.TrimSpace(helpText) } func (c *VolumeDeleteCommand) AutocompleteFlags() complete.Flags { return mergeAutocompleteFlags(c.Meta.AutocompleteFlags(FlagSetClient), - complete.Flags{ - "-type": complete.PredictSet("csi", "host"), - "-secret": complete.PredictNothing, - }) + complete.Flags{}) } func (c *VolumeDeleteCommand) AutocompleteArgs() complete.Predictor { @@ -70,11 +63,11 @@ func (c *VolumeDeleteCommand) AutocompleteArgs() complete.Predictor { } matches := resp.Matches[contexts.Volumes] - resp, _, err = client.Search().PrefixSearch(a.Last, contexts.HostVolumes, nil) + resp, _, err = client.Search().PrefixSearch(a.Last, contexts.Nodes, nil) if err != nil { return []string{} } - matches = append(matches, resp.Matches[contexts.HostVolumes]...) + matches = append(matches, resp.Matches[contexts.Nodes]...) return matches }) } @@ -87,11 +80,9 @@ func (c *VolumeDeleteCommand) Name() string { return "volume delete" } func (c *VolumeDeleteCommand) Run(args []string) int { var secretsArgs flaghelper.StringFlag - var typeArg string flags := c.Meta.FlagSet(c.Name(), FlagSetClient) flags.Usage = func() { c.Ui.Output(c.Help()) } flags.Var(&secretsArgs, "secret", "secrets for snapshot, ex. 
-secret key=value") - flags.StringVar(&typeArg, "type", "csi", "type of volume (csi or host)") if err := flags.Parse(args); err != nil { c.Ui.Error(fmt.Sprintf("Error parsing arguments %s", err)) @@ -114,19 +105,6 @@ func (c *VolumeDeleteCommand) Run(args []string) int { return 1 } - switch typeArg { - case "csi": - return c.deleteCSIVolume(client, volID, secretsArgs) - case "host": - return c.deleteHostVolume(client, volID) - default: - c.Ui.Error(fmt.Sprintf("No such volume type %q", typeArg)) - return 1 - } -} - -func (c *VolumeDeleteCommand) deleteCSIVolume(client *api.Client, volID string, secretsArgs flaghelper.StringFlag) int { - secrets := api.CSISecrets{} for _, kv := range secretsArgs { if key, value, found := strings.Cut(kv, "="); found { @@ -137,30 +115,6 @@ func (c *VolumeDeleteCommand) deleteCSIVolume(client *api.Client, volID string, } } - // get a CSI volume that matches the given prefix or a list of all matches - // if an exact match is not found. - stub, possible, err := getByPrefix[api.CSIVolumeListStub]("volumes", client.CSIVolumes().List, - func(vol *api.CSIVolumeListStub, prefix string) bool { return vol.ID == prefix }, - &api.QueryOptions{ - Prefix: volID, - Namespace: c.namespace, - }) - if err != nil { - c.Ui.Error(fmt.Sprintf("Could not find existing volume to delete: %s", err)) - return 1 - } - if len(possible) > 0 { - out, err := csiFormatVolumes(possible, false, "") - if err != nil { - c.Ui.Error(fmt.Sprintf("Error formatting: %s", err)) - return 1 - } - c.Ui.Error(fmt.Sprintf("Prefix matched multiple volumes\n\n%s", out)) - return 1 - } - volID = stub.ID - c.namespace = stub.Namespace - err = client.CSIVolumes().DeleteOpts(&api.CSIVolumeDeleteRequest{ ExternalVolumeID: volID, Secrets: secrets, @@ -173,34 +127,3 @@ func (c *VolumeDeleteCommand) deleteCSIVolume(client *api.Client, volID string, c.Ui.Output(fmt.Sprintf("Successfully deleted volume %q!", volID)) return 0 } - -func (c *VolumeDeleteCommand) deleteHostVolume(client *api.Client, volID string) int { - - if !helper.IsUUID(volID) { - stub, possible, err := getHostVolumeByPrefix(client, volID, c.namespace) - if err != nil { - c.Ui.Error(fmt.Sprintf("Could not find existing volume to delete: %s", err)) - return 1 - } - if len(possible) > 0 { - out, err := formatHostVolumes(possible, formatOpts{short: true}) - if err != nil { - c.Ui.Error(fmt.Sprintf("Error formatting: %s", err)) - return 1 - } - c.Ui.Error(fmt.Sprintf("Prefix matched multiple volumes\n\n%s", out)) - return 1 - } - volID = stub.ID - c.namespace = stub.Namespace - } - - _, err := client.HostVolumes().Delete(&api.HostVolumeDeleteRequest{ID: volID}, nil) - if err != nil { - c.Ui.Error(fmt.Sprintf("Error deleting volume: %s", err)) - return 1 - } - - c.Ui.Output(fmt.Sprintf("Successfully deleted volume %q!", volID)) - return 0 -} diff --git a/command/volume_delete_host_test.go b/command/volume_delete_host_test.go deleted file mode 100644 index 87fd2900dba..00000000000 --- a/command/volume_delete_host_test.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -package command - -import ( - "fmt" - "os" - "strings" - "testing" - - "github.com/hashicorp/cli" - "github.com/hashicorp/nomad/api" - "github.com/hashicorp/nomad/ci" - "github.com/posener/complete" - "github.com/shoenig/test/must" -) - -func TestHostVolumeDeleteCommand(t *testing.T) { - ci.Parallel(t) - srv, client, url := testServer(t, true, nil) - t.Cleanup(srv.Shutdown) - - waitForNodes(t, client) - - _, err := client.Namespaces().Register(&api.Namespace{Name: "prod"}, nil) - must.NoError(t, err) - - nodes, _, err := client.Nodes().List(nil) - must.NoError(t, err) - must.Len(t, 1, nodes) - nodeID := nodes[0].ID - hostPath := t.TempDir() - - ui := cli.NewMockUi() - - hclTestFile := fmt.Sprintf(` -namespace = "prod" -name = "example" -type = "host" -node_id = "%s" -node_pool = "default" -host_path = "%s" - -capability { - access_mode = "single-node-reader-only" - attachment_mode = "file-system" -} -`, nodeID, hostPath) - - file, err := os.CreateTemp(t.TempDir(), "volume-test-*.hcl") - must.NoError(t, err) - _, err = file.WriteString(hclTestFile) - must.NoError(t, err) - - args := []string{"-address", url, file.Name()} - regCmd := &VolumeRegisterCommand{Meta: Meta{Ui: ui}} - code := regCmd.Run(args) - must.Eq(t, 0, code, must.Sprintf("got error: %s", ui.ErrorWriter.String())) - - out := ui.OutputWriter.String() - must.StrContains(t, out, "Registered host volume") - parts := strings.Split(out, " ") - id := strings.TrimSpace(parts[len(parts)-1]) - - ui.OutputWriter.Reset() - - // autocomplete - cmd := &VolumeDeleteCommand{Meta: Meta{Ui: ui, namespace: "*", flagAddress: url}} - prefix := id[:len(id)-5] - cargs := complete.Args{Last: prefix} - predictor := cmd.AutocompleteArgs() - - res := predictor.Predict(cargs) - must.SliceLen(t, 1, res) - must.Eq(t, id, res[0]) - - // missing the namespace - cmd = &VolumeDeleteCommand{Meta: Meta{Ui: ui}} - args = []string{"-address", url, "-type", "host", id} - code = cmd.Run(args) - must.Eq(t, 1, code) - must.StrContains(t, ui.ErrorWriter.String(), "no such volume") - ui.ErrorWriter.Reset() - - // missing the namespace, but use a prefix - args = []string{"-address", url, "-type", "host", id[:12]} - code = cmd.Run(args) - must.Eq(t, 1, code) - must.StrContains(t, ui.ErrorWriter.String(), "no volumes with prefix") - ui.ErrorWriter.Reset() - - // fix the namespace, and use a prefix - args = []string{"-address", url, "-type", "host", "-namespace", "prod", id[:12]} - code = cmd.Run(args) - must.Eq(t, 0, code, must.Sprintf("got error: %s", ui.ErrorWriter.String())) - out = ui.OutputWriter.String() - must.StrContains(t, out, fmt.Sprintf("Successfully deleted volume %q!", id)) -} diff --git a/command/volume_deregister.go b/command/volume_deregister.go index 60851e66954..c4c78cfcc2d 100644 --- a/command/volume_deregister.go +++ b/command/volume_deregister.go @@ -53,6 +53,7 @@ func (c *VolumeDeregisterCommand) AutocompleteArgs() complete.Predictor { return nil } + // When multiple volume types are implemented, this search should merge contexts resp, _, err := client.Search().PrefixSearch(a.Last, contexts.Volumes, nil) if err != nil { return []string{} diff --git a/command/volume_init.go b/command/volume_init.go index 93cd9e17321..bd37df8cfa1 100644 --- a/command/volume_init.go +++ b/command/volume_init.go @@ -8,18 +8,17 @@ import ( "os" "strings" - "github.com/hashicorp/nomad/command/asset" "github.com/posener/complete" ) const ( - // defaultHclVolumeInitName is the default name we use when initializing + // 
DefaultHclVolumeInitName is the default name we use when initializing // the example volume file in HCL format - defaultHclVolumeInitName = "volume.hcl" + DefaultHclVolumeInitName = "volume.hcl" // DefaultHclVolumeInitName is the default name we use when initializing // the example volume file in JSON format - defaultJsonVolumeInitName = "volume.json" + DefaultJsonVolumeInitName = "volume.json" ) // VolumeInitCommand generates a new volume spec that you can customize to @@ -40,11 +39,6 @@ Init Options: -json Create an example JSON volume specification. - - -type - Create an example for a specific type of volume (one of "csi" or "host", - defaults to "csi"). - ` return strings.TrimSpace(helpText) } @@ -56,7 +50,6 @@ func (c *VolumeInitCommand) Synopsis() string { func (c *VolumeInitCommand) AutocompleteFlags() complete.Flags { return complete.Flags{ "-json": complete.PredictNothing, - "-type": complete.PredictSet("host", "csi"), } } @@ -68,11 +61,9 @@ func (c *VolumeInitCommand) Name() string { return "volume init" } func (c *VolumeInitCommand) Run(args []string) int { var jsonOutput bool - var volType string flags := c.Meta.FlagSet(c.Name(), FlagSetClient) flags.Usage = func() { c.Ui.Output(c.Help()) } flags.BoolVar(&jsonOutput, "json", false, "") - flags.StringVar(&volType, "type", "csi", "type of volume") if err := flags.Parse(args); err != nil { return 1 @@ -86,17 +77,11 @@ func (c *VolumeInitCommand) Run(args []string) int { return 1 } - fileName := defaultHclVolumeInitName - fileContent := asset.CSIVolumeSpecHCL - - if volType == "host" && !jsonOutput { - fileContent = asset.HostVolumeSpecHCL - } else if volType == "host" && jsonOutput { - fileName = defaultJsonVolumeInitName - fileContent = asset.HostVolumeSpecJSON - } else if jsonOutput { - fileName = defaultJsonVolumeInitName - fileContent = asset.CSIVolumeSpecJSON + fileName := DefaultHclVolumeInitName + fileContent := defaultHclVolumeSpec + if jsonOutput { + fileName = DefaultJsonVolumeInitName + fileContent = defaultJsonVolumeSpec } if len(args) == 1 { fileName = args[0] @@ -114,7 +99,7 @@ func (c *VolumeInitCommand) Run(args []string) int { } // Write out the example - err = os.WriteFile(fileName, fileContent, 0660) + err = os.WriteFile(fileName, []byte(fileContent), 0660) if err != nil { c.Ui.Error(fmt.Sprintf("Failed to write %q: %v", fileName, err)) return 1 @@ -124,3 +109,151 @@ func (c *VolumeInitCommand) Run(args []string) int { c.Ui.Output(fmt.Sprintf("Example volume specification written to %s", fileName)) return 0 } + +var defaultHclVolumeSpec = strings.TrimSpace(` +id = "ebs_prod_db1" +namespace = "default" +name = "database" +type = "csi" +plugin_id = "plugin_id" + +# For 'nomad volume register', provide the external ID from the storage +# provider. This field should be omitted when creating a volume with +# 'nomad volume create' +external_id = "vol-23452345" + +# For 'nomad volume create', specify a snapshot ID or volume to clone. You can +# specify only one of these two fields. +snapshot_id = "snap-12345" +# clone_id = "vol-abcdef" + +# Optional: for 'nomad volume create', specify a maximum and minimum capacity. +# Registering an existing volume will record but ignore these fields. +capacity_min = "10GiB" +capacity_max = "20G" + +# Required (at least one): for 'nomad volume create', specify one or more +# capabilities to validate. Registering an existing volume will record but +# ignore these fields. 
+capability { + access_mode = "single-node-writer" + attachment_mode = "file-system" +} + +capability { + access_mode = "single-node-reader" + attachment_mode = "block-device" +} + +# Optional: for 'nomad volume create', specify mount options to validate for +# 'attachment_mode = "file-system". Registering an existing volume will record +# but ignore these fields. +mount_options { + fs_type = "ext4" + mount_flags = ["ro"] +} + +# Optional: specify one or more locations where the volume must be accessible +# from. Refer to the plugin documentation for what segment values are supported. +topology_request { + preferred { + topology { segments { rack = "R1" } } + } + required { + topology { segments { rack = "R1" } } + topology { segments { rack = "R2", zone = "us-east-1a" } } + } +} + +# Optional: provide any secrets specified by the plugin. +secrets { + example_secret = "xyzzy" +} + +# Optional: provide a map of keys to string values expected by the plugin. +parameters { + skuname = "Premium_LRS" +} + +# Optional: for 'nomad volume register', provide a map of keys to string +# values expected by the plugin. This field will populated automatically by +# 'nomad volume create'. +context { + endpoint = "http://192.168.1.101:9425" +} +`) + +var defaultJsonVolumeSpec = strings.TrimSpace(` +{ + "id": "ebs_prod_db1", + "namespace": "default", + "name": "database", + "type": "csi", + "plugin_id": "plugin_id", + "external_id": "vol-23452345", + "snapshot_id": "snap-12345", + "capacity_min": "10GiB", + "capacity_max": "20G", + "capability": [ + { + "access_mode": "single-node-writer", + "attachment_mode": "file-system" + }, + { + "access_mode": "single-node-reader", + "attachment_mode": "block-device" + } + ], + "context": [ + { + "endpoint": "http://192.168.1.101:9425" + } + ], + "mount_options": [ + { + "fs_type": "ext4", + "mount_flags": [ + "ro" + ] + } + ], + "topology_request": { + "preferred": [ + { + "topology": { + "segments": { + "rack": "R1" + } + } + } + ], + "required": [ + { + "topology": { + "segments": { + "rack": "R1" + } + } + }, + { + "topology": { + "segments": { + "rack": "R2", + "zone": "us-east-1a" + } + } + } + ] + }, + "parameters": [ + { + "skuname": "Premium_LRS" + } + ], + "secrets": [ + { + "example_secret": "xyzzy" + } + ] +} +`) diff --git a/command/volume_register.go b/command/volume_register.go index ec510a4e572..3a8815347ff 100644 --- a/command/volume_register.go +++ b/command/volume_register.go @@ -28,34 +28,18 @@ Usage: nomad volume register [options] If the supplied path is "-" the volume file is read from stdin. Otherwise, it is read from the file at the supplied path. - When ACLs are enabled, this command requires a token with the appropriate - capability in the volume's namespace: the 'csi-write-volume' capability for - CSI volumes or 'host-volume-register' for dynamic host volumes. + When ACLs are enabled, this command requires a token with the + 'csi-write-volume' capability for the volume's namespace. General Options: - ` + generalOptionsUsage(usageOptsDefault) + ` - -Register Options: - - -id - Update a volume previously created with this ID prefix. Used for dynamic - host volumes only. - - -policy-override - Sets the flag to force override any soft mandatory Sentinel policies. Used - for dynamic host volumes only. 
-` + ` + generalOptionsUsage(usageOptsDefault) return strings.TrimSpace(helpText) } func (c *VolumeRegisterCommand) AutocompleteFlags() complete.Flags { - return mergeAutocompleteFlags(c.Meta.AutocompleteFlags(FlagSetClient), - complete.Flags{ - "-policy-override": complete.PredictNothing, - "-id": complete.PredictNothing, - }) + return c.Meta.AutocompleteFlags(FlagSetClient) } func (c *VolumeRegisterCommand) AutocompleteArgs() complete.Predictor { @@ -69,11 +53,7 @@ func (c *VolumeRegisterCommand) Synopsis() string { func (c *VolumeRegisterCommand) Name() string { return "volume register" } func (c *VolumeRegisterCommand) Run(args []string) int { - var override bool - var volID string flags := c.Meta.FlagSet(c.Name(), FlagSetClient) - flags.BoolVar(&override, "policy-override", false, "override soft mandatory Sentinel policies") - flags.StringVar(&volID, "id", "", "update an existing dynamic host volume") flags.Usage = func() { c.Ui.Output(c.Help()) } if err := flags.Parse(args); err != nil { @@ -123,13 +103,16 @@ func (c *VolumeRegisterCommand) Run(args []string) int { switch volType { case "csi": - return c.csiRegister(client, ast) - case "host": - return c.hostVolumeRegister(client, ast, override, volID) + code := c.csiRegister(client, ast) + if code != 0 { + return code + } default: c.Ui.Error(fmt.Sprintf("Error unknown volume type: %s", volType)) return 1 } + + return 0 } // parseVolume is used to parse the quota specification from HCL diff --git a/command/volume_register_host.go b/command/volume_register_host.go deleted file mode 100644 index ff0929bcbcb..00000000000 --- a/command/volume_register_host.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package command - -import ( - "fmt" - - "github.com/hashicorp/hcl/hcl/ast" - "github.com/hashicorp/nomad/api" -) - -func (c *VolumeRegisterCommand) hostVolumeRegister(client *api.Client, ast *ast.File, override bool, volID string) int { - vol, err := decodeHostVolume(ast) - if err != nil { - c.Ui.Error(fmt.Sprintf("Error decoding the volume definition: %s", err)) - return 1 - } - if vol.NodeID == "" { - c.Ui.Error("Node ID is required for registering") - return 1 - } - if volID != "" { - ns := c.namespace - if vol.Namespace != "" { - ns = vol.Namespace - } - stub, possible, err := getHostVolumeByPrefix(client, volID, ns) - if err != nil { - c.Ui.Error(fmt.Sprintf("Could not update existing volume: %s", err)) - return 1 - } - if len(possible) > 0 { - out, err := formatHostVolumes(possible, formatOpts{short: true}) - if err != nil { - c.Ui.Error(fmt.Sprintf("Error formatting: %s", err)) - return 1 - } - c.Ui.Error(fmt.Sprintf("Prefix matched multiple volumes\n\n%s", out)) - return 1 - } - vol.ID = stub.ID - } - - req := &api.HostVolumeRegisterRequest{ - Volume: vol, - PolicyOverride: override, - } - resp, _, err := client.HostVolumes().Register(req, nil) - if err != nil { - c.Ui.Error(fmt.Sprintf("Error registering volume: %s", err)) - return 1 - } - vol = resp.Volume - - if resp.Warnings != "" { - c.Ui.Output( - c.Colorize().Color( - fmt.Sprintf("[bold][yellow]Volume Warnings:\n%s[reset]\n", resp.Warnings))) - } - - c.Ui.Output(fmt.Sprintf( - "Registered host volume %s with ID %s", vol.Name, vol.ID)) - - return 0 -} diff --git a/command/volume_register_host_test.go b/command/volume_register_host_test.go deleted file mode 100644 index 845f0dae848..00000000000 --- a/command/volume_register_host_test.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -package command - -import ( - "fmt" - "os" - "strings" - "testing" - - "github.com/hashicorp/cli" - "github.com/hashicorp/nomad/api" - "github.com/hashicorp/nomad/ci" - "github.com/shoenig/test/must" -) - -func TestHostVolumeRegisterCommand_Run(t *testing.T) { - ci.Parallel(t) - srv, client, url := testServer(t, true, nil) - t.Cleanup(srv.Shutdown) - - waitForNodes(t, client) - - _, err := client.Namespaces().Register(&api.Namespace{Name: "prod"}, nil) - must.NoError(t, err) - - nodes, _, err := client.Nodes().List(nil) - must.NoError(t, err) - must.Len(t, 1, nodes) - nodeID := nodes[0].ID - - hostPath := t.TempDir() - - ui := cli.NewMockUi() - cmd := &VolumeRegisterCommand{Meta: Meta{Ui: ui}} - - hclTestFile := fmt.Sprintf(` -namespace = "prod" -name = "database" -type = "host" -plugin_id = "plugin_id" -node_id = "%s" -node_pool = "default" -host_path = "%s" - -capacity = "15GB" -capacity_min = "10GiB" -capacity_max = "20G" - -constraint { - attribute = "${attr.kernel.name}" - value = "linux" -} - -constraint { - attribute = "${meta.rack}" - value = "foo" -} - -capability { - access_mode = "single-node-writer" - attachment_mode = "file-system" -} - -capability { - access_mode = "single-node-reader-only" - attachment_mode = "block-device" -} - -parameters { - foo = "bar" -} -`, nodeID, hostPath) - - file, err := os.CreateTemp(t.TempDir(), "volume-test-*.hcl") - must.NoError(t, err) - _, err = file.WriteString(hclTestFile) - must.NoError(t, err) - - args := []string{"-address", url, file.Name()} - - code := cmd.Run(args) - must.Eq(t, 0, code, must.Sprintf("got error: %s", ui.ErrorWriter.String())) - - out := ui.OutputWriter.String() - must.StrContains(t, out, "Registered host volume") - parts := strings.Split(out, " ") - id := strings.TrimSpace(parts[len(parts)-1]) - - // Verify volume was registered - got, _, err := client.HostVolumes().Get(id, &api.QueryOptions{Namespace: "prod"}) - must.NoError(t, err) - must.NotNil(t, got) - - // Verify we can update the volume without changes - args = []string{"-address", url, "-id", got.ID, file.Name()} - code = cmd.Run(args) - must.Eq(t, 0, code, must.Sprintf("got error: %s", ui.ErrorWriter.String())) - list, _, err := client.HostVolumes().List(nil, &api.QueryOptions{Namespace: "prod"}) - must.Len(t, 1, list, must.Sprintf("new volume should not be registered on update")) -} diff --git a/command/volume_register_csi_test.go b/command/volume_register_test.go similarity index 100% rename from command/volume_register_csi_test.go rename to command/volume_register_test.go diff --git a/command/volume_status.go b/command/volume_status.go index d599e349e83..22fc6afc225 100644 --- a/command/volume_status.go +++ b/command/volume_status.go @@ -52,12 +52,6 @@ Status Options: -t Format and display volumes using a Go template. - - -node-pool - Filter results by node pool, when no volume ID is provided and -type=host. - - -node - Filter results by node ID, when no volume ID is provided and -type=host. 
` return strings.TrimSpace(helpText) } @@ -69,13 +63,11 @@ func (c *VolumeStatusCommand) Synopsis() string { func (c *VolumeStatusCommand) AutocompleteFlags() complete.Flags { return mergeAutocompleteFlags(c.Meta.AutocompleteFlags(FlagSetClient), complete.Flags{ - "-type": complete.PredictSet("csi", "host"), - "-short": complete.PredictNothing, - "-verbose": complete.PredictNothing, - "-json": complete.PredictNothing, - "-t": complete.PredictAnything, - "-node": nodePredictor(c.Client, nil), - "-node-pool": nodePoolPredictor(c.Client, nil), + "-type": predictVolumeType, + "-short": complete.PredictNothing, + "-verbose": complete.PredictNothing, + "-json": complete.PredictNothing, + "-t": complete.PredictAnything, }) } @@ -90,21 +82,14 @@ func (c *VolumeStatusCommand) AutocompleteArgs() complete.Predictor { if err != nil { return []string{} } - matches := resp.Matches[contexts.Volumes] - - resp, _, err = client.Search().PrefixSearch(a.Last, contexts.HostVolumes, nil) - if err != nil { - return []string{} - } - matches = append(matches, resp.Matches[contexts.HostVolumes]...) - return matches + return resp.Matches[contexts.Volumes] }) } func (c *VolumeStatusCommand) Name() string { return "volume status" } func (c *VolumeStatusCommand) Run(args []string) int { - var typeArg, nodeID, nodePool string + var typeArg string flags := c.Meta.FlagSet(c.Name(), FlagSetClient) flags.Usage = func() { c.Ui.Output(c.Help()) } @@ -113,8 +98,6 @@ func (c *VolumeStatusCommand) Run(args []string) int { flags.BoolVar(&c.verbose, "verbose", false, "") flags.BoolVar(&c.json, "json", false, "") flags.StringVar(&c.template, "t", "", "") - flags.StringVar(&nodeID, "node", "", "") - flags.StringVar(&nodePool, "node-pool", "", "") if err := flags.Parse(args); err != nil { c.Ui.Error(fmt.Sprintf("Error parsing arguments %s", err)) @@ -147,17 +130,12 @@ func (c *VolumeStatusCommand) Run(args []string) int { id = args[0] } - switch typeArg { - case "csi", "": - if nodeID != "" || nodePool != "" { - c.Ui.Error("-node and -node-pool can only be used with -type host") - return 1 - } - return c.csiStatus(client, id) - case "host": - return c.hostVolumeStatus(client, id, nodeID, nodePool) - default: - c.Ui.Error(fmt.Sprintf("No such volume type %q", typeArg)) - return 1 + code := c.csiStatus(client, id) + if code != 0 { + return code } + + // Extend this section with other volume implementations + + return 0 } diff --git a/command/volume_status_csi.go b/command/volume_status_csi.go index 8b8b9a986be..31fdeeb2331 100644 --- a/command/volume_status_csi.go +++ b/command/volume_status_csi.go @@ -23,7 +23,7 @@ func (c *VolumeStatusCommand) csiBanner() { func (c *VolumeStatusCommand) csiStatus(client *api.Client, id string) int { // Invoke list mode if no volume id if id == "" { - return c.listCSIVolumes(client) + return c.listVolumes(client) } // get a CSI volume that matches the given prefix or a list of all matches if an @@ -55,7 +55,7 @@ func (c *VolumeStatusCommand) csiStatus(client *api.Client, id string) int { return 1 } - str, err := c.formatCSIBasic(vol) + str, err := c.formatBasic(vol) if err != nil { c.Ui.Error(fmt.Sprintf("Error formatting volume: %s", err)) return 1 @@ -65,7 +65,7 @@ func (c *VolumeStatusCommand) csiStatus(client *api.Client, id string) int { return 0 } -func (c *VolumeStatusCommand) listCSIVolumes(client *api.Client) int { +func (c *VolumeStatusCommand) listVolumes(client *api.Client) int { c.csiBanner() vols, _, err := client.CSIVolumes().List(nil) @@ -182,7 +182,7 @@ func 
csiFormatSortedVolumes(vols []*api.CSIVolumeListStub) (string, error) { return formatList(rows), nil } -func (c *VolumeStatusCommand) formatCSIBasic(vol *api.CSIVolume) (string, error) { +func (c *VolumeStatusCommand) formatBasic(vol *api.CSIVolume) (string, error) { if c.json || len(c.template) > 0 { out, err := Format(c.json, c.template, vol) if err != nil { @@ -224,13 +224,8 @@ func (c *VolumeStatusCommand) formatCSIBasic(vol *api.CSIVolume) (string, error) full = append(full, topo) } - banner := "\n[bold]Capabilities[reset]" - caps := formatCSIVolumeCapabilities(vol.RequestedCapabilities) - full = append(full, banner) - full = append(full, caps) - // Format the allocs - banner = c.Colorize().Color("\n[bold]Allocations[reset]") + banner := c.Colorize().Color("\n[bold]Allocations[reset]") allocs := formatAllocListStubs(vol.Allocations, c.verbose, c.length) full = append(full, banner) full = append(full, allocs) @@ -296,12 +291,3 @@ func csiVolMountOption(volume, request *api.CSIMountOptions) string { return out } - -func formatCSIVolumeCapabilities(caps []*api.CSIVolumeCapability) string { - lines := make([]string, len(caps)+1) - lines[0] = "Access Mode|Attachment Mode" - for i, cap := range caps { - lines[i+1] = fmt.Sprintf("%s|%s", cap.AccessMode, cap.AttachmentMode) - } - return formatList(lines) -} diff --git a/command/volume_status_host.go b/command/volume_status_host.go deleted file mode 100644 index 5f8f4232a06..00000000000 --- a/command/volume_status_host.go +++ /dev/null @@ -1,212 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package command - -import ( - "fmt" - "sort" - "strings" - - humanize "github.com/dustin/go-humanize" - "github.com/hashicorp/nomad/api" -) - -func (c *VolumeStatusCommand) hostVolumeStatus(client *api.Client, id, nodeID, nodePool string) int { - if id == "" { - return c.listHostVolumes(client, nodeID, nodePool) - } - - if nodeID != "" || nodePool != "" { - c.Ui.Error("-node or -node-pool options can only be used when no ID is provided") - return 1 - } - - opts := formatOpts{ - verbose: c.verbose, - short: c.short, - length: c.length, - json: c.json, - template: c.template, - } - - // get a host volume that matches the given prefix or a list of all matches - // if an exact match is not found. 
note we can't use the shared getByPrefix - // helper here because the List API doesn't match the required signature - - volStub, possible, err := getHostVolumeByPrefix(client, id, c.namespace) - if err != nil { - c.Ui.Error(fmt.Sprintf("Error listing volumes: %s", err)) - return 1 - } - if len(possible) > 0 { - out, err := formatHostVolumes(possible, opts) - if err != nil { - c.Ui.Error(fmt.Sprintf("Error formatting: %s", err)) - return 1 - } - c.Ui.Error(fmt.Sprintf("Prefix matched multiple volumes\n\n%s", out)) - return 1 - } - - vol, _, err := client.HostVolumes().Get(volStub.ID, nil) - if err != nil { - c.Ui.Error(fmt.Sprintf("Error querying volume: %s", err)) - return 1 - } - - str, err := formatHostVolume(vol, opts) - if err != nil { - c.Ui.Error(fmt.Sprintf("Error formatting volume: %s", err)) - return 1 - } - c.Ui.Output(c.Colorize().Color(str)) - return 0 -} - -func (c *VolumeStatusCommand) listHostVolumes(client *api.Client, nodeID, nodePool string) int { - vols, _, err := client.HostVolumes().List(&api.HostVolumeListRequest{ - NodeID: nodeID, - NodePool: nodePool, - }, nil) - if err != nil { - c.Ui.Error(fmt.Sprintf("Error querying volumes: %s", err)) - return 1 - } - - opts := formatOpts{ - verbose: c.verbose, - short: c.short, - length: c.length, - json: c.json, - template: c.template, - } - - str, err := formatHostVolumes(vols, opts) - if err != nil { - c.Ui.Error(fmt.Sprintf("Error formatting volumes: %s", err)) - return 1 - } - c.Ui.Output(c.Colorize().Color(str)) - return 0 -} - -func getHostVolumeByPrefix(client *api.Client, prefix, ns string) (*api.HostVolumeStub, []*api.HostVolumeStub, error) { - vols, _, err := client.HostVolumes().List(nil, &api.QueryOptions{ - Prefix: prefix, - Namespace: ns, - }) - - if err != nil { - return nil, nil, fmt.Errorf("error querying volumes: %s", err) - } - switch len(vols) { - case 0: - return nil, nil, fmt.Errorf("no volumes with prefix or ID %q found", prefix) - case 1: - return vols[0], nil, nil - default: - // search for exact matches to account for multiple exact ID or name - // matches across namespaces - var match *api.HostVolumeStub - exactMatchesCount := 0 - for _, vol := range vols { - if vol.ID == prefix || vol.Name == prefix { - exactMatchesCount++ - match = vol - } - } - if exactMatchesCount == 1 { - return match, nil, nil - } - return nil, vols, nil - } -} - -func formatHostVolume(vol *api.HostVolume, opts formatOpts) (string, error) { - if opts.json || len(opts.template) > 0 { - out, err := Format(opts.json, opts.template, vol) - if err != nil { - return "", fmt.Errorf("format error: %v", err) - } - return out, nil - } - - output := []string{ - fmt.Sprintf("ID|%s", vol.ID), - fmt.Sprintf("Name|%s", vol.Name), - fmt.Sprintf("Namespace|%s", vol.Namespace), - fmt.Sprintf("Plugin ID|%s", vol.PluginID), - fmt.Sprintf("Node ID|%s", vol.NodeID), - fmt.Sprintf("Node Pool|%s", vol.NodePool), - fmt.Sprintf("Capacity|%s", humanize.IBytes(uint64(vol.CapacityBytes))), - fmt.Sprintf("State|%s", vol.State), - fmt.Sprintf("Host Path|%s", vol.HostPath), - } - - // Exit early - if opts.short { - return formatKV(output), nil - } - - full := []string{formatKV(output)} - - banner := "\n[bold]Capabilities[reset]" - caps := formatHostVolumeCapabilities(vol.RequestedCapabilities) - full = append(full, banner) - full = append(full, caps) - - // Format the allocs - banner = "\n[bold]Allocations[reset]" - allocs := formatAllocListStubs(vol.Allocations, opts.verbose, opts.length) - full = append(full, banner) - full = append(full, allocs) - - return 
strings.Join(full, "\n"), nil -} - -// TODO: we could make a bunch more formatters into shared functions using this -type formatOpts struct { - verbose bool - short bool - length int - json bool - template string -} - -func formatHostVolumes(vols []*api.HostVolumeStub, opts formatOpts) (string, error) { - // Sort the output by volume ID - sort.Slice(vols, func(i, j int) bool { return vols[i].ID < vols[j].ID }) - - if opts.json || len(opts.template) > 0 { - out, err := Format(opts.json, opts.template, vols) - if err != nil { - return "", fmt.Errorf("format error: %v", err) - } - return out, nil - } - - rows := make([]string, len(vols)+1) - rows[0] = "ID|Name|Namespace|Plugin ID|Node ID|Node Pool|State" - for i, v := range vols { - rows[i+1] = fmt.Sprintf("%s|%s|%s|%s|%s|%s|%s", - limit(v.ID, opts.length), - v.Name, - v.Namespace, - v.PluginID, - limit(v.NodeID, opts.length), - v.NodePool, - v.State, - ) - } - return formatList(rows), nil -} - -func formatHostVolumeCapabilities(caps []*api.HostVolumeCapability) string { - lines := make([]string, len(caps)+1) - lines[0] = "Access Mode|Attachment Mode" - for i, cap := range caps { - lines[i+1] = fmt.Sprintf("%s|%s", cap.AccessMode, cap.AttachmentMode) - } - return formatList(lines) -} diff --git a/command/volume_status_host_test.go b/command/volume_status_host_test.go deleted file mode 100644 index 8df4c11efe1..00000000000 --- a/command/volume_status_host_test.go +++ /dev/null @@ -1,172 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package command - -import ( - "fmt" - "os" - "strings" - "testing" - - "github.com/hashicorp/cli" - "github.com/hashicorp/nomad/api" - "github.com/hashicorp/nomad/ci" - "github.com/posener/complete" - "github.com/shoenig/test/must" -) - -func TestHostVolumeStatusCommand_Args(t *testing.T) { - ci.Parallel(t) - ui := cli.NewMockUi() - cmd := &VolumeStatusCommand{Meta: Meta{Ui: ui}} - - code := cmd.Run([]string{ - "-type", "host", - "-node", "6063016a-9d4c-11ef-85fc-9be98efe7e76", - "-node-pool", "prod", - "6e3e80f2-9d4c-11ef-97b1-d38cf64416a4", - }) - must.One(t, code) - - out := ui.ErrorWriter.String() - must.StrContains(t, out, "-node or -node-pool options can only be used when no ID is provided") -} - -func TestHostVolumeStatusCommand_List(t *testing.T) { - ci.Parallel(t) - srv, client, url := testServer(t, true, nil) - t.Cleanup(srv.Shutdown) - - waitForNodes(t, client) - - _, err := client.Namespaces().Register(&api.Namespace{Name: "prod"}, nil) - must.NoError(t, err) - - nodes, _, err := client.Nodes().List(nil) - must.NoError(t, err) - must.Len(t, 1, nodes) - nodeID := nodes[0].ID - - ui := cli.NewMockUi() - - vols := []api.NamespacedID{ - {Namespace: "prod", ID: "database"}, - {Namespace: "prod", ID: "certs"}, - {Namespace: "default", ID: "example"}, - } - - for _, vol := range vols { - hclTestFile := fmt.Sprintf(` -namespace = "%s" -name = "%s" -type = "host" -plugin_id = "mkdir" -node_id = "%s" -node_pool = "default" -capability { - access_mode = "single-node-reader-only" - attachment_mode = "file-system" -} -`, vol.Namespace, vol.ID, nodeID) - - file, err := os.CreateTemp(t.TempDir(), "volume-test-*.hcl") - must.NoError(t, err) - _, err = file.WriteString(hclTestFile) - must.NoError(t, err) - - args := []string{"-address", url, "-detach", file.Name()} - cmd := &VolumeCreateCommand{Meta: Meta{Ui: ui}} - code := cmd.Run(args) - must.Eq(t, 0, code, must.Sprintf("got error: %s", ui.ErrorWriter.String())) - - out := ui.OutputWriter.String() - must.StrContains(t, out, "Created 
host volume") - ui.OutputWriter.Reset() - } - - cmd := &VolumeStatusCommand{Meta: Meta{Ui: ui}} - args := []string{"-address", url, "-type", "host", "-namespace", "prod"} - code := cmd.Run(args) - must.Eq(t, 0, code, must.Sprintf("got error: %s", ui.ErrorWriter.String())) - out := ui.OutputWriter.String() - must.StrContains(t, out, "certs") - must.StrContains(t, out, "database") - must.StrNotContains(t, out, "example") -} - -func TestHostVolumeStatusCommand_Get(t *testing.T) { - ci.Parallel(t) - srv, client, url := testServer(t, true, nil) - t.Cleanup(srv.Shutdown) - - waitForNodes(t, client) - - _, err := client.Namespaces().Register(&api.Namespace{Name: "prod"}, nil) - must.NoError(t, err) - - nodes, _, err := client.Nodes().List(nil) - must.NoError(t, err) - must.Len(t, 1, nodes) - nodeID := nodes[0].ID - hostPath := t.TempDir() - - ui := cli.NewMockUi() - - hclTestFile := fmt.Sprintf(` -namespace = "prod" -name = "example" -type = "host" -plugin_id = "mkdir" -node_id = "%s" -node_pool = "default" -host_path = "%s" -capability { - access_mode = "single-node-reader-only" - attachment_mode = "file-system" -} -`, nodeID, hostPath) - - file, err := os.CreateTemp(t.TempDir(), "volume-test-*.hcl") - must.NoError(t, err) - _, err = file.WriteString(hclTestFile) - must.NoError(t, err) - - args := []string{"-address", url, file.Name()} - regCmd := &VolumeRegisterCommand{Meta: Meta{Ui: ui}} - code := regCmd.Run(args) - must.Eq(t, 0, code, must.Sprintf("got error: %s", ui.ErrorWriter.String())) - - out := ui.OutputWriter.String() - must.StrContains(t, out, "Registered host volume") - parts := strings.Split(out, " ") - id := strings.TrimSpace(parts[len(parts)-1]) - - ui.OutputWriter.Reset() - - // autocomplete - cmd := &VolumeStatusCommand{Meta: Meta{Ui: ui, namespace: "*", flagAddress: url}} - cmd.Meta.namespace = "*" - prefix := id[:len(id)-5] - cargs := complete.Args{Last: prefix} - predictor := cmd.AutocompleteArgs() - - res := predictor.Predict(cargs) - must.SliceLen(t, 1, res) - must.Eq(t, id, res[0]) - - // missing the namespace - cmd = &VolumeStatusCommand{Meta: Meta{Ui: ui}} - args = []string{"-address", url, "-type", "host", id} - code = cmd.Run(args) - must.Eq(t, 1, code) - must.StrContains(t, ui.ErrorWriter.String(), - "Error listing volumes: no volumes with prefix or ID") - ui.ErrorWriter.Reset() - - args = []string{"-address", url, "-type", "host", "-namespace", "prod", id} - code = cmd.Run(args) - must.Eq(t, 0, code, must.Sprintf("got error: %s", ui.ErrorWriter.String())) - out = ui.OutputWriter.String() - must.StrContains(t, out, "example") -} diff --git a/command/volume_status_csi_test.go b/command/volume_status_test.go similarity index 100% rename from command/volume_status_csi_test.go rename to command/volume_status_test.go diff --git a/demo/hostvolume/_test-plugin.sh b/demo/hostvolume/_test-plugin.sh deleted file mode 100755 index f465d75c2c9..00000000000 --- a/demo/hostvolume/_test-plugin.sh +++ /dev/null @@ -1,84 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -set -euo pipefail - -help() { - cat < [target dir] [uuid] - -Args: - plugin: path to plugin executable - operation: fingerprint, create, or delete - create and delete must be idempotent. - any other operation will be passed into the plugin, - to see how it handles invalid operations. 
- target dir: directory to create the volume (defaults to /tmp) - uuid: volume id to use (usually assigned by Nomad; - defaults to 74564d17-ce50-0bc1-48e5-6feaa41ede48) - -Examples: - $0 ./example-plugin-mkfs fingerprint - $0 ./example-plugin-mkfs create - $0 ./example-plugin-mkfs create /some/other/place - $0 ./example-plugin-mkfs delete -EOF -} - -if [[ $# -eq 0 || "$*" =~ -h ]]; then - help - exit -fi -if [ $# -lt 2 ]; then - help - exit 1 -fi - -plugin="$1" -op="$2" -volumes_dir="${3:-/tmp}" -uuid="${4:-74564d17-ce50-0bc1-48e5-6feaa41ede48}" -node_id='0b62d807-6101-a80f-374d-e1c430abbf47' -plugin_dir="$(dirname "$plugin")" - -case $op in - fingerprint) - args='fingerprint' - export DHV_OPERATION='fingerprint' - ;; - - create) - args='create' - export DHV_OPERATION='create' - export DHV_VOLUMES_DIR="$volumes_dir" - export DHV_VOLUME_NAME=test - export DHV_VOLUME_ID="$uuid" - export DHV_NODE_ID="$node_id" - export DHV_CAPACITY_MAX_BYTES=50000000 # 50mb - export DHV_CAPACITY_MIN_BYTES=50000000 # 50mb - export DHV_PARAMETERS='{"a": "ayy"}' - export DHV_PLUGIN_DIR="$plugin_dir" - ;; - - delete) - args='delete' - export DHV_OPERATION='delete' - export DHV_VOLUMES_DIR="$volumes_dir" - export DHV_NODE_ID="$node_id" - export DHV_VOLUME_NAME=test - export DHV_VOLUME_ID="$uuid" - export DHV_PARAMETERS='{"a": "ayy"}' - export DHV_PLUGIN_DIR="$plugin_dir" - ;; - - *) - args="$*" - ;; -esac - -set -x -eval "$plugin $args" diff --git a/demo/hostvolume/check.sh b/demo/hostvolume/check.sh deleted file mode 100755 index c89a36c5a54..00000000000 --- a/demo/hostvolume/check.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -set -xeuo pipefail - -nomad volume status -type=host -verbose -nomad operator api /v1/nodes | jq '.[].HostVolumes' - -addr="$(nomad service info -json job | jq -r '.[0].Address'):8000" -curl -sS "$addr/external/" | grep hi -curl -sS "$addr/internal/" | grep hi - -echo '💚 looks good! 💚' diff --git a/demo/hostvolume/e2e.sh b/demo/hostvolume/e2e.sh deleted file mode 100755 index d27070cafac..00000000000 --- a/demo/hostvolume/e2e.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -set -xeuo pipefail - -./setup.sh -./check.sh -./teardown.sh diff --git a/demo/hostvolume/example-plugin-mkfs b/demo/hostvolume/example-plugin-mkfs deleted file mode 100755 index 33a3e5742c5..00000000000 --- a/demo/hostvolume/example-plugin-mkfs +++ /dev/null @@ -1,131 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -set -euo pipefail - -version='0.0.1' -fingerprint() { - printf '{"version": "%s"}' "$version" -} - -help() { - cat < [path] - -Options: - -v|--verbose: Show shell commands (set -x) - -h|--help: Print this help text and exit - -Operations: - create: Creates and mounts the device at path (required) - required environment: - CAPACITY_MIN_BYTES - delete: Unmounts and deletes the device at path (required) - version: Outputs this plugin's version: $version - fingerprint: Outputs plugin metadata: $(fingerprint) - -EOF -} - -# parse args -[ $# -eq 0 ] && { help; exit 1; } -for arg in "$@"; do - case $arg in - -h|-help|--help) help; exit 0 ;; - fingerprint|fingerprint) fingerprint; exit 0 ;; - version|version) echo "$version"; exit 0 ;; - -v|--verbose) set -x; shift; ;; - esac -done - -# OS detect -if [[ "$OSTYPE" == "linux-"* ]]; then - ext=ext4 - mount=/usr/bin/mount - mkfsExec() { - dd if=/dev/zero of="$1".$ext bs=1M count="$2" - mkfs.ext4 "$1".$ext 1>&2 - } - mountExec() { - $mount "$1".$ext "$1" - } - st() { - stat --format='%s' "$1" - } -elif [[ "$OSTYPE" == "darwin"* ]]; then - ext=dmg - mount=/sbin/mount - mkfsExec() { - hdiutil create -megabytes "$2" -layout NONE -fs apfs -volname "$1" "$1" 1>&2 - } - mountExec() { - hdiutil attach "$1".$ext 1>&2 - } - st() { - stat -f %z "$1" - } -else - echo "$OSTYPE is an unsupported OS" - exit 1 -fi - -validate_path() { - local path="$1" - if [[ ! "$path" =~ [0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12} ]]; then - 1>&2 echo "expected uuid-lookin ID in target host path; got: '$path'" - return 1 - fi -} - -is_mounted() { - $mount | grep -q " $1 " -} - -create_volume() { - local path="$1" - validate_path "$path" - local bytes="$2" - - # translate to mb for dd block size - local megs=$((bytes / 1024 / 1024)) # lazy, approximate - - mkdir -p "$(dirname "$path")" - # the extra conditionals are for idempotency - if [ ! -f "$path.$ext" ]; then - mkfsExec "$path" $megs - fi - if ! is_mounted "$path"; then - mkdir -p "$path" - mountExec "$path" - fi -} - -delete_volume() { - local path="$1" - validate_path "$path" - is_mounted "$path" && umount "$path" - rm -rf "$path" - rm -f "$path"."$ext" -} - -host_path="$DHV_VOLUMES_DIR/$DHV_VOLUME_ID" -case "$1" in - "create") - create_volume "$host_path" "$DHV_CAPACITY_MIN_BYTES" - # output what Nomad expects - bytes="$(st "$host_path".$ext)" - printf '{"path": "%s", "bytes": %s}' "$host_path" "$bytes" - ;; - "delete") - delete_volume "$host_path" ;; - *) - echo "unknown operation: $1" 1>&2 - exit 1 ;; -esac diff --git a/demo/hostvolume/external-plugin.volume.hcl b/demo/hostvolume/external-plugin.volume.hcl deleted file mode 100644 index 6c9f17e8d50..00000000000 --- a/demo/hostvolume/external-plugin.volume.hcl +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -name = "external-plugin" -type = "host" -# the executable named `example-plugin-mkfs` must be placed in the -# -host-volume-plugin-dir (config: client.host_volume_plugin_dir) -# or you will get an error creating the volume: -# * could not place volume "external-plugin": no node meets constraints -# The default location is /host_volume_plugins -plugin_id = "example-plugin-mkfs" -capacity_min = "50mb" -capacity_max = "50mb" - -capability { - access_mode = "single-node-writer" - attachment_mode = "file-system" -} - -parameters { - a = "ayy" -} diff --git a/demo/hostvolume/internal-plugin.volume.hcl b/demo/hostvolume/internal-plugin.volume.hcl deleted file mode 100644 index bbea133e1a6..00000000000 --- a/demo/hostvolume/internal-plugin.volume.hcl +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -name = "internal-plugin" -type = "host" -# this plugin is built into Nomad; -# it simply creates a directory. -plugin_id = "mkdir" - -capability { - access_mode = "single-node-writer" - attachment_mode = "file-system" -} - diff --git a/demo/hostvolume/job.nomad.hcl b/demo/hostvolume/job.nomad.hcl deleted file mode 100644 index 1b0e0162665..00000000000 --- a/demo/hostvolume/job.nomad.hcl +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -job "job" { - group "g" { - task "t" { - driver = "docker" - config { - image = "python:slim" - command = "bash" - args = ["-xc", <<-EOF - for dir in internal external; do - touch ${NOMAD_TASK_DIR}/$dir/hiii - done - python -m http.server -d ${NOMAD_TASK_DIR} --bind=:: - EOF - ] - ports = ["http"] - } - volume_mount { - volume = "int" - destination = "${NOMAD_TASK_DIR}/internal" - } - volume_mount { - volume = "ext" - destination = "${NOMAD_TASK_DIR}/external" - } - } - volume "int" { - type = "host" - source = "internal-plugin" - } - volume "ext" { - type = "host" - source = "external-plugin" - } - network { - port "http" { - static = 8000 - } - } - service { - name = "job" - port = "http" - provider = "nomad" - } - } -} diff --git a/demo/hostvolume/no-plugin.volume.hcl b/demo/hostvolume/no-plugin.volume.hcl deleted file mode 100644 index ee215bfd099..00000000000 --- a/demo/hostvolume/no-plugin.volume.hcl +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -name = "no-plugin" -type = "host" - -# this volume spec can be used with 'volume register' after filling in the -# host_path and node_id values below -host_path = "TODO" # absolute path of the volume that was created out-of-band -node_id = "TODO" # ID of the node where the volume was created - -capability { - access_mode = "single-node-writer" - attachment_mode = "file-system" -} diff --git a/demo/hostvolume/setup.sh b/demo/hostvolume/setup.sh deleted file mode 100755 index 9a9fc7be719..00000000000 --- a/demo/hostvolume/setup.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -set -xeuo pipefail - -nomad volume create external-plugin.volume.hcl -nomad volume create internal-plugin.volume.hcl - -nomad job run job.nomad.hcl - -nomad volume status -type=host -verbose -nomad operator api /v1/nodes | jq '.[].HostVolumes' - diff --git a/demo/hostvolume/teardown.sh b/demo/hostvolume/teardown.sh deleted file mode 100755 index d4d17d67fa4..00000000000 --- a/demo/hostvolume/teardown.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -set -xeuo pipefail - -nomad job stop job || true - -for _ in {1..5}; do - sleep 3 - ids="$(nomad volume status -type=host -verbose | awk '/ternal-plugin/ {print$1}')" - test -z "$ids" && break - for id in $ids; do - nomad volume delete -type=host "$id" || continue - done -done - diff --git a/dev/hooks/pre-push b/dev/hooks/pre-push index 2a62a40f335..0f6cc35f4be 100755 --- a/dev/hooks/pre-push +++ b/dev/hooks/pre-push @@ -31,7 +31,6 @@ if [ -f version/version_ent.go ]; then fi # do not push directly to main, stable-*, release/* -# do not push Enterprise tags # ==================== while read local_ref local_sha remote_ref remote_sha do @@ -46,13 +45,5 @@ do if echo "$remote_ref"|grep -q 'refs/heads/release/.*'; then fail "refusing to push directly to a branch prefixed \`release/\`" fi - - if echo "$remote_ref" | grep -q 'refs/tags/v.*\+ent'; then - fail "refusing to push Nomad Enterprise tag" - fi - - if echo "$remote_ref" | grep -q 'refs/tags/v.*\+pro'; then - fail "refusing to push Nomad Enterprise (pro) tag" - fi - done + diff --git a/drivers/docker/config_test.go b/drivers/docker/config_test.go index d30e7cb544a..7b7fd5a1e5f 100644 --- a/drivers/docker/config_test.go +++ b/drivers/docker/config_test.go @@ -646,6 +646,7 @@ func TestConfig_Capabilities(t *testing.T) { NetIsolationModes: []drivers.NetIsolationMode{"host", "group", "task"}, MustInitiateNetwork: true, MountConfigs: 0, + RemoteTasks: false, DisableLogCollection: false, }, }, @@ -659,6 +660,7 @@ func TestConfig_Capabilities(t *testing.T) { NetIsolationModes: []drivers.NetIsolationMode{"host", "group", "task"}, MustInitiateNetwork: true, MountConfigs: 0, + RemoteTasks: false, DisableLogCollection: true, }, }, @@ -672,6 +674,7 @@ func TestConfig_Capabilities(t *testing.T) { NetIsolationModes: []drivers.NetIsolationMode{"host", "group", "task"}, MustInitiateNetwork: true, MountConfigs: 0, + RemoteTasks: false, DisableLogCollection: false, }, }, diff --git a/drivers/shared/executor/executor_test.go b/drivers/shared/executor/executor_test.go index 4122dcff05e..d9941fc1906 100644 --- a/drivers/shared/executor/executor_test.go +++ b/drivers/shared/executor/executor_test.go @@ -1,7 +1,7 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -//go:build linux +//go:build !windows package executor diff --git a/e2e/.gitignore b/e2e/.gitignore new file mode 100644 index 00000000000..cfc151d21cd --- /dev/null +++ b/e2e/.gitignore @@ -0,0 +1 @@ +provisioning.json diff --git a/e2e/README.md b/e2e/README.md index 0c4f4b60699..e6fde397641 100644 --- a/e2e/README.md +++ b/e2e/README.md @@ -125,14 +125,3 @@ You can update the `nomad_version` variable, or simply rebuild the binary you have at the `nomad_local_binary` path so that Terraform picks up the changes. Then run `terraform plan`/`terraform apply` again. This will update Nomad in place, making the minimum amount of changes necessary. 
- -### ...Use Vault within a Test - -The infrastructure build enables a Vault KV2 mount whose mount point matches the value of the -`CLUSTER_UNIQUE_IDENTIFIER` environment variable and is generated -[here](https://github.com/hashicorp/nomad/blob/687335639bc6d4d522c91d6026d9e3f149aa75dc/e2e/terraform/provision-infra/main.tf#L16). - -All Nomad workloads which include a -[Vault block](https://developer.hashicorp.com/nomad/docs/job-specification/vault) will be granted -access to secrets according to the -[default policy document](./terraform/provision-infra/templates/vault-acl-jwt-policy-nomad-workloads.hcl.tpl). diff --git a/e2e/allocexec/docker_exec_test.go b/e2e/allocexec/docker_exec_test.go index 17c165af4ba..2956d700d81 100644 --- a/e2e/allocexec/docker_exec_test.go +++ b/e2e/allocexec/docker_exec_test.go @@ -7,7 +7,6 @@ import ( "archive/tar" "bytes" "context" - "fmt" "strings" "testing" "time" @@ -28,14 +27,13 @@ func TestDockerAllocExec(t *testing.T) { } func testDockerExecStdin(t *testing.T) { - sub, cleanup := jobs3.Submit(t, "./input/sleepytar.hcl") + _, cleanup := jobs3.Submit(t, "./input/sleepytar.hcl") t.Cleanup(cleanup) client, err := nomadapi.NewClient(nomadapi.DefaultConfig()) must.NoError(t, err) - filter := fmt.Sprintf("JobID == \"%s\"", sub.JobID()) - allocations, _, err := client.Allocations().List(&nomadapi.QueryOptions{Filter: filter}) + allocations, _, err := client.Allocations().List(nil) must.NoError(t, err) must.SliceLen(t, 1, allocations) @@ -87,7 +85,7 @@ func testDockerExecStdin(t *testing.T) { nil, nil, ) - must.NoError(t, err, must.Sprintf("error executing command inside the container: %v", err)) + must.NoError(t, err) must.Zero(t, exitCode) // check the output of tar diff --git a/e2e/cni/cni_test.go b/e2e/cni/cni_test.go index 92cbf6a3903..b2b3bcde18e 100644 --- a/e2e/cni/cni_test.go +++ b/e2e/cni/cni_test.go @@ -40,7 +40,7 @@ func testCNIArgs(t *testing.T) { job, _ := jobs3.Submit(t, "./input/cni_args.nomad.hcl") logs := job.Exec("group", "task", []string{"cat", "local/victory"}) t.Logf("FancyMessage: %s", logs.Stdout) - // "default" is the Nomad node's pool, interpolated in the jobspec, passed - // through the CNI plugin, and cat-ed by the task. - must.Eq(t, "default\n", logs.Stdout) + // "global" is the Nomad node's region, interpolated in the jobspec, + // passed through the CNI plugin, and cat-ed by the task. + must.Eq(t, "global\n", logs.Stdout) } diff --git a/e2e/cni/input/cni_args.nomad.hcl b/e2e/cni/input/cni_args.nomad.hcl index ebc33225e5e..1d58b774a8c 100644 --- a/e2e/cni/input/cni_args.nomad.hcl +++ b/e2e/cni/input/cni_args.nomad.hcl @@ -8,10 +8,9 @@ job "cni_args" { cni { # feature under test args = { - # the message gets placed as a file called "victory" in the task dir - # specified here by the cni_args.sh plugin. Using node pool allows us - # to test interpolation as an extra. - FancyMessage = "${node.pool}" + # the message gets placed as a file called "victory" + # in the task dir specified here by the cni_args.sh plugin + FancyMessage = "${node.region}" FancyTaskDir = "${NOMAD_ALLOC_DIR}/task/local" } } diff --git a/e2e/dynamic_host_volumes/doc.go b/e2e/dynamic_host_volumes/doc.go deleted file mode 100644 index 6df9474952b..00000000000 --- a/e2e/dynamic_host_volumes/doc.go +++ /dev/null @@ -1,7 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -package dynamic_host_volumes - -// This package contains only tests, so this is a placeholder file to -// make sure builds don't fail with "no non-test Go files in" errors diff --git a/e2e/dynamic_host_volumes/dynamic_host_volumes_test.go b/e2e/dynamic_host_volumes/dynamic_host_volumes_test.go deleted file mode 100644 index da99ca6a020..00000000000 --- a/e2e/dynamic_host_volumes/dynamic_host_volumes_test.go +++ /dev/null @@ -1,318 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package dynamic_host_volumes - -import ( - "context" - "fmt" - "testing" - "time" - - "github.com/hashicorp/nomad/api" - nomadapi "github.com/hashicorp/nomad/api" - "github.com/hashicorp/nomad/e2e/e2eutil" - "github.com/hashicorp/nomad/e2e/v3/jobs3" - "github.com/hashicorp/nomad/e2e/v3/volumes3" - "github.com/shoenig/test/must" - "github.com/shoenig/test/wait" -) - -// TestDynamicHostVolumes_CreateWorkflow tests the workflow where a dynamic host -// volume is created by a plugin and then mounted by a job. -func TestDynamicHostVolumes_CreateWorkflow(t *testing.T) { - - start := time.Now() - nomad := e2eutil.NomadClient(t) - e2eutil.WaitForLeader(t, nomad) - e2eutil.WaitForNodesReady(t, nomad, 1) - - _, cleanupVol := volumes3.Create(t, "input/volume-create.nomad.hcl", - volumes3.WithClient(nomad)) - t.Cleanup(cleanupVol) - - t.Logf("[%v] submitting mounter job", time.Since(start)) - _, cleanupJob := jobs3.Submit(t, "./input/mount-created.nomad.hcl") - t.Cleanup(cleanupJob) - t.Logf("[%v] test complete, cleaning up", time.Since(start)) -} - -// TestDynamicHostVolumes_RegisterWorkflow tests the workflow where a dynamic -// host volume is created out-of-band and registered by a job, then mounted by -// another job. -func TestDynamicHostVolumes_RegisterWorkflow(t *testing.T) { - - start := time.Now() - nomad := e2eutil.NomadClient(t) - e2eutil.WaitForLeader(t, nomad) - e2eutil.WaitForNodesReady(t, nomad, 1) - - submitted, cleanup := jobs3.Submit(t, "./input/register-volumes.nomad.hcl", - jobs3.Dispatcher(), - ) - t.Cleanup(cleanup) - t.Logf("[%v] register job %q created", time.Since(start), submitted.JobID()) - - _, err := e2eutil.Command( - "nomad", "acl", "policy", "apply", - "-namespace", "default", "-job", submitted.JobID(), - "register-volumes-policy", "./input/register-volumes.policy.hcl") - must.NoError(t, err) - t.Logf("[%v] ACL policy for job %q created", time.Since(start), submitted.JobID()) - - must.NoError(t, e2eutil.Dispatch(submitted.JobID(), - map[string]string{ - "vol_name": "registered-volume", - "vol_size": "1G", - "vol_path": "/tmp/registered-volume", - }, "")) - t.Logf("[%v] job dispatched", time.Since(start)) - - must.Wait(t, wait.InitialSuccess( - wait.ErrorFunc(func() error { - dispatches, err := e2eutil.DispatchedJobs(submitted.JobID()) - if err != nil { - return err - } - if len(dispatches) == 0 { - return fmt.Errorf("no dispatched job for %q", submitted.JobID()) - } - - jobID := dispatches[0]["ID"] - must.NotEq(t, "", jobID, - must.Sprintf("invalid dispatched jobs output: %v", dispatches)) - - allocs, _, err := nomad.Jobs().Allocations(jobID, true, nil) - if len(allocs) == 0 || allocs[0].ClientStatus != "complete" { - out, _ := e2eutil.AllocLogs(allocs[0].ID, "default", e2eutil.LogsStdErr) - return fmt.Errorf("allocation status was %q. 
logs: %s", - allocs[0].ClientStatus, out) - } - - t.Logf("[%v] dispatched job done", time.Since(start)) - return nil - }), - wait.Timeout(10*time.Second), - wait.Gap(50*time.Millisecond), - )) - - out, err := e2eutil.Command("nomad", "volume", "status", "-verbose", "-type", "host") - must.NoError(t, err) - vols, err := e2eutil.ParseColumns(out) - must.NoError(t, err) - - var volID string - var nodeID string - for _, vol := range vols { - if vol["Name"] == "registered-volume" { - volID = vol["ID"] - nodeID = vol["Node ID"] - } - } - must.NotEq(t, "", volID, must.Sprintf("volume was not registered: %s", out)) - - t.Cleanup(func() { - _, err := e2eutil.Command("nomad", "volume", "delete", "-type", "host", volID) - must.NoError(t, err) - }) - - must.Wait(t, wait.InitialSuccess( - wait.ErrorFunc(func() error { - node, _, err := nomad.Nodes().Info(nodeID, nil) - if err != nil { - return err - } - _, ok := node.HostVolumes["registered-volume"] - if !ok { - return fmt.Errorf("node %q did not fingerprint volume %q", nodeID, volID) - } - vol, _, err := nomad.HostVolumes().Get(volID, nil) - if err != nil { - return err - } - if vol.State != "ready" { - return fmt.Errorf("node fingerprinted volume but status was not updated") - } - - t.Logf("[%v] volume %q is ready", time.Since(start), volID) - return nil - }), - wait.Timeout(10*time.Second), - wait.Gap(50*time.Millisecond), - )) - - t.Logf("[%v] submitting mounter job", time.Since(start)) - _, cleanup2 := jobs3.Submit(t, "./input/mount-registered.nomad.hcl") - t.Cleanup(cleanup2) - t.Logf("[%v] test complete, cleaning up", time.Since(start)) -} - -// TestDynamicHostVolumes_StickyVolumes tests where a job marks a volume as -// sticky and its allocations should have strong associations with specific -// volumes as they are replaced -func TestDynamicHostVolumes_StickyVolumes(t *testing.T) { - - start := time.Now() - nomad := e2eutil.NomadClient(t) - e2eutil.WaitForLeader(t, nomad) - e2eutil.WaitForNodesReady(t, nomad, 2) - - // TODO: if we create # of volumes == # of nodes, we can make test flakes - // stand out more easily - - vol1Sub, cleanup1 := volumes3.Create(t, "input/volume-sticky.nomad.hcl", - volumes3.WithClient(nomad)) - t.Cleanup(cleanup1) - - vol2Sub, cleanup2 := volumes3.Create(t, "input/volume-sticky.nomad.hcl", - volumes3.WithClient(nomad)) - t.Cleanup(cleanup2) - - nodeToVolMap := map[string]string{ - vol1Sub.NodeID(): vol1Sub.VolumeID(), - vol2Sub.NodeID(): vol2Sub.VolumeID(), - } - - t.Logf("[%v] submitting sticky volume mounter job", time.Since(start)) - jobSub, cleanupJob := jobs3.Submit(t, "./input/sticky.nomad.hcl") - t.Cleanup(cleanupJob) - - allocID1 := jobSub.Allocs()[0].ID - alloc, _, err := nomad.Allocations().Info(allocID1, nil) - must.NoError(t, err) - - selectedNodeID := alloc.NodeID - selectedVolID := nodeToVolMap[selectedNodeID] - - t.Logf("[%v] volume %q on node %q was selected", - time.Since(start), selectedVolID, selectedNodeID) - - // Test: force reschedule - - _, err = nomad.Allocations().Stop(alloc, nil) - must.NoError(t, err) - - t.Logf("[%v] stopped allocation %q", time.Since(start), alloc.ID) - - var allocID2 string - - must.Wait(t, wait.InitialSuccess( - wait.ErrorFunc(func() error { - allocs, _, err := nomad.Jobs().Allocations(jobSub.JobID(), true, nil) - must.NoError(t, err) - if len(allocs) != 2 { - return fmt.Errorf("alloc not started") - } - for _, a := range allocs { - if a.ID != allocID1 { - allocID2 = a.ID - if a.ClientStatus != api.AllocClientStatusRunning { - return fmt.Errorf("replacement alloc not 
running") - } - } - } - return nil - }), - wait.Timeout(10*time.Second), - wait.Gap(50*time.Millisecond), - )) - - newAlloc, _, err := nomad.Allocations().Info(allocID2, nil) - must.NoError(t, err) - must.Eq(t, selectedNodeID, newAlloc.NodeID) - t.Logf("[%v] replacement alloc %q is running", time.Since(start), newAlloc.ID) - - // Test: drain node - - t.Logf("[%v] draining node %q", time.Since(start), selectedNodeID) - cleanup, err := drainNode(nomad, selectedNodeID, time.Second*20) - t.Cleanup(cleanup) - must.NoError(t, err) - - must.Wait(t, wait.InitialSuccess( - wait.ErrorFunc(func() error { - evals, _, err := nomad.Jobs().Evaluations(jobSub.JobID(), nil) - if err != nil { - return err - } - - got := map[string]string{} - - for _, eval := range evals { - got[eval.ID[:8]] = fmt.Sprintf("status=%q trigger=%q create_index=%d", - eval.Status, - eval.TriggeredBy, - eval.CreateIndex, - ) - if eval.Status == nomadapi.EvalStatusBlocked { - return nil - } - } - - return fmt.Errorf("expected blocked eval, got evals => %#v", got) - }), - wait.Timeout(10*time.Second), - wait.Gap(50*time.Millisecond), - )) - - t.Logf("[%v] undraining node %q", time.Since(start), selectedNodeID) - cleanup() - - var allocID3 string - must.Wait(t, wait.InitialSuccess( - wait.ErrorFunc(func() error { - allocs, _, err := nomad.Jobs().Allocations(jobSub.JobID(), true, nil) - must.NoError(t, err) - if len(allocs) != 3 { - return fmt.Errorf("alloc not started") - } - for _, a := range allocs { - if a.ID != allocID1 && a.ID != allocID2 { - allocID3 = a.ID - if a.ClientStatus != api.AllocClientStatusRunning { - return fmt.Errorf("replacement alloc %q not running", allocID3) - } - } - } - return nil - }), - wait.Timeout(10*time.Second), - wait.Gap(50*time.Millisecond), - )) - - newAlloc, _, err = nomad.Allocations().Info(allocID3, nil) - must.NoError(t, err) - must.Eq(t, selectedNodeID, newAlloc.NodeID) - t.Logf("[%v] replacement alloc %q is running", time.Since(start), newAlloc.ID) - -} - -func drainNode(nomad *nomadapi.Client, nodeID string, timeout time.Duration) (func(), error) { - resp, err := nomad.Nodes().UpdateDrainOpts(nodeID, &nomadapi.DrainOptions{ - DrainSpec: &nomadapi.DrainSpec{}, - MarkEligible: false, - }, nil) - if err != nil { - return func() {}, err - } - - cleanup := func() { - nomad.Nodes().UpdateDrainOpts(nodeID, &nomadapi.DrainOptions{ - MarkEligible: true}, nil) - } - - ctx, cancel := context.WithTimeout(context.TODO(), timeout) - defer cancel() - drainCh := nomad.Nodes().MonitorDrain(ctx, nodeID, resp.EvalCreateIndex, false) - - for { - select { - case <-ctx.Done(): - return cleanup, err - case msg := <-drainCh: - if msg == nil { - return cleanup, nil - } - } - } -} diff --git a/e2e/dynamic_host_volumes/input/mount-created.nomad.hcl b/e2e/dynamic_host_volumes/input/mount-created.nomad.hcl deleted file mode 100644 index edd712b0fc2..00000000000 --- a/e2e/dynamic_host_volumes/input/mount-created.nomad.hcl +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -job "example" { - - type = "batch" - - group "web" { - - constraint { - attribute = "${attr.kernel.name}" - value = "linux" - } - - network { - mode = "bridge" - port "www" { - to = 8001 - } - } - - volume "data" { - type = "host" - source = "created-volume" - } - - task "http" { - - driver = "docker" - config { - image = "busybox:1" - command = "httpd" - args = ["-v", "-f", "-p", "8001", "-h", "/var/www"] - ports = ["www"] - } - - volume_mount { - volume = "data" - destination = "/var/www" - } - - resources { - cpu = 128 - memory = 128 - } - - } - } -} diff --git a/e2e/dynamic_host_volumes/input/mount-registered.nomad.hcl b/e2e/dynamic_host_volumes/input/mount-registered.nomad.hcl deleted file mode 100644 index 21c3a15c67e..00000000000 --- a/e2e/dynamic_host_volumes/input/mount-registered.nomad.hcl +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -job "example" { - - type = "batch" - - group "web" { - - constraint { - attribute = "${attr.kernel.name}" - value = "linux" - } - - network { - mode = "bridge" - port "www" { - to = 8001 - } - } - - volume "data" { - type = "host" - source = "registered-volume" - } - - task "http" { - - driver = "docker" - config { - image = "busybox:1" - command = "httpd" - args = ["-v", "-f", "-p", "8001", "-h", "/var/www"] - ports = ["www"] - } - - volume_mount { - volume = "data" - destination = "/var/www" - } - - resources { - cpu = 128 - memory = 128 - } - - } - } -} diff --git a/e2e/dynamic_host_volumes/input/register-volumes.nomad.hcl b/e2e/dynamic_host_volumes/input/register-volumes.nomad.hcl deleted file mode 100644 index 1ff98654b37..00000000000 --- a/e2e/dynamic_host_volumes/input/register-volumes.nomad.hcl +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -job "register-volumes" { - - type = "batch" - - parameterized { - meta_required = ["vol_name", "vol_size", "vol_path"] - } - - group "group" { - - restart { - attempts = 0 - mode = "fail" - } - - task "task" { - - driver = "raw_exec" - - config { - command = "bash" - args = ["${NOMAD_TASK_DIR}/register.sh", "${node.unique.id}"] - } - - template { - destination = "local/register.sh" - data = < ECS ARN, so grab the updated + // allocation's task state. + arn := arnForAlloc(t, tc.Nomad().Allocations(), allocID) + + // Use ARN to lookup status of ECS task in AWS + ensureECSRunning(ctx, t, ecsClient, arn) + + t.Logf("Task %s is running!", arn) + + // Stop the job + e2eutil.WaitForJobStopped(t, tc.Nomad(), jobID) + + // Ensure it is stopped in ECS + input := ecs.DescribeTasksInput{ + Cluster: aws.String("nomad-rtd-e2e"), + Tasks: []string{arn}, + } + testutil.WaitForResult(func() (bool, error) { + resp, err := ecsClient.DescribeTasks(ctx, &input) + if err != nil { + return false, err + } + status := *resp.Tasks[0].LastStatus + return status == ecsTaskStatusStopped, fmt.Errorf("ecs task is not stopped: %s", status) + }, func(err error) { + t.Fatalf("error retrieving ecs task status: %v", err) + }) +} + +// TestECSDrain asserts an ECS job may be started, drained from one node, and +// is managed by a new node without stopping and restarting the remote task. 
+func (tc *RemoteTasksTest) TestECSDrain(f *framework.F) { + t := f.T() + + ctx := context.Background() + + ecsClient := ecsOrSkip(ctx, t, tc.Nomad()) + + jobID := "ecsjob-" + uuid.Generate()[0:8] + tc.jobIDs = append(tc.jobIDs, jobID) + _, allocs := registerECSJobs(t, tc.Nomad(), jobID) + require.Len(t, allocs, 1) + origNode := allocs[0].NodeID + origAlloc := allocs[0].ID + e2eutil.WaitForAllocsRunning(t, tc.Nomad(), []string{origAlloc}) + + arn := arnForAlloc(t, tc.Nomad().Allocations(), origAlloc) + ensureECSRunning(ctx, t, ecsClient, arn) + + t.Logf("Task %s is running! Now to drain the node.", arn) + + // Drain the node + _, err := tc.Nomad().Nodes().UpdateDrain( + origNode, + &api.DrainSpec{Deadline: 30 * time.Second}, + false, + nil, + ) + require.NoError(t, err, "error draining original node") + + // Wait for new alloc to be running + var newAlloc *api.AllocationListStub + qopts := &api.QueryOptions{} + testutil.WaitForResult(func() (bool, error) { + allocs, resp, err := tc.Nomad().Jobs().Allocations(jobID, false, qopts) + if err != nil { + return false, fmt.Errorf("error retrieving allocations for job: %w", err) + } + + qopts.WaitIndex = resp.LastIndex + + if len(allocs) > 2 { + return false, fmt.Errorf("expected 1 or 2 allocs but found %d", len(allocs)) + } + + for _, alloc := range allocs { + if alloc.ID == origAlloc { + // This is the old alloc, skip it + continue + } + + newAlloc = alloc + + if newAlloc.ClientStatus == "running" { + break + } + } + + if newAlloc == nil { + return false, fmt.Errorf("no new alloc found") + } + if newAlloc.ClientStatus != "running" { + return false, fmt.Errorf("expected new alloc (%s) to be running but found: %s", + newAlloc.ID, newAlloc.ClientStatus) + } + + return true, nil + }, func(err error) { + t.Fatalf("error waiting for new alloc to be running: %v", err) + }) + + // Make sure the ARN hasn't changed by looking up the new alloc's ARN + newARN := arnForAlloc(t, tc.Nomad().Allocations(), newAlloc.ID) + + assert.Equal(t, arn, newARN, "unexpected new ARN") +} + +// TestECSDeployment asserts a new ECS task is started when an ECS job is +// deployed. +func (tc *RemoteTasksTest) TestECSDeployment(f *framework.F) { + t := f.T() + + ctx := context.Background() + + ecsClient := ecsOrSkip(ctx, t, tc.Nomad()) + + jobID := "ecsjob-" + uuid.Generate()[0:8] + tc.jobIDs = append(tc.jobIDs, jobID) + job, origAllocs := registerECSJobs(t, tc.Nomad(), jobID) + require.Len(t, origAllocs, 1) + origAllocID := origAllocs[0].ID + e2eutil.WaitForAllocsRunning(t, tc.Nomad(), []string{origAllocID}) + + // We need to go from Allocation -> ECS ARN, so grab the updated + // allocation's task state. + origARN := arnForAlloc(t, tc.Nomad().Allocations(), origAllocID) + + // Use ARN to lookup status of ECS task in AWS + ensureECSRunning(ctx, t, ecsClient, origARN) + + t.Logf("Task %s is running! 
Updating...", origARN) + + // Force a deployment by updating meta + job.Meta = map[string]string{ + "updated": time.Now().Format(time.RFC3339Nano), + } + + // Register updated job + resp, _, err := tc.Nomad().Jobs().Register(job, nil) + require.NoError(t, err, "error registering updated job") + require.NotEmpty(t, resp.EvalID, "no eval id created when registering updated job") + + // Wait for new alloc to be running + var newAlloc *api.AllocationListStub + testutil.WaitForResult(func() (bool, error) { + allocs, _, err := tc.Nomad().Jobs().Allocations(jobID, false, nil) + if err != nil { + return false, err + } + + for _, a := range allocs { + if a.ID == origAllocID { + if a.ClientStatus == "complete" { + // Original alloc stopped as expected! + continue + } + + // Original alloc is still running + newAlloc = nil + return false, fmt.Errorf("original alloc not yet terminal. "+ + "client status: %s; desired status: %s", + a.ClientStatus, a.DesiredStatus) + } + + if a.ClientStatus != "running" { + return false, fmt.Errorf("new alloc is not running: %s", a.ClientStatus) + } + + if newAlloc != nil { + return false, fmt.Errorf("found 2 replacement allocs: %s and %s", + a.ID, newAlloc.ID) + } + + newAlloc = a + } + + return newAlloc != nil, fmt.Errorf("no new alloc found for updated job") + }, func(err error) { + require.NoError(t, err, "error waiting for updated alloc") + }) + + newARN := arnForAlloc(t, tc.Nomad().Allocations(), newAlloc.ID) + t.Logf("Task %s is updated!", newARN) + require.NotEqual(t, origARN, newARN, "expected new ARN") + + // Ensure original ARN is stopped in ECS + input := ecs.DescribeTasksInput{ + Cluster: aws.String("nomad-rtd-e2e"), + Tasks: []string{origARN}, + } + testutil.WaitForResult(func() (bool, error) { + resp, err := ecsClient.DescribeTasks(ctx, &input) + if err != nil { + return false, err + } + status := *resp.Tasks[0].LastStatus + return status == ecsTaskStatusStopped, fmt.Errorf("original ecs task is not stopped: %s", status) + }, func(err error) { + t.Fatalf("error retrieving ecs task status for original ARN: %v", err) + }) +} + +// ecsOrSkip returns an AWS ECS client or skips the test if ECS is unreachable +// by the test runner or the ECS remote task driver isn't healthy. +func ecsOrSkip(ctx context.Context, t *testing.T, nomadClient *api.Client) *ecs.Client { + cfg, err := config.LoadDefaultConfig(ctx, config.WithRegion("us-east-1")) + require.NoError(t, err) + + ecsClient := ecs.NewFromConfig(cfg) + + _, err = ecsClient.ListClusters(ctx, &ecs.ListClustersInput{}) + if err != nil { + t.Skipf("Skipping ECS Remote Task Driver Task. 
Error querying AWS ECS API: %v", err) + } + + testutil.WaitForResult(func() (bool, error) { + nodes, _, err := nomadClient.Nodes().List(nil) + if err != nil { + return false, fmt.Errorf("error retrieving node listing: %w", err) + } + + notReady := 0 + notEligible := 0 + noECS := 0 + notHealthy := 0 + ready := 0 + for _, n := range nodes { + if n.Status != "ready" { + notReady++ + continue + } + if n.SchedulingEligibility != "eligible" { + notEligible++ + continue + } + ecsDriver, ok := n.Drivers["ecs"] + if !ok { + noECS++ + continue + } + if !ecsDriver.Healthy { + notHealthy++ + continue + } + ready++ + } + + return ready > 1, fmt.Errorf("expected 2 nodes with healthy ecs drivers but found: "+ + "not_ready=%d ineligible=%d no_driver=%d unhealthy=%d ok=%d", + notReady, notEligible, noECS, notHealthy, ready) + }, func(err error) { + if err != nil { + t.Skipf("Skipping Remote Task Driver tests due to: %v", err) + } + }) + + return ecsClient +} + +// arnForAlloc retrieves the ARN for a running allocation. +func arnForAlloc(t *testing.T, allocAPI *api.Allocations, allocID string) string { + t.Logf("Retrieving ARN for alloc=%s", allocID) + ecsState := struct { + ARN string + }{} + testutil.WaitForResult(func() (bool, error) { + alloc, _, err := allocAPI.Info(allocID, nil) + if err != nil { + return false, err + } + state := alloc.TaskStates["http-server"] + if state == nil { + return false, fmt.Errorf("no task state for http-server (%d task states)", len(alloc.TaskStates)) + } + if state.TaskHandle == nil { + return false, fmt.Errorf("no task handle for http-server") + } + if len(state.TaskHandle.DriverState) == 0 { + return false, fmt.Errorf("no driver state for task handle") + } + if err := base.MsgPackDecode(state.TaskHandle.DriverState, &ecsState); err != nil { + return false, fmt.Errorf("error decoding driver state: %w", err) + } + if ecsState.ARN == "" { + return false, fmt.Errorf("ARN is empty despite DriverState being %d bytes", len(state.TaskHandle.DriverState)) + } + return true, nil + }, func(err error) { + t.Fatalf("error getting ARN: %v", err) + }) + t.Logf("Retrieved ARN=%s for alloc=%s", ecsState.ARN, allocID) + + return ecsState.ARN +} + +// ensureECSRunning asserts that the given ARN is a running ECS task. +func ensureECSRunning(ctx context.Context, t *testing.T, ecsClient *ecs.Client, arn string) { + t.Logf("Ensuring ARN=%s is running", arn) + input := ecs.DescribeTasksInput{ + Cluster: aws.String("nomad-rtd-e2e"), + Tasks: []string{arn}, + } + testutil.WaitForResult(func() (bool, error) { + resp, err := ecsClient.DescribeTasks(ctx, &input) + if err != nil { + return false, err + } + status := *resp.Tasks[0].LastStatus + return status == ecsTaskStatusRunning, fmt.Errorf("ecs task is not running: %s", status) + }, func(err error) { + t.Fatalf("error retrieving ecs task status: %v", err) + }) + t.Logf("ARN=%s is running", arn) +} + +// registerECSJobs registers an ECS job and returns it and its allocation +// stubs. 
+func registerECSJobs(t *testing.T, nomadClient *api.Client, jobID string) (*api.Job, []*api.AllocationListStub) { + const ( + jobPath = "remotetasks/input/ecs.nomad" + varPath = "remotetasks/input/ecs.vars" + ) + + jobBytes, err := os.ReadFile(jobPath) + require.NoError(t, err, "error reading job file") + + job, err := jobspec2.ParseWithConfig(&jobspec2.ParseConfig{ + Path: jobPath, + Body: jobBytes, + VarFiles: []string{varPath}, + Strict: true, + }) + require.NoErrorf(t, err, "error parsing jobspec from %s with var file %s", jobPath, varPath) + + job.ID = &jobID + job.Name = &jobID + + // Register job + resp, _, err := nomadClient.Jobs().Register(job, nil) + require.NoError(t, err, "error registering job") + require.NotEmpty(t, resp.EvalID, "no eval id created when registering job") + + var allocs []*api.AllocationListStub + testutil.WaitForResult(func() (bool, error) { + allocs, _, err = nomadClient.Jobs().Allocations(jobID, false, nil) + if err != nil { + return false, err + } + return len(allocs) > 0, fmt.Errorf("no allocs found") + }, func(err error) { + require.NoErrorf(t, err, "error retrieving allocations for %s", jobID) + }) + return job, allocs +} diff --git a/e2e/terraform/.gitignore b/e2e/terraform/.gitignore index 17718060e43..027cd2507e6 100644 --- a/e2e/terraform/.gitignore +++ b/e2e/terraform/.gitignore @@ -1,2 +1,3 @@ +*.zip +uploads/ custom.tfvars -remotetasks/ diff --git a/e2e/terraform/.terraform.lock.hcl b/e2e/terraform/.terraform.lock.hcl new file mode 100644 index 00000000000..5cef68f560c --- /dev/null +++ b/e2e/terraform/.terraform.lock.hcl @@ -0,0 +1,177 @@ +# This file is maintained automatically by "terraform init". +# Manual edits may be lost in future updates. + +provider "registry.terraform.io/hashicorp/aws" { + version = "4.10.0" + hashes = [ + "h1:3zeyl8QwNYPXRD4b++0Vo9nBcsL3FXT+DT3x/KJNKB0=", + "h1:F9BjbxBhuo1A/rP318IUrkW3TAh29i6UC18qwhzCs6c=", + "h1:S6xGPRL08YEuBdemiYZyIBf/YwM4OCvzVuaiuU6kLjc=", + "h1:pjPLizna1qa/CZh7HvLuQ73YmqaunLXatyOqzF2ePEI=", + "zh:0a2a7eabfeb7dbb17b7f82aff3fa2ba51e836c15e5be4f5468ea44bd1299b48d", + "zh:23409c7205d13d2d68b5528e1c49e0a0455d99bbfec61eb0201142beffaa81f7", + "zh:3adad2245d97816f3919778b52c58fb2de130938a3e9081358bfbb72ec478d9a", + "zh:5bf100aba6332f24b1ffeae7536d5d489bb907bf774a06b95f2183089eaf1a1a", + "zh:63c3a24c0c229a1d3390e6ea2454ba4d8ace9b94e086bee1dbdcf665ae969e15", + "zh:6b76f5ffd920f0a750da3a4ff1d00eab18d9cd3731b009aae3df4135613bad4d", + "zh:8cd6b1e6b51e8e9bbe2944bb169f113d20d1d72d07ccd1b7b83f40b3c958233e", + "zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425", + "zh:c5c31f58fb5bd6aebc6c662a4693640ec763cb3399cce0b592101cf24ece1625", + "zh:cc485410be43d6ad95d81b9e54cc4d2117aadf9bf5941165a9df26565d9cce42", + "zh:cebb89c74b6a3dc6780824b1d1e2a8d16a51e75679e14ad0b830d9f7da1a3a67", + "zh:e7dc427189cb491e1f96e295101964415cbf8630395ee51e396d2a811f365237", + ] +} + +provider "registry.terraform.io/hashicorp/external" { + version = "2.2.2" + hashes = [ + "h1:/Qsdu8SIXbfANKJFs1UTAfvcomJUalOd3uDZvj3jixA=", + "h1:BKQ5f5ijzeyBSnUr+j0wUi+bYv6KBQVQNDXNRVEcfJE=", + "h1:VUkgcWvCliS0HO4kt7oEQhFD2gcx/59XpwMqxfCU1kE=", + "h1:e7RpnZ2PbJEEPnfsg7V0FNwbfSk0/Z3FdrLsXINBmDY=", + "zh:0b84ab0af2e28606e9c0c1289343949339221c3ab126616b831ddb5aaef5f5ca", + "zh:10cf5c9b9524ca2e4302bf02368dc6aac29fb50aeaa6f7758cce9aa36ae87a28", + "zh:56a016ee871c8501acb3f2ee3b51592ad7c3871a1757b098838349b17762ba6b", + "zh:719d6ef39c50e4cffc67aa67d74d195adaf42afcf62beab132dafdb500347d39", + 
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", + "zh:7fbfc4d37435ac2f717b0316f872f558f608596b389b895fcb549f118462d327", + "zh:8ac71408204db606ce63fe8f9aeaf1ddc7751d57d586ec421e62d440c402e955", + "zh:a4cacdb06f114454b6ed0033add28006afa3f65a0ea7a43befe45fc82e6809fb", + "zh:bb5ce3132b52ae32b6cc005bc9f7627b95259b9ffe556de4dad60d47d47f21f0", + "zh:bb60d2976f125ffd232a7ccb4b3f81e7109578b23c9c6179f13a11d125dca82a", + "zh:f9540ecd2e056d6e71b9ea5f5a5cf8f63dd5c25394b9db831083a9d4ea99b372", + "zh:ffd998b55b8a64d4335a090b6956b4bf8855b290f7554dd38db3302de9c41809", + ] +} + +provider "registry.terraform.io/hashicorp/hcp" { + version = "0.26.0" + hashes = [ + "h1:B5O/NawTnKPdUgUlGP/mM2ybv0RcLvVJVOcrivDdFnI=", + "h1:C0KoYT09Ff91pE5KzrFrISCE5wQyJaJnxPdA0SXDOzI=", + "h1:f4IwCK9heo5F+k+nRFY/fzG18DesbBcqRL8F4WsKh7Q=", + "h1:fCHcXVlT/MoAqvIUjFyJqtGrz+ebHNCcR1YM2ZSRPxE=", + "zh:0fa82a384b25a58b65523e0ea4768fa1212b1f5cfc0c9379d31162454fedcc9d", + "zh:6fa5415dbac9c8d20026772dd5aee7dd3ac541e9d86827d0b70bc752472ec76c", + "zh:7490212c32339153165aec1dcef063804aac0d3f1cfbdfd3d04d7a60c29b0f40", + "zh:792e8fbe630159105801a471c46c988d94636637c1e5cdb725956cab4e664c87", + "zh:9e460a3e4735ff24f2fc1c445fce54e4ed596c8dc97f683f5cefa93fb2be9b14", + "zh:a124e8366fdf10d17a0b2860151beb00e12d8c33860fcc661547d0239138d3fb", + "zh:a9b9cb4d077f8d8bcc22c813aea820c224228807f34e2e3716d30c84ce63c53a", + "zh:aae6a8e87c6c64bb33311ef658993a5cc8398aac8dcb2c18953bd9e96a2e0011", + "zh:dc2e83b8f4ca2d4aa2e0b5cc98b9c298c1cf5c583d323320c85d4f06f8f4b43c", + "zh:e17b1c7ef80c3507c892d343282c61dc58ab45978481ee004843f1746f6b791c", + "zh:ee35efe2628aca5f259f3fee8db15accfdced1a5530f01c8a23f59e5ed5dcb7a", + "zh:f8173393330eb376b7357f8271d1c75e0850905dceb32ce482af58e112894278", + ] +} + +provider "registry.terraform.io/hashicorp/local" { + version = "2.2.2" + hashes = [ + "h1:5UYW2wJ320IggrzLt8tLD6MowePqycWtH1b2RInHZkE=", + "h1:BVEZnjtpWxKPG9OOQh4dFa1z5pwMO/uuzYtu6AR2LyM=", + "h1:S6nf97sybBugc8FtrOSPXaynEKx0gO6Oktu6KJzvdDU=", + "h1:SjDyZXIUHEQzZe10VjhlhZq2a9kgQB6tmqJcpq2BeWg=", + "zh:027e4873c69da214e2fed131666d5de92089732a11d096b68257da54d30b6f9d", + "zh:0ba2216e16cfb72538d76a4c4945b4567a76f7edbfef926b1c5a08d7bba2a043", + "zh:1fee8f6aae1833c27caa96e156cf99a681b6f085e476d7e1b77d285e21d182c1", + "zh:2e8a3e72e877003df1c390a231e0d8e827eba9f788606e643f8e061218750360", + "zh:719008f9e262aa1523a6f9132adbe9eee93c648c2981f8359ce41a40e6425433", + "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", + "zh:9a70fdbe6ef955c4919a4519caca116f34c19c7ddedd77990fbe4f80fe66dc84", + "zh:abc412423d670cbb6264827fa80e1ffdc4a74aff3f19ba6a239dd87b85b15bec", + "zh:ae953a62c94d2a2a0822e5717fafc54e454af57bd6ed02cd301b9786765c1dd3", + "zh:be0910bdf46698560f9e86f51a4ff795c62c02f8dc82b2b1dab77a0b3a93f61e", + "zh:e58f9083b7971919b95f553227adaa7abe864fce976f0166cf4d65fc17257ff2", + "zh:ff4f77cbdbb22cc98182821c7ef84dce16298ab0e997d5c7fae97247f7a4bcb0", + ] +} + +provider "registry.terraform.io/hashicorp/null" { + version = "3.1.1" + hashes = [ + "h1:71sNUDvmiJcijsvfXpiLCz0lXIBSsEJjMxljt7hxMhw=", + "h1:Pctug/s/2Hg5FJqjYcTM0kPyx3AoYK1MpRWO0T9V2ns=", + "h1:YvH6gTaQzGdNv+SKTZujU1O0bO+Pw6vJHOPhqgN8XNs=", + "h1:ZD4wyZ0KJzt5s2mD0xD7paJlVONNicLvZKdgtezz02I=", + "zh:063466f41f1d9fd0dd93722840c1314f046d8760b1812fa67c34de0afcba5597", + "zh:08c058e367de6debdad35fc24d97131c7cf75103baec8279aba3506a08b53faf", + "zh:73ce6dff935150d6ddc6ac4a10071e02647d10175c173cfe5dca81f3d13d8afe", + "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", + 
"zh:8fdd792a626413502e68c195f2097352bdc6a0df694f7df350ed784741eb587e", + "zh:976bbaf268cb497400fd5b3c774d218f3933271864345f18deebe4dcbfcd6afa", + "zh:b21b78ca581f98f4cdb7a366b03ae9db23a73dfa7df12c533d7c19b68e9e72e5", + "zh:b7fc0c1615dbdb1d6fd4abb9c7dc7da286631f7ca2299fb9cd4664258ccfbff4", + "zh:d1efc942b2c44345e0c29bc976594cb7278c38cfb8897b344669eafbc3cddf46", + "zh:e356c245b3cd9d4789bab010893566acace682d7db877e52d40fc4ca34a50924", + "zh:ea98802ba92fcfa8cf12cbce2e9e7ebe999afbf8ed47fa45fc847a098d89468b", + "zh:eff8872458806499889f6927b5d954560f3d74bf20b6043409edf94d26cd906f", + ] +} + +provider "registry.terraform.io/hashicorp/random" { + version = "3.1.2" + hashes = [ + "h1:5A5VsY5wNmOZlupUcLnIoziMPn8htSZBXbP3lI7lBEM=", + "h1:9A6Ghjgad0KjJRxa6nPo8i8uFvwj3Vv0wnEgy49u+24=", + "h1:JF+aiOtS0G0ffbBdk1qfj7IrT39y/GZh/yl2IhqcIVM=", + "h1:hxN/z2AVJkF2ei7bfevJdD1B0WfyABxxk9j1zzLsLRk=", + "zh:0daceba867b330d3f8e2c5dc895c4291845a78f31955ce1b91ab2c4d1cd1c10b", + "zh:104050099efd30a630741f788f9576b19998e7a09347decbec3da0b21d64ba2d", + "zh:173f4ef3fdf0c7e2564a3db0fac560e9f5afdf6afd0b75d6646af6576b122b16", + "zh:41d50f975e535f968b3f37170fb07937c15b76d85ba947d0ce5e5ff9530eda65", + "zh:51a5038867e5e60757ed7f513dd6a973068241190d158a81d1b69296efb9cb8d", + "zh:6432a568e97a5a36cc8aebca5a7e9c879a55d3bc71d0da1ab849ad905f41c0be", + "zh:6bac6501394b87138a5e17c9f3a41e46ff7833ad0ba2a96197bb7787e95b641c", + "zh:6c0a7f5faacda644b022e7718e53f5868187435be6d000786d1ca05aa6683a25", + "zh:74c89de3fa6ef3027efe08f8473c2baeb41b4c6cee250ba7aeb5b64e8c79800d", + "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", + "zh:b29eabbf0a5298f0e95a1df214c7cfe06ea9bcf362c63b3ad2f72d85da7d4685", + "zh:e891458c7a61e5b964e09616f1a4f87d0471feae1ec04cc51776e7dec1a3abce", + ] +} + +provider "registry.terraform.io/hashicorp/tls" { + version = "3.3.0" + hashes = [ + "h1:A4xOtHhD4jCmn4nO1xCTk2Nl5IP5JpjicjF+Fuu2ZFQ=", + "h1:Uf8HqbZjYn8pKB0og2H9A8IXIKtHT+o8BE3+fjtO1ZQ=", + "h1:oitTcxYGyDvHuNsjPJUi00a+AT0k+TWgNsGUSM2CV/E=", + "h1:xx/b39Q9FVZSlDc97rlDmQ9dNaaxFFyVzP9kV+47z28=", + "zh:16140e8cc880f95b642b6bf6564f4e98760e9991864aacc8e21273423571e561", + "zh:16338b8457759c97fdd73153965d6063b037f2954fd512e569fcdc42b7fef743", + "zh:348bd44b7cd0c6d663bba36cecb474c17635a8f22b02187d034b8e57a8729c5a", + "zh:3832ac73c2335c0fac26138bacbd18160efaa3f06c562869acc129e814e27f86", + "zh:756d1e60690d0164eee9c93b498b4c8beabbfc1d8b7346cb6d2fa719055089d6", + "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", + "zh:93b911bcddba8dadc5339edb004c8019c230ea67477c73c4f741c236dd9511b1", + "zh:c0c4e5742e8ac004c507540423db52af3f44b8ec04443aa8e14669340819344f", + "zh:c78296a1dff8ccd5d50203aac353422fc18d425072ba947c88cf5b46de7d32d2", + "zh:d7143f444e0f7e6cd67fcaf080398b4f1487cf05de3e0e79af6c14e22812e38b", + "zh:e600ac76b118816ad72132eee4c22ab5fc044f67c3babc54537e1fc1ad53d295", + "zh:fca07af5f591e12d2dc178a550da69a4847bdb34f8180a5b8e04fde6b528cf99", + ] +} + +provider "registry.terraform.io/hashicorp/vault" { + version = "3.4.1" + hashes = [ + "h1:HIjd/7KktGO5E/a0uICbIanUj0Jdd0j8aL/r+QxFhAs=", + "h1:X8P4B/zB97Dtj21qp0Rrswlz92WYCA5C59jpYGZeQuc=", + "h1:dXJBo807u69+Uib2hjoBQ68G2+nGXcNZeq/THVyQQVc=", + "h1:oow6cAwKiFpJBBWKsDqNmwZIrFTWWvoeIbqs+vyUDE0=", + "zh:1eb8370a1846e34e2bcc4d11eece5733735784a8eab447bbed3cfd822101b577", + "zh:2df3989327cea68b2167514b7ebddc67b09340f00bbf3fa85df03c97adfb9d25", + "zh:3dd1e317264f574985e856296deef71a76464918bf0566eb0d7f6389ea0586bd", + "zh:9750861f2822482aa608ea5a52b385bc42b2e1f2511094e6a975412618c4495d", + 
"zh:9b940e7f78975d29a4d0a116cf43c0bc1cb03bec4ad8d34887d64e6e60bacb9e", + "zh:9cb6e7ad2a62529d35dacd20695d49c2f02230cb785d46178cc10f4ec80e5a51", + "zh:a12718689bbcb37bcbb9132c18bffd354fad8ab5c8cb89cec1a0ee85c65b8cb7", + "zh:a6e38afacca1af4fab04a9f2dc49b8295eb462db68bdc7451352d0f950f804f8", + "zh:d6e0e994d51b9e07d5713d4796381f9e129e9de962e79caae2b7055f6f68297e", + "zh:ea4bbef7a1bb2553db473fa304c93845674167b61e8c9677107a96c8c696da12", + "zh:f985a8b7f4ef7d1eba9cef7d99997ee9c4a54ffe76dab7fa8b1fdec2a9edca7e", + ] +} diff --git a/e2e/terraform/Makefile b/e2e/terraform/Makefile index fa77c60f815..10b4eda7884 100644 --- a/e2e/terraform/Makefile +++ b/e2e/terraform/Makefile @@ -7,8 +7,8 @@ CONSUL_LICENSE_PATH ?= custom.tfvars: echo 'nomad_local_binary = "$(PKG_PATH)"' > custom.tfvars echo 'volumes = false' >> custom.tfvars - echo 'client_count_linux = 3' >> custom.tfvars - echo 'client_count_windows_2016 = 0' >> custom.tfvars + echo 'client_count_ubuntu_jammy_amd64 = 3' >> custom.tfvars + echo 'client_count_windows_2016_amd64 = 0' >> custom.tfvars echo 'consul_license = "$(shell cat $(CONSUL_LICENSE_PATH))"' >> custom.tfvars echo 'nomad_license = "$(shell cat $(NOMAD_LICENSE_PATH))"' >> custom.tfvars @@ -45,10 +45,10 @@ destroy_full: # don't run this by default in plan/apply because it prevents you from # updating a running cluster tidy: - rm -rf provision-infra/keys - mkdir -p provision-infra/keys - chmod 0700 provision-infra/keys - rm -rf provision-infra/uploads/* + rm -rf keys + mkdir keys + chmod 0700 keys + rm -rf uploads/* git checkout uploads/README.md rm -f terraform.tfstate.*.backup rm custom.tfvars diff --git a/e2e/terraform/README.md b/e2e/terraform/README.md index de223c28a76..b7d77a22bde 100644 --- a/e2e/terraform/README.md +++ b/e2e/terraform/README.md @@ -42,7 +42,6 @@ cd ./hcp-vault-auth terraform init terraform apply --auto-approve $(terraform output --raw environment) -cd ../ ``` Optionally, edit the `terraform.tfvars` file to change the number of @@ -52,33 +51,22 @@ Linux clients or Windows clients. region = "us-east-1" instance_type = "t2.medium" server_count = "3" -client_count_linux = "4" -client_count_windows_2016 = "1" +client_count_ubuntu_jammy_amd64 = "4" +client_count_windows_2016_amd64 = "1" ``` -You will also need a Consul Enterprise license file and a Nomad Enterprise license file. +You will also need a Consul Enterprise license file. Optionally, edit the `nomad_local_binary` variable in the `terraform.tfvars` file to change the path to the local binary of -Nomad you'd like to upload, but keep in mind it has to match the OS and the CPU architecture of the nodes (amd64 linux). - -NOTE: If you want to have a cluster with mixed CPU architectures, you need to specify the count and also provide the -corresponding binary using `var.nomad_local_binary_client_ubuntu_jammy` and or `var.nomad_local_binary_client_windows_2016`. +Nomad you'd like to upload. Run Terraform apply to deploy the infrastructure: ```sh cd e2e/terraform/ terraform init -terraform apply -var="consul_license=$(cat full_path_to_consul.hclic)" -var="nomad_license=$(cat full_path_to_nomad.hclic)" -``` - -Alternative you can also run `make apply_full` from the terraform directory: - -``` -export NOMAD_LICENSE_PATH=./nomad.hclic -export CONSUL_LICENSE_PATH=./consul.hclic -make apply_full +terraform apply ``` > Note: You will likely see "Connection refused" or "Permission denied" errors @@ -138,21 +126,20 @@ about the cluster: client node IPs. 
- `terraform output windows_clients` will output the list of Windows client node IPs. -- `cluster_unique_identifier` will output the random name used to identify the cluster's resources ## SSH You can use Terraform outputs above to access nodes via ssh: ```sh -ssh -i keys/${CLUSTER_UNIQUE_IDENTIFIER}/nomad-e2e-*.pem ubuntu@${EC2_IP_ADDR} +ssh -i keys/nomad-e2e-*.pem ubuntu@${EC2_IP_ADDR} ``` The Windows client runs OpenSSH for convenience, but has a different user and will drop you into a Powershell shell instead of bash: ```sh -ssh -i keys/${CLUSTER_UNIQUE_IDENTIFIER}/nomad-e2e-*.pem Administrator@${EC2_IP_ADDR} +ssh -i keys/nomad-e2e-*.pem Administrator@${EC2_IP_ADDR} ``` ## Teardown diff --git a/e2e/terraform/provision-infra/compute.tf b/e2e/terraform/compute.tf similarity index 60% rename from e2e/terraform/provision-infra/compute.tf rename to e2e/terraform/compute.tf index b6bd45adc1a..af4254825a9 100644 --- a/e2e/terraform/provision-infra/compute.tf +++ b/e2e/terraform/compute.tf @@ -2,13 +2,11 @@ # SPDX-License-Identifier: BUSL-1.1 locals { - ami_prefix = "nomad-e2e-v3" - ubuntu_image_name = "ubuntu-jammy-${var.instance_arch}" - windows_image_name = "windows-2016-${var.instance_arch}" + ami_prefix = "nomad-e2e-v3" } resource "aws_instance" "server" { - ami = data.aws_ami.ubuntu_jammy.image_id + ami = data.aws_ami.ubuntu_jammy_amd64.image_id instance_type = var.instance_type key_name = module.keys.key_name vpc_security_group_ids = [aws_security_group.servers.id] # see also the secondary ENI @@ -24,48 +22,44 @@ resource "aws_instance" "server" { } } -resource "aws_instance" "client_ubuntu_jammy" { - ami = data.aws_ami.ubuntu_jammy.image_id +resource "aws_instance" "client_ubuntu_jammy_amd64" { + ami = data.aws_ami.ubuntu_jammy_amd64.image_id instance_type = var.instance_type key_name = module.keys.key_name vpc_security_group_ids = [aws_security_group.clients.id] # see also the secondary ENI - count = var.client_count_linux + count = var.client_count_ubuntu_jammy_amd64 iam_instance_profile = data.aws_iam_instance_profile.nomad_e2e_cluster.name availability_zone = var.availability_zone # Instance tags tags = { - Name = "${local.random_name}-client-ubuntu-jammy-${count.index}" + Name = "${local.random_name}-client-ubuntu-jammy-amd64-${count.index}" ConsulAutoJoin = "auto-join-${local.random_name}" User = data.aws_caller_identity.current.arn - OS = "linux" } } - - -resource "aws_instance" "client_windows_2016" { - ami = data.aws_ami.windows_2016[0].image_id +resource "aws_instance" "client_windows_2016_amd64" { + ami = data.aws_ami.windows_2016_amd64[0].image_id instance_type = var.instance_type key_name = module.keys.key_name vpc_security_group_ids = [aws_security_group.clients.id] - count = var.client_count_windows_2016 + count = var.client_count_windows_2016_amd64 iam_instance_profile = data.aws_iam_instance_profile.nomad_e2e_cluster.name availability_zone = var.availability_zone - user_data = file("${path.module}/userdata/windows-2016.ps1") + user_data = file("${path.root}/userdata/windows-2016.ps1") # Instance tags tags = { Name = "${local.random_name}-client-windows-2016-${count.index}" ConsulAutoJoin = "auto-join-${local.random_name}" User = data.aws_caller_identity.current.arn - OS = "windows" } } resource "aws_instance" "consul_server" { - ami = data.aws_ami.ubuntu_jammy_consul_server.image_id + ami = data.aws_ami.ubuntu_jammy_amd64.image_id instance_type = var.instance_type key_name = module.keys.key_name vpc_security_group_ids = [aws_security_group.consul_server.id] @@ -81,24 
+75,16 @@ resource "aws_instance" "consul_server" { } -# We build the AMI only as needed. The AMI is tagged with the SHA of the commit -# that forced the build, which may not be the commit that's spawning this test -# run. data "external" "packer_sha" { program = ["/bin/sh", "-c", < 0 ? 1 : 0 +data "aws_ami" "windows_2016_amd64" { + count = var.client_count_windows_2016_amd64 > 0 ? 1 : 0 most_recent = true owners = ["self"] filter { name = "name" - values = ["${local.ami_prefix}-${local.windows_image_name}-*"] + values = ["${local.ami_prefix}-windows-2016-amd64-*"] } filter { diff --git a/e2e/terraform/provision-infra/consul-clients.tf b/e2e/terraform/consul-clients.tf similarity index 76% rename from e2e/terraform/provision-infra/consul-clients.tf rename to e2e/terraform/consul-clients.tf index 9556ef6daf3..33a59e8cb42 100644 --- a/e2e/terraform/provision-infra/consul-clients.tf +++ b/e2e/terraform/consul-clients.tf @@ -35,12 +35,12 @@ resource "tls_locally_signed_cert" "consul_agents" { resource "local_sensitive_file" "consul_agents_key" { content = tls_private_key.consul_agents.private_key_pem - filename = "${local.uploads_dir}/shared/consul.d/agent_cert.key.pem" + filename = "uploads/shared/consul.d/agent_cert.key.pem" } resource "local_sensitive_file" "consul_agents_cert" { content = tls_locally_signed_cert.consul_agents.cert_pem - filename = "${local.uploads_dir}/shared/consul.d/agent_cert.pem" + filename = "uploads/shared/consul.d/agent_cert.pem" } # Consul tokens for the Consul agents @@ -48,11 +48,11 @@ resource "local_sensitive_file" "consul_agents_cert" { resource "random_uuid" "consul_agent_token" {} resource "local_sensitive_file" "consul_agent_config_file" { - content = templatefile("${path.module}/provision-nomad/etc/consul.d/clients.hcl", { + content = templatefile("etc/consul.d/clients.hcl", { token = "${random_uuid.consul_agent_token.result}" autojoin_value = "auto-join-${local.random_name}" }) - filename = "${local.uploads_dir}/shared/consul.d/clients.hcl" + filename = "uploads/shared/consul.d/clients.hcl" file_permission = "0600" } @@ -61,21 +61,21 @@ resource "local_sensitive_file" "consul_agent_config_file" { resource "random_uuid" "consul_token_for_nomad" {} resource "local_sensitive_file" "nomad_client_config_for_consul" { - content = templatefile("${path.module}/provision-nomad/etc/nomad.d/client-consul.hcl", { + content = templatefile("etc/nomad.d/client-consul.hcl", { token = "${random_uuid.consul_token_for_nomad.result}" client_service_name = "client-${local.random_name}" server_service_name = "server-${local.random_name}" }) - filename = "${local.uploads_dir}/shared/nomad.d/client-consul.hcl" + filename = "uploads/shared/nomad.d/client-consul.hcl" file_permission = "0600" } resource "local_sensitive_file" "nomad_server_config_for_consul" { - content = templatefile("${path.module}/provision-nomad/etc/nomad.d/server-consul.hcl", { + content = templatefile("etc/nomad.d/server-consul.hcl", { token = "${random_uuid.consul_token_for_nomad.result}" client_service_name = "client-${local.random_name}" server_service_name = "server-${local.random_name}" }) - filename = "${local.uploads_dir}/shared/nomad.d/server-consul.hcl" + filename = "uploads/shared/nomad.d/server-consul.hcl" file_permission = "0600" } diff --git a/e2e/terraform/provision-infra/consul-servers.tf b/e2e/terraform/consul-servers.tf similarity index 80% rename from e2e/terraform/provision-infra/consul-servers.tf rename to e2e/terraform/consul-servers.tf index 35d96534d80..eaffbc65697 100644 --- 
a/e2e/terraform/provision-infra/consul-servers.tf +++ b/e2e/terraform/consul-servers.tf @@ -10,18 +10,18 @@ resource "random_uuid" "consul_initial_management_token" {} resource "local_sensitive_file" "consul_initial_management_token" { content = random_uuid.consul_initial_management_token.result - filename = "${local.keys_dir}/consul_initial_management_token" + filename = "keys/consul_initial_management_token" file_permission = "0600" } resource "local_sensitive_file" "consul_server_config_file" { - content = templatefile("${path.module}/provision-nomad/etc/consul.d/servers.hcl", { + content = templatefile("etc/consul.d/servers.hcl", { management_token = "${random_uuid.consul_initial_management_token.result}" token = "${random_uuid.consul_agent_token.result}" nomad_token = "${random_uuid.consul_token_for_nomad.result}" autojoin_value = "auto-join-${local.random_name}" }) - filename = "${local.uploads_dir}/shared/consul.d/servers.hcl" + filename = "uploads/shared/consul.d/servers.hcl" file_permission = "0600" } @@ -59,20 +59,20 @@ resource "tls_locally_signed_cert" "consul_server" { resource "local_sensitive_file" "consul_server_key" { content = tls_private_key.consul_server.private_key_pem - filename = "${local.uploads_dir}/shared/consul.d/server_cert.key.pem" + filename = "uploads/shared/consul.d/server_cert.key.pem" } resource "local_sensitive_file" "consul_server_cert" { content = tls_locally_signed_cert.consul_server.cert_pem - filename = "${local.uploads_dir}/shared/consul.d/server_cert.pem" + filename = "uploads/shared/consul.d/server_cert.pem" } # if consul_license is unset, it'll be a harmless empty license file resource "local_sensitive_file" "consul_environment" { - content = templatefile("${path.module}/provision-nomad/etc/consul.d/.environment", { + content = templatefile("etc/consul.d/.environment", { license = var.consul_license }) - filename = "${local.uploads_dir}/shared/consul.d/.environment" + filename = "uploads/shared/consul.d/.environment" file_permission = "0600" } @@ -91,33 +91,33 @@ resource "null_resource" "upload_consul_server_configs" { user = "ubuntu" host = aws_instance.consul_server.public_ip port = 22 - private_key = file("${local.keys_dir}/${local.random_name}.pem") + private_key = file("${path.root}/keys/${local.random_name}.pem") target_platform = "unix" timeout = "15m" } provisioner "file" { - source = "${local.keys_dir}/tls_ca.crt" + source = "keys/tls_ca.crt" destination = "/tmp/consul_ca.pem" } provisioner "file" { - source = "${local.uploads_dir}/shared/consul.d/.environment" + source = "uploads/shared/consul.d/.environment" destination = "/tmp/.consul_environment" } provisioner "file" { - source = "${local.uploads_dir}/shared/consul.d/server_cert.pem" + source = "uploads/shared/consul.d/server_cert.pem" destination = "/tmp/consul_cert.pem" } provisioner "file" { - source = "${local.uploads_dir}/shared/consul.d/server_cert.key.pem" + source = "uploads/shared/consul.d/server_cert.key.pem" destination = "/tmp/consul_cert.key.pem" } provisioner "file" { - source = "${local.uploads_dir}/shared/consul.d/servers.hcl" + source = "uploads/shared/consul.d/servers.hcl" destination = "/tmp/consul_server.hcl" } provisioner "file" { - source = "${path.module}/provision-nomad/etc/consul.d/consul-server.service" + source = "etc/consul.d/consul-server.service" destination = "/tmp/consul.service" } } @@ -133,7 +133,7 @@ resource "null_resource" "install_consul_server_configs" { user = "ubuntu" host = aws_instance.consul_server.public_ip port = 22 - private_key = 
file("${local.keys_dir}/${local.random_name}.pem") + private_key = file("${path.root}/keys/${local.random_name}.pem") target_platform = "unix" timeout = "15m" } @@ -166,10 +166,10 @@ resource "null_resource" "bootstrap_consul_acls" { depends_on = [null_resource.install_consul_server_configs] provisioner "local-exec" { - command = "${path.module}/scripts/bootstrap-consul.sh" + command = "./scripts/bootstrap-consul.sh" environment = { CONSUL_HTTP_ADDR = "https://${aws_instance.consul_server.public_ip}:8501" - CONSUL_CACERT = "${local.keys_dir}/tls_ca.crt" + CONSUL_CACERT = "keys/tls_ca.crt" CONSUL_HTTP_TOKEN = "${random_uuid.consul_initial_management_token.result}" CONSUL_AGENT_TOKEN = "${random_uuid.consul_agent_token.result}" NOMAD_CLUSTER_CONSUL_TOKEN = "${random_uuid.consul_token_for_nomad.result}" diff --git a/e2e/terraform/ecs-task.json b/e2e/terraform/ecs-task.json new file mode 100644 index 00000000000..cd0f0db9f3e --- /dev/null +++ b/e2e/terraform/ecs-task.json @@ -0,0 +1,21 @@ +[ + { + "command": [ + "/bin/sh -c \"echo ' Amazon ECS Sample App

Congratulations! Your application is now running on a container in Amazon ECS.
' > /usr/local/apache2/htdocs/index.html && httpd-foreground\"" + ], + "entryPoint": [ + "sh", + "-c" + ], + "essential": true, + "image": "httpd:2.4", + "name": "nomad-remote-driver-demo", + "portMappings": [ + { + "containerPort": 80, + "hostPort": 80, + "protocol": "tcp" + } + ] + } +] diff --git a/e2e/terraform/ecs.tf b/e2e/terraform/ecs.tf new file mode 100644 index 00000000000..9c1c27e72ca --- /dev/null +++ b/e2e/terraform/ecs.tf @@ -0,0 +1,28 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +# Nomad ECS Remote Task Driver E2E +resource "aws_ecs_cluster" "nomad_rtd_e2e" { + name = "nomad-rtd-e2e" +} + +resource "aws_ecs_task_definition" "nomad_rtd_e2e" { + family = "nomad-rtd-e2e" + container_definitions = file("ecs-task.json") + + # Don't need a network for e2e tests + network_mode = "awsvpc" + + requires_compatibilities = ["FARGATE"] + cpu = 256 + memory = 512 +} + +resource "local_file" "ecs_vars_hcl" { + content = templatefile("${path.module}/ecs.tftpl", { + sg_id = aws_security_group.clients.id, + subnet_id = data.aws_subnet.default.id, + }) + filename = "${path.module}/../remotetasks/input/ecs.vars" + file_permission = "0664" +} diff --git a/e2e/terraform/ecs.tftpl b/e2e/terraform/ecs.tftpl new file mode 100644 index 00000000000..bb5a10430ba --- /dev/null +++ b/e2e/terraform/ecs.tftpl @@ -0,0 +1,2 @@ +security_groups = ["${sg_id}"] +subnets = ["${subnet_id}"] diff --git a/e2e/terraform/provision-infra/provision-nomad/etc/acls/consul/consul-agent-policy.hcl b/e2e/terraform/etc/acls/consul/consul-agent-policy.hcl similarity index 100% rename from e2e/terraform/provision-infra/provision-nomad/etc/acls/consul/consul-agent-policy.hcl rename to e2e/terraform/etc/acls/consul/consul-agent-policy.hcl diff --git a/e2e/terraform/provision-infra/provision-nomad/etc/acls/consul/nomad-client-policy.hcl b/e2e/terraform/etc/acls/consul/nomad-client-policy.hcl similarity index 100% rename from e2e/terraform/provision-infra/provision-nomad/etc/acls/consul/nomad-client-policy.hcl rename to e2e/terraform/etc/acls/consul/nomad-client-policy.hcl diff --git a/e2e/terraform/provision-infra/provision-nomad/etc/acls/consul/nomad-server-policy.hcl b/e2e/terraform/etc/acls/consul/nomad-server-policy.hcl similarity index 100% rename from e2e/terraform/provision-infra/provision-nomad/etc/acls/consul/nomad-server-policy.hcl rename to e2e/terraform/etc/acls/consul/nomad-server-policy.hcl diff --git a/e2e/terraform/etc/acls/vault/nomad-policy.hcl b/e2e/terraform/etc/acls/vault/nomad-policy.hcl new file mode 100644 index 00000000000..1059928967f --- /dev/null +++ b/e2e/terraform/etc/acls/vault/nomad-policy.hcl @@ -0,0 +1,44 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +# Allow creating tokens under "nomad-tasks" role. The role name should be +# updated if "nomad-tasks" is not used. +path "auth/token/create/${role}" { + capabilities = ["update"] +} + +# Allow looking up "${role}" role. The role name should be updated if +# "${role}" is not used. +path "auth/token/roles/${role}" { + capabilities = ["read"] +} + +# Allow looking up the token passed to Nomad to validate the token has the +# proper capabilities. This is provided by the "default" policy. +path "auth/token/lookup-self" { + capabilities = ["read"] +} + +# Allow looking up incoming tokens to validate they have permissions to access +# the tokens they are requesting. This is only required if +# `allow_unauthenticated` is set to false. 
+path "auth/token/lookup" { + capabilities = ["update"] +} + +# Allow revoking tokens that should no longer exist. This allows revoking +# tokens for dead tasks. +path "auth/token/revoke-accessor" { + capabilities = ["update"] +} + +# Allow checking the capabilities of our own token. This is used to validate the +# token upon startup. +path "sys/capabilities-self" { + capabilities = ["update"] +} + +# Allow our own token to be renewed. +path "auth/token/renew-self" { + capabilities = ["update"] +} diff --git a/e2e/terraform/provision-infra/provision-nomad/etc/consul.d/.environment b/e2e/terraform/etc/consul.d/.environment similarity index 100% rename from e2e/terraform/provision-infra/provision-nomad/etc/consul.d/.environment rename to e2e/terraform/etc/consul.d/.environment diff --git a/e2e/terraform/provision-infra/provision-nomad/etc/consul.d/clients.hcl b/e2e/terraform/etc/consul.d/clients.hcl similarity index 100% rename from e2e/terraform/provision-infra/provision-nomad/etc/consul.d/clients.hcl rename to e2e/terraform/etc/consul.d/clients.hcl diff --git a/e2e/terraform/provision-infra/provision-nomad/etc/consul.d/consul-server.service b/e2e/terraform/etc/consul.d/consul-server.service similarity index 100% rename from e2e/terraform/provision-infra/provision-nomad/etc/consul.d/consul-server.service rename to e2e/terraform/etc/consul.d/consul-server.service diff --git a/e2e/terraform/provision-infra/provision-nomad/etc/consul.d/consul.service b/e2e/terraform/etc/consul.d/consul.service similarity index 96% rename from e2e/terraform/provision-infra/provision-nomad/etc/consul.d/consul.service rename to e2e/terraform/etc/consul.d/consul.service index 56ecccb8c32..2f1e9f24ed1 100644 --- a/e2e/terraform/provision-infra/provision-nomad/etc/consul.d/consul.service +++ b/e2e/terraform/etc/consul.d/consul.service @@ -4,7 +4,6 @@ Requires=network-online.target After=network-online.target [Service] -Type=notify Restart=on-failure Environment=CONSUL_ALLOW_PRIVILEGED_PORTS=true WorkingDirectory=/etc/consul.d diff --git a/e2e/terraform/provision-infra/provision-nomad/etc/consul.d/servers.hcl b/e2e/terraform/etc/consul.d/servers.hcl similarity index 100% rename from e2e/terraform/provision-infra/provision-nomad/etc/consul.d/servers.hcl rename to e2e/terraform/etc/consul.d/servers.hcl diff --git a/e2e/terraform/provision-infra/provision-nomad/etc/nomad.d/.environment b/e2e/terraform/etc/nomad.d/.environment similarity index 100% rename from e2e/terraform/provision-infra/provision-nomad/etc/nomad.d/.environment rename to e2e/terraform/etc/nomad.d/.environment diff --git a/e2e/terraform/provision-infra/provision-nomad/etc/nomad.d/base.hcl b/e2e/terraform/etc/nomad.d/base.hcl similarity index 100% rename from e2e/terraform/provision-infra/provision-nomad/etc/nomad.d/base.hcl rename to e2e/terraform/etc/nomad.d/base.hcl diff --git a/e2e/terraform/provision-infra/provision-nomad/etc/nomad.d/client-consul.hcl b/e2e/terraform/etc/nomad.d/client-consul.hcl similarity index 100% rename from e2e/terraform/provision-infra/provision-nomad/etc/nomad.d/client-consul.hcl rename to e2e/terraform/etc/nomad.d/client-consul.hcl diff --git a/e2e/terraform/provision-infra/provision-nomad/etc/nomad.d/client-linux-0.hcl b/e2e/terraform/etc/nomad.d/client-linux-0.hcl similarity index 100% rename from e2e/terraform/provision-infra/provision-nomad/etc/nomad.d/client-linux-0.hcl rename to e2e/terraform/etc/nomad.d/client-linux-0.hcl diff --git a/e2e/terraform/provision-infra/provision-nomad/etc/nomad.d/client-linux-1.hcl 
b/e2e/terraform/etc/nomad.d/client-linux-1.hcl similarity index 100% rename from e2e/terraform/provision-infra/provision-nomad/etc/nomad.d/client-linux-1.hcl rename to e2e/terraform/etc/nomad.d/client-linux-1.hcl diff --git a/e2e/terraform/provision-infra/provision-nomad/etc/nomad.d/client-linux-2.hcl b/e2e/terraform/etc/nomad.d/client-linux-2.hcl similarity index 100% rename from e2e/terraform/provision-infra/provision-nomad/etc/nomad.d/client-linux-2.hcl rename to e2e/terraform/etc/nomad.d/client-linux-2.hcl diff --git a/e2e/terraform/provision-infra/provision-nomad/etc/nomad.d/client-linux-3.hcl b/e2e/terraform/etc/nomad.d/client-linux-3.hcl similarity index 100% rename from e2e/terraform/provision-infra/provision-nomad/etc/nomad.d/client-linux-3.hcl rename to e2e/terraform/etc/nomad.d/client-linux-3.hcl diff --git a/e2e/terraform/provision-infra/provision-nomad/etc/nomad.d/client-linux.hcl b/e2e/terraform/etc/nomad.d/client-linux.hcl similarity index 100% rename from e2e/terraform/provision-infra/provision-nomad/etc/nomad.d/client-linux.hcl rename to e2e/terraform/etc/nomad.d/client-linux.hcl diff --git a/e2e/terraform/provision-infra/provision-nomad/etc/nomad.d/client-windows.hcl b/e2e/terraform/etc/nomad.d/client-windows.hcl similarity index 100% rename from e2e/terraform/provision-infra/provision-nomad/etc/nomad.d/client-windows.hcl rename to e2e/terraform/etc/nomad.d/client-windows.hcl diff --git a/e2e/terraform/provision-infra/provision-nomad/etc/nomad.d/index.hcl b/e2e/terraform/etc/nomad.d/index.hcl similarity index 100% rename from e2e/terraform/provision-infra/provision-nomad/etc/nomad.d/index.hcl rename to e2e/terraform/etc/nomad.d/index.hcl diff --git a/e2e/terraform/provision-infra/provision-nomad/etc/nomad.d/nomad-client.service b/e2e/terraform/etc/nomad.d/nomad-client.service similarity index 65% rename from e2e/terraform/provision-infra/provision-nomad/etc/nomad.d/nomad-client.service rename to e2e/terraform/etc/nomad.d/nomad-client.service index 5433f87554d..ef6a95e14a2 100644 --- a/e2e/terraform/provision-infra/provision-nomad/etc/nomad.d/nomad-client.service +++ b/e2e/terraform/etc/nomad.d/nomad-client.service @@ -5,12 +5,6 @@ After=network-online.target StartLimitIntervalSec=0 StartLimitBurst=3 -# Nomad and Consul are started very closely together. This helps ensure Consul -# is already running before Nomad starts and avoids having to SIGHUP the Nomad -# clients in order to reload the Consul fingerprints. 
-Wants=consul.service -After=consul.service - [Service] User=root ExecReload=/bin/kill -HUP $MAINPID diff --git a/e2e/terraform/provision-infra/provision-nomad/etc/nomad.d/nomad-server.service b/e2e/terraform/etc/nomad.d/nomad-server.service similarity index 100% rename from e2e/terraform/provision-infra/provision-nomad/etc/nomad.d/nomad-server.service rename to e2e/terraform/etc/nomad.d/nomad-server.service diff --git a/e2e/terraform/provision-infra/provision-nomad/etc/nomad.d/server-consul.hcl b/e2e/terraform/etc/nomad.d/server-consul.hcl similarity index 100% rename from e2e/terraform/provision-infra/provision-nomad/etc/nomad.d/server-consul.hcl rename to e2e/terraform/etc/nomad.d/server-consul.hcl diff --git a/e2e/terraform/provision-infra/provision-nomad/etc/nomad.d/server-linux.hcl b/e2e/terraform/etc/nomad.d/server-linux.hcl similarity index 100% rename from e2e/terraform/provision-infra/provision-nomad/etc/nomad.d/server-linux.hcl rename to e2e/terraform/etc/nomad.d/server-linux.hcl diff --git a/e2e/terraform/provision-infra/provision-nomad/etc/nomad.d/tls.hcl b/e2e/terraform/etc/nomad.d/tls.hcl similarity index 88% rename from e2e/terraform/provision-infra/provision-nomad/etc/nomad.d/tls.hcl rename to e2e/terraform/etc/nomad.d/tls.hcl index e6b2b8528d2..34f2b1171eb 100644 --- a/e2e/terraform/provision-infra/provision-nomad/etc/nomad.d/tls.hcl +++ b/e2e/terraform/etc/nomad.d/tls.hcl @@ -10,5 +10,5 @@ tls { key_file = "/etc/nomad.d/tls/agent.key" verify_server_hostname = true - verify_https_client = false + verify_https_client = true } diff --git a/e2e/terraform/etc/nomad.d/vault.hcl b/e2e/terraform/etc/nomad.d/vault.hcl new file mode 100644 index 00000000000..691f24de865 --- /dev/null +++ b/e2e/terraform/etc/nomad.d/vault.hcl @@ -0,0 +1,11 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +vault { + enabled = true + address = "${url}" + task_token_ttl = "1h" + create_from_role = "${role}" + namespace = "${namespace}" + token = "${token}" +} diff --git a/e2e/terraform/hcp_vault.tf b/e2e/terraform/hcp_vault.tf new file mode 100644 index 00000000000..b148f25ef99 --- /dev/null +++ b/e2e/terraform/hcp_vault.tf @@ -0,0 +1,53 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +# Note: the test environment must have the following values set: +# export HCP_CLIENT_ID= +# export HCP_CLIENT_SECRET= +# export VAULT_TOKEN= +# export VAULT_ADDR= + +data "hcp_vault_cluster" "e2e_shared_vault" { + cluster_id = var.hcp_vault_cluster_id +} + +# Vault policy for the Nomad cluster, which allows it to mint derived tokens for +# tasks. It's interpolated with the random cluster name to avoid collisions +# between concurrent E2E clusters +resource "vault_policy" "nomad" { + name = "${local.random_name}-nomad-server" + policy = templatefile("${path.root}/etc/acls/vault/nomad-policy.hcl", { + role = "nomad-tasks-${local.random_name}" + }) +} + +resource "vault_token" "nomad" { + policies = [vault_policy.nomad.name] + no_parent = true + renewable = true + ttl = "72h" +} + +# The default role that Nomad will use for derived tokens. 
It's not allowed +# access to nomad-policy so that it can only mint tokens for tasks, not for new +# clusters +resource "vault_token_auth_backend_role" "nomad_cluster" { + role_name = "nomad-tasks-${local.random_name}" + disallowed_policies = [vault_policy.nomad.name] + orphan = true + token_period = "259200" + renewable = true + token_max_ttl = "0" +} + +# Nomad agent configuration for Vault +resource "local_sensitive_file" "nomad_config_for_vault" { + content = templatefile("etc/nomad.d/vault.hcl", { + token = vault_token.nomad.client_token + url = data.hcp_vault_cluster.e2e_shared_vault.vault_private_endpoint_url + namespace = var.hcp_vault_namespace + role = "nomad-tasks-${local.random_name}" + }) + filename = "uploads/shared/nomad.d/vault.hcl" + file_permission = "0600" +} diff --git a/e2e/terraform/provision-infra/iam.tf b/e2e/terraform/iam.tf similarity index 100% rename from e2e/terraform/provision-infra/iam.tf rename to e2e/terraform/iam.tf diff --git a/e2e/terraform/main.tf b/e2e/terraform/main.tf index 861e571115c..f6e84ef5e23 100644 --- a/e2e/terraform/main.tf +++ b/e2e/terraform/main.tf @@ -5,19 +5,30 @@ provider "aws" { region = var.region } -module "provision-infra" { - source = "./provision-infra" +data "aws_caller_identity" "current" { +} + +resource "random_pet" "e2e" { +} + +resource "random_password" "windows_admin_password" { + length = 20 + special = true + override_special = "_%@" +} + +locals { + random_name = "${var.name}-${random_pet.e2e.id}" +} + +# Generates keys to use for provisioning and access +module "keys" { + name = local.random_name + path = "${path.root}/keys" + source = "mitchellh/dynamic-keys/aws" + version = "v2.0.0" +} - server_count = var.server_count - client_count_linux = var.client_count_linux - client_count_windows_2016 = var.client_count_windows_2016 - nomad_local_binary_server = var.nomad_local_binary_server - nomad_local_binary = var.nomad_local_binary - nomad_local_binary_client_ubuntu_jammy = var.nomad_local_binary_client_ubuntu_jammy - nomad_local_binary_client_windows_2016 = var.nomad_local_binary_client_windows_2016 - nomad_license = var.nomad_license - consul_license = var.consul_license - nomad_region = var.nomad_region - instance_arch = var.instance_arch - name = var.name +data "aws_kms_alias" "e2e" { + name = "alias/${var.aws_kms_alias}" } diff --git a/e2e/terraform/provision-infra/network.tf b/e2e/terraform/network.tf similarity index 93% rename from e2e/terraform/provision-infra/network.tf rename to e2e/terraform/network.tf index aa273f18d10..79330e0aab8 100644 --- a/e2e/terraform/provision-infra/network.tf +++ b/e2e/terraform/network.tf @@ -54,15 +54,6 @@ resource "aws_security_group" "servers" { cidr_blocks = [local.ingress_cidr] } - # Nomad HTTP access from the HashiCorp Cloud virtual network CIDR. This is - # used for the workload identity authentication method JWKS callback. 
- ingress { - from_port = 4646 - to_port = 4646 - protocol = "tcp" - cidr_blocks = [var.hcp_hvn_cidr] - } - # Nomad HTTP and RPC from clients ingress { from_port = 4646 @@ -216,9 +207,9 @@ resource "aws_network_interface" "clients_secondary" { subnet_id = data.aws_subnet.secondary.id security_groups = [aws_security_group.clients_secondary.id] - count = var.client_count_linux + count = var.client_count_ubuntu_jammy_amd64 attachment { - instance = aws_instance.client_ubuntu_jammy[count.index].id + instance = aws_instance.client_ubuntu_jammy_amd64[count.index].id device_index = 1 } } diff --git a/e2e/terraform/provision-infra/nomad-acls.tf b/e2e/terraform/nomad-acls.tf similarity index 77% rename from e2e/terraform/provision-infra/nomad-acls.tf rename to e2e/terraform/nomad-acls.tf index fded7388027..fbe3d7651b9 100644 --- a/e2e/terraform/provision-infra/nomad-acls.tf +++ b/e2e/terraform/nomad-acls.tf @@ -11,20 +11,19 @@ resource "null_resource" "bootstrap_nomad_acls" { depends_on = [module.nomad_server, null_resource.bootstrap_consul_acls] provisioner "local-exec" { - command = "${path.module}/scripts/bootstrap-nomad.sh" + command = "./scripts/bootstrap-nomad.sh" environment = { NOMAD_ADDR = "https://${aws_instance.server.0.public_ip}:4646" - NOMAD_CACERT = "${local.keys_dir}/tls_ca.crt" - NOMAD_CLIENT_CERT = "${local.keys_dir}/tls_api_client.crt" - NOMAD_CLIENT_KEY = "${local.keys_dir}/tls_api_client.key" - NOMAD_TOKEN_PATH = "${local.keys_dir}" + NOMAD_CACERT = "keys/tls_ca.crt" + NOMAD_CLIENT_CERT = "keys/tls_api_client.crt" + NOMAD_CLIENT_KEY = "keys/tls_api_client.key" } } } data "local_sensitive_file" "nomad_token" { depends_on = [null_resource.bootstrap_nomad_acls] - filename = "${local.keys_dir}/nomad_root_token" + filename = "${path.root}/keys/nomad_root_token" } # push the token out to the servers for humans to use. 
@@ -37,8 +36,8 @@ locals { cat < /tmp/resolv.conf nameserver 127.0.0.1 nameserver $DOCKER_BRIDGE_IP_ADDRESS @@ -30,7 +30,7 @@ cp /tmp/resolv.conf /etc/resolv.conf # need to get the interface for dnsmasq config so that we can # accomodate both "predictable" and old-style interface names -IFACE=$(ip route | grep default | awk '{print "interface="$5}') +IFACE=$(/usr/local/bin/sockaddr eval 'GetDefaultInterfaces | attr "Name"') cat < /tmp/dnsmasq port=53 @@ -38,7 +38,7 @@ resolv-file=/var/run/dnsmasq/resolv.conf bind-interfaces interface=docker0 interface=lo -$IFACE +interface=$IFACE listen-address=127.0.0.1 server=/consul/127.0.0.1#8600 EOF diff --git a/e2e/terraform/packer/ubuntu-jammy-amd64/setup.sh b/e2e/terraform/packer/ubuntu-jammy-amd64/setup.sh index 87d556e272a..e3646693d9f 100755 --- a/e2e/terraform/packer/ubuntu-jammy-amd64/setup.sh +++ b/e2e/terraform/packer/ubuntu-jammy-amd64/setup.sh @@ -36,6 +36,11 @@ sudo apt-get install -y \ curl -o /tmp/hc-install.zip https://releases.hashicorp.com/hc-install/0.9.0/hc-install_0.9.0_linux_amd64.zip sudo unzip -d /usr/local/bin /tmp/hc-install.zip +# Install sockaddr +aws s3 cp "s3://nomad-team-dev-test-binaries/tools/sockaddr_linux_amd64" /tmp/sockaddr +sudo mv /tmp/sockaddr /usr/local/bin +sudo chmod +x /usr/local/bin/sockaddr +sudo chown root:root /usr/local/bin/sockaddr # Disable the firewall sudo ufw disable || echo "ufw not installed" @@ -164,3 +169,14 @@ echo "Updating boot parameters" # enable cgroup_memory and swap sudo sed -i 's/GRUB_CMDLINE_LINUX="[^"]*/& cgroup_enable=memory swapaccount=1/' /etc/default/grub sudo update-grub + +echo "Configuring user shell" +sudo tee -a /home/ubuntu/.bashrc << 'EOF' +IP_ADDRESS=$(/usr/local/bin/sockaddr eval 'GetPrivateIP') +export CONSUL_RPC_ADDR=$IP_ADDRESS:8400 +export CONSUL_HTTP_ADDR=$IP_ADDRESS:8500 +export VAULT_ADDR=http://$IP_ADDRESS:8200 +export NOMAD_ADDR=http://$IP_ADDRESS:4646 +export JAVA_HOME=/usr/lib/jvm/java-17-openjdk-amd64/bin + +EOF diff --git a/e2e/terraform/provision-infra/.gitignore b/e2e/terraform/provision-infra/.gitignore deleted file mode 100644 index a6ec5984572..00000000000 --- a/e2e/terraform/provision-infra/.gitignore +++ /dev/null @@ -1,7 +0,0 @@ -*.zip -custom.tfvars - -# ephemeral objects added by module -provision-nomad/uploads/ -keys/ -csi/ diff --git a/e2e/terraform/provision-infra/hcp_vault.tf b/e2e/terraform/provision-infra/hcp_vault.tf deleted file mode 100644 index f7b7f03f9a7..00000000000 --- a/e2e/terraform/provision-infra/hcp_vault.tf +++ /dev/null @@ -1,91 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -# Note: the test environment must have the following values set: -# export HCP_CLIENT_ID= -# export HCP_CLIENT_SECRET= -# export VAULT_TOKEN= -# export VAULT_ADDR= - -data "hcp_vault_cluster" "e2e_shared_vault" { - cluster_id = var.hcp_vault_cluster_id -} - -// Use stable naming formatting, so that e2e tests can rely on the -// CLUSTER_UNIQUE_IDENTIFIER env var to re-build these names when they need to. -// -// If these change, downstream tests will need to be updated as well, most -// notably vaultsecrets. -locals { - workload_identity_path = "jwt-nomad-${local.random_name}" - workload_identity_role = "jwt-nomad-${local.random_name}-workloads" - workload_identity_policy = "jwt-nomad-${local.random_name}-workloads" -} - -// The authentication backed is used by Nomad to generated workload identities -// for allocations. -// -// Nomad is running TLS, so we must pass the CA and HTTPS endpoint. 
Due to -// limitations within Vault at the moment, the Nomad TLS configuration must set -// "verify_https_client=false". Vault will return an error without this when -// writing the auth backend. -resource "vault_jwt_auth_backend" "nomad_cluster" { - depends_on = [null_resource.bootstrap_nomad_acls] - default_role = local.workload_identity_role - jwks_url = "https://${aws_instance.server[0].private_ip}:4646/.well-known/jwks.json" - jwks_ca_pem = tls_self_signed_cert.ca.cert_pem - jwt_supported_algs = ["RS256"] - path = local.workload_identity_path -} - -// This is our default role for the nomad JWT authentication backend within -// Vault. -resource "vault_jwt_auth_backend_role" "nomad_cluster" { - backend = vault_jwt_auth_backend.nomad_cluster.path - bound_audiences = ["vault.io"] - role_name = local.workload_identity_role - role_type = "jwt" - token_period = 1800 - token_policies = [local.workload_identity_policy] - token_type = "service" - user_claim = "/nomad_job_id" - user_claim_json_pointer = true - - claim_mappings = { - nomad_namespace = "nomad_namespace" - nomad_job_id = "nomad_job_id" - nomad_task = "nomad_task" - } -} - -// Enable a KV secrets backend using the generated name for the path, so that -// multiple clusters can run simultaneously and that failed destroys do not -// impact subsequent runs. -resource "vault_mount" "nomad_cluster" { - path = local.random_name - type = "kv" - options = { version = "2" } -} - -// This Vault policy is linked from default Nomad WI auth backend role and uses -// Nomad's documented default policy for workloads as an outline. It grants -// access to the KV path enabled above, making it available to all e2e tests by -// default. -resource "vault_policy" "nomad-workloads" { - name = local.workload_identity_policy - policy = templatefile("${path.module}/templates/vault-acl-jwt-policy-nomad-workloads.hcl.tpl", { - AUTH_METHOD_ACCESSOR = vault_jwt_auth_backend.nomad_cluster.accessor - MOUNT = local.random_name - }) -} - -# Nomad agent configuration for Vault -resource "local_sensitive_file" "nomad_config_for_vault" { - content = templatefile("${path.module}/provision-nomad/etc/nomad.d/vault.hcl", { - jwt_auth_backend_path = local.workload_identity_path - url = data.hcp_vault_cluster.e2e_shared_vault.vault_private_endpoint_url - namespace = var.hcp_vault_namespace - }) - filename = "${local.uploads_dir}/shared/nomad.d/vault.hcl" - file_permission = "0600" -} diff --git a/e2e/terraform/provision-infra/main.tf b/e2e/terraform/provision-infra/main.tf deleted file mode 100644 index 342691a64ae..00000000000 --- a/e2e/terraform/provision-infra/main.tf +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -data "aws_caller_identity" "current" { -} - -resource "random_pet" "e2e" { -} - -resource "random_password" "windows_admin_password" { - length = 20 - special = true - override_special = "_%@" -} - -locals { - random_name = "${var.name}-${random_pet.e2e.id}" - uploads_dir = "${path.module}/provision-nomad/uploads/${random_pet.e2e.id}" - keys_dir = "${path.module}/keys/${random_pet.e2e.id}" -} - -# Generates keys to use for provisioning and access -module "keys" { - depends_on = [random_pet.e2e] - name = local.random_name - path = "${local.keys_dir}" - source = "mitchellh/dynamic-keys/aws" - version = "v2.0.0" -} - -data "aws_kms_alias" "e2e" { - name = "alias/${var.aws_kms_alias}" -} diff --git a/e2e/terraform/provision-infra/nomad.tf b/e2e/terraform/provision-infra/nomad.tf deleted file mode 100644 index fcb9518f18b..00000000000 --- a/e2e/terraform/provision-infra/nomad.tf +++ /dev/null @@ -1,102 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -locals { - server_binary = var.nomad_local_binary_server != "" ? var.nomad_local_binary_server : var.nomad_local_binary - linux_binary = var.nomad_local_binary_client_ubuntu_jammy != "" ? var.nomad_local_binary_client_ubuntu_jammy : var.nomad_local_binary - windows_binary = var.nomad_local_binary_client_windows_2016 != "" ? var.nomad_local_binary_client_windows_2016 : var.nomad_local_binary -} - -module "nomad_server" { - source = "./provision-nomad" - depends_on = [aws_instance.server] - count = var.server_count - - platform = "linux" - arch = "linux_amd64" - role = "server" - index = count.index - instance = aws_instance.server[count.index] - - nomad_region = var.nomad_region - nomad_local_binary = local.server_binary - - nomad_license = var.nomad_license - tls_ca_key = tls_private_key.ca.private_key_pem - tls_ca_cert = tls_self_signed_cert.ca.cert_pem - - aws_region = var.region - aws_kms_key_id = data.aws_kms_alias.e2e.target_key_id - - uploads_dir = local.uploads_dir - keys_dir = local.keys_dir - - connection = { - type = "ssh" - user = "ubuntu" - port = 22 - private_key = "${local.keys_dir}/${local.random_name}.pem" - } -} - -# TODO: split out the different Linux targets (ubuntu, centos, arm, etc.) 
when -# they're available -module "nomad_client_ubuntu_jammy" { - source = "./provision-nomad" - depends_on = [aws_instance.client_ubuntu_jammy] - count = var.client_count_linux - - platform = "linux" - arch = "linux_${var.instance_arch}" - role = "client" - index = count.index - instance = aws_instance.client_ubuntu_jammy[count.index] - nomad_license = var.nomad_license - nomad_region = var.nomad_region - nomad_local_binary = local.linux_binary - - tls_ca_key = tls_private_key.ca.private_key_pem - tls_ca_cert = tls_self_signed_cert.ca.cert_pem - - uploads_dir = local.uploads_dir - keys_dir = local.keys_dir - - connection = { - type = "ssh" - user = "ubuntu" - port = 22 - private_key = "${local.keys_dir}/${local.random_name}.pem" - } -} - - -# TODO: split out the different Windows targets (2016, 2019) when they're -# available -module "nomad_client_windows_2016" { - source = "./provision-nomad" - depends_on = [aws_instance.client_windows_2016] - count = var.client_count_windows_2016 - - platform = "windows" - arch = "windows_${var.instance_arch}" - role = "client" - index = count.index - instance = aws_instance.client_windows_2016[count.index] - - nomad_region = var.nomad_region - nomad_license = var.nomad_license - nomad_local_binary = local.windows_binary - - tls_ca_key = tls_private_key.ca.private_key_pem - tls_ca_cert = tls_self_signed_cert.ca.cert_pem - - uploads_dir = local.uploads_dir - keys_dir = local.keys_dir - - connection = { - type = "ssh" - user = "Administrator" - port = 22 - private_key = "${local.keys_dir}/${local.random_name}.pem" - } -} diff --git a/e2e/terraform/provision-infra/outputs.tf b/e2e/terraform/provision-infra/outputs.tf deleted file mode 100644 index 5a60d84d62e..00000000000 --- a/e2e/terraform/provision-infra/outputs.tf +++ /dev/null @@ -1,95 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -output "servers" { - value = aws_instance.server.*.public_ip -} - -output "linux_clients" { - value = aws_instance.client_ubuntu_jammy.*.public_ip -} - -output "windows_clients" { - value = aws_instance.client_windows_2016.*.public_ip -} - -output "clients" { - value = concat(aws_instance.client_ubuntu_jammy.*.public_ip, aws_instance.client_windows_2016.*.public_ip) -} - -output "message" { - value = </dev/null 2>&1 && pwd )" echo "waiting for Consul leader to be up..." while true : do - pwd - echo CONSUL_CACERT=$CONSUL_CACERT - echo CONSUL_HTTP_ADDR=$CONSUL_HTTP_ADDR consul info && break echo "Consul server not ready, waiting 5s" sleep 5 @@ -30,5 +27,3 @@ consul acl token create -policy-name=nomad-cluster -secret "$NOMAD_CLUSTER_CONSU echo "writing Consul cluster policy and token" consul acl policy create -name consul-agents -rules @${DIR}/consul-agents-policy.hcl consul acl token create -policy-name=consul-agents -secret "$CONSUL_AGENT_TOKEN" - -echo "Consul successfully bootstraped!" \ No newline at end of file diff --git a/e2e/terraform/provision-infra/scripts/bootstrap-nomad.sh b/e2e/terraform/scripts/bootstrap-nomad.sh similarity index 75% rename from e2e/terraform/provision-infra/scripts/bootstrap-nomad.sh rename to e2e/terraform/scripts/bootstrap-nomad.sh index 7c7a004eb06..5039970b8a6 100755 --- a/e2e/terraform/provision-infra/scripts/bootstrap-nomad.sh +++ b/e2e/terraform/scripts/bootstrap-nomad.sh @@ -10,18 +10,13 @@ do ROOT_TOKEN=$(nomad acl bootstrap | awk '/Secret ID/{print $4}') if [ ! 
-z $ROOT_TOKEN ]; then break; fi sleep 5 - pwd - echo NOMAD_ADDR= $NOMAD_ADDR - echo NOMAD_CACERT= $NOMAD_CACERT - pwd done set -e export NOMAD_TOKEN="$ROOT_TOKEN" -mkdir -p "$NOMAD_TOKEN_PATH" -echo $NOMAD_TOKEN > "${NOMAD_TOKEN_PATH}/nomad_root_token" -echo NOMAD_TOKEN=$NOMAD_TOKEN +mkdir -p ../keys +echo $NOMAD_TOKEN > "${DIR}/../keys/nomad_root_token" # Our default policy after bootstrapping will be full-access. Without # further policy, we only test that we're hitting the ACL code @@ -31,5 +26,3 @@ nomad acl policy apply \ -description "Anonymous policy (full-access)" \ anonymous \ "${DIR}/anonymous.nomad_policy.hcl" - -echo "Nomad successfully bootstraped" diff --git a/e2e/terraform/provision-infra/scripts/consul-agents-policy.hcl b/e2e/terraform/scripts/consul-agents-policy.hcl similarity index 100% rename from e2e/terraform/provision-infra/scripts/consul-agents-policy.hcl rename to e2e/terraform/scripts/consul-agents-policy.hcl diff --git a/e2e/terraform/provision-infra/scripts/nomad-cluster-consul-policy.hcl b/e2e/terraform/scripts/nomad-cluster-consul-policy.hcl similarity index 100% rename from e2e/terraform/provision-infra/scripts/nomad-cluster-consul-policy.hcl rename to e2e/terraform/scripts/nomad-cluster-consul-policy.hcl diff --git a/e2e/terraform/terraform.tfvars b/e2e/terraform/terraform.tfvars index 284c99dbece..20a1651118b 100644 --- a/e2e/terraform/terraform.tfvars +++ b/e2e/terraform/terraform.tfvars @@ -6,5 +6,5 @@ # before running `terraform apply` and created the /pkg/goos_goarch/binary # folder -nomad_local_binary = "../../pkg/linux_amd64/nomad" -nomad_local_binary_client_windows_2016 = "../../pkg/windows_amd64/nomad.exe" +nomad_local_binary = "../../pkg/linux_amd64/nomad" +nomad_local_binary_client_windows_2016_amd64 = ["../../pkg/windows_amd64/nomad.exe"] diff --git a/e2e/terraform/provision-infra/tls_ca.tf b/e2e/terraform/tls_ca.tf similarity index 88% rename from e2e/terraform/provision-infra/tls_ca.tf rename to e2e/terraform/tls_ca.tf index 94481341d9c..992c165b5ca 100644 --- a/e2e/terraform/provision-infra/tls_ca.tf +++ b/e2e/terraform/tls_ca.tf @@ -23,11 +23,11 @@ resource "tls_self_signed_cert" "ca" { } resource "local_sensitive_file" "ca_key" { - filename = "${local.keys_dir}/tls_ca.key" + filename = "keys/tls_ca.key" content = tls_private_key.ca.private_key_pem } resource "local_sensitive_file" "ca_cert" { - filename = "${local.keys_dir}/tls_ca.crt" + filename = "keys/tls_ca.crt" content = tls_self_signed_cert.ca.cert_pem } diff --git a/e2e/terraform/provision-infra/tls_client.tf b/e2e/terraform/tls_client.tf similarity index 86% rename from e2e/terraform/provision-infra/tls_client.tf rename to e2e/terraform/tls_client.tf index 74dce73f86e..9a5e48c3f70 100644 --- a/e2e/terraform/provision-infra/tls_client.tf +++ b/e2e/terraform/tls_client.tf @@ -34,12 +34,12 @@ resource "tls_locally_signed_cert" "api_client" { resource "local_sensitive_file" "api_client_key" { content = tls_private_key.api_client.private_key_pem - filename = "${local.keys_dir}/tls_api_client.key" + filename = "keys/tls_api_client.key" } resource "local_sensitive_file" "api_client_cert" { content = tls_locally_signed_cert.api_client.cert_pem - filename = "${local.keys_dir}/tls_api_client.crt" + filename = "keys/tls_api_client.crt" } # Self signed cert for reverse proxy @@ -56,7 +56,7 @@ resource "tls_self_signed_cert" "self_signed" { organization = "HashiCorp, Inc." 
} - ip_addresses = toset(aws_instance.client_ubuntu_jammy.*.public_ip) + ip_addresses = toset(aws_instance.client_ubuntu_jammy_amd64.*.public_ip) validity_period_hours = 720 allowed_uses = [ @@ -66,10 +66,10 @@ resource "tls_self_signed_cert" "self_signed" { resource "local_sensitive_file" "self_signed_key" { content = tls_private_key.self_signed.private_key_pem - filename = "${local.keys_dir}/self_signed.key" + filename = "keys/self_signed.key" } resource "local_sensitive_file" "self_signed_cert" { content = tls_self_signed_cert.self_signed.cert_pem - filename = "${local.keys_dir}/self_signed.crt" + filename = "keys/self_signed.crt" } diff --git a/e2e/terraform/provision-infra/provision-nomad/uploads/README.md b/e2e/terraform/uploads/README.md similarity index 100% rename from e2e/terraform/provision-infra/provision-nomad/uploads/README.md rename to e2e/terraform/uploads/README.md diff --git a/e2e/terraform/provision-infra/userdata/README.md b/e2e/terraform/userdata/README.md similarity index 100% rename from e2e/terraform/provision-infra/userdata/README.md rename to e2e/terraform/userdata/README.md diff --git a/e2e/terraform/provision-infra/userdata/windows-2016.ps1 b/e2e/terraform/userdata/windows-2016.ps1 similarity index 100% rename from e2e/terraform/provision-infra/userdata/windows-2016.ps1 rename to e2e/terraform/userdata/windows-2016.ps1 diff --git a/e2e/terraform/variables.tf b/e2e/terraform/variables.tf index f8013eafd73..a027aa99b26 100644 --- a/e2e/terraform/variables.tf +++ b/e2e/terraform/variables.tf @@ -21,22 +21,17 @@ variable "instance_type" { default = "t3a.medium" } -variable "instance_arch" { - description = "The architecture for the AWS instance type to use for both clients and servers." - default = "amd64" -} - variable "server_count" { description = "The number of servers to provision." default = "3" } -variable "client_count_linux" { +variable "client_count_ubuntu_jammy_amd64" { description = "The number of Ubuntu clients to provision." default = "4" } -variable "client_count_windows_2016" { +variable "client_count_windows_2016_amd64" { description = "The number of windows 2016 clients to provision." default = "0" } @@ -53,21 +48,24 @@ variable "restrict_ingress_cidrblock" { variable "nomad_local_binary" { description = "The path to a local binary to provision" + default = "" } variable "nomad_license" { type = string description = "If nomad_license is set, deploy a license" + default = "" } variable "nomad_region" { - description = "The AWS region to deploy to." - default = "us-east-1" + description = "The name of the Nomad region." + default = "e2e" } variable "consul_license" { type = string description = "If consul_license is set, deploy a license" + default = "" } variable "volumes" { @@ -76,6 +74,12 @@ variable "volumes" { default = true } +variable "hcp_consul_cluster_id" { + description = "The ID of the HCP Consul cluster" + type = string + default = "nomad-e2e-shared-hcp-consul" +} + variable "hcp_vault_cluster_id" { description = "The ID of the HCP Vault cluster" type = string @@ -95,24 +99,24 @@ variable "aws_kms_alias" { } # ---------------------------------------- -# If you want to deploy different versions you can use these variables to -# provide a build to override the values of nomad_sha, nomad_version, +# If you want to deploy multiple versions you can use these variables to +# provide a list of builds to override the values of nomad_sha, nomad_version, # or nomad_local_binary. Most of the time you can ignore these variables! 
variable "nomad_local_binary_server" { - description = "A path to an alternative binary to deploy to servers, to override nomad_local_binary" - type = string - default = "" + description = "A list of nomad local binary paths to deploy to servers, to override nomad_local_binary" + type = list(string) + default = [] } -variable "nomad_local_binary_client_ubuntu_jammy" { - description = "A path to an alternative binary to deploy to ubuntu clients, to override nomad_local_binary" - type = string - default = "" +variable "nomad_local_binary_client_ubuntu_jammy_amd64" { + description = "A list of nomad local binary paths to deploy to Ubuntu Jammy clients, to override nomad_local_binary" + type = list(string) + default = [] } -variable "nomad_local_binary_client_windows_2016" { - description = "A path to an alternative binary to deploy to windows clients, to override nomad_local_binary" - type = string - default = "" +variable "nomad_local_binary_client_windows_2016_amd64" { + description = "A list of nomad local binary paths to deploy to Windows 2016 clients, to override nomad_local_binary" + type = list(string) + default = [] } diff --git a/enos/enos-providers.hcl b/e2e/terraform/versions.tf similarity index 57% rename from enos/enos-providers.hcl rename to e2e/terraform/versions.tf index d961427939e..a123945c03f 100644 --- a/enos/enos-providers.hcl +++ b/e2e/terraform/versions.tf @@ -1,6 +1,7 @@ # Copyright (c) HashiCorp, Inc. # SPDX-License-Identifier: BUSL-1.1 -provider "aws" "default" { - region = var.aws_region + +terraform { + required_version = ">= 0.12" } diff --git a/e2e/terraform/provision-infra/volumes.tf b/e2e/terraform/volumes.tf similarity index 86% rename from e2e/terraform/provision-infra/volumes.tf rename to e2e/terraform/volumes.tf index 5a6329cbd59..a3ce9c9eac5 100644 --- a/e2e/terraform/provision-infra/volumes.tf +++ b/e2e/terraform/volumes.tf @@ -3,7 +3,7 @@ resource "aws_efs_file_system" "csi" { count = var.volumes ? 
1 : 0 - creation_token = "${random_pet.e2e.id}-CSI" + creation_token = "${local.random_name}-CSI" tags = { Name = "${local.random_name}-efs" @@ -23,6 +23,6 @@ resource "local_file" "efs_volume_hcl" { content = templatefile("${path.module}/volumes.tftpl", { id = aws_efs_file_system.csi[0].id, }) - filename = "${path.module}/csi/input/volume-efs.hcl" + filename = "${path.module}/../csi/input/volume-efs.hcl" file_permission = "0664" } diff --git a/e2e/terraform/provision-infra/volumes.tftpl b/e2e/terraform/volumes.tftpl similarity index 100% rename from e2e/terraform/provision-infra/volumes.tftpl rename to e2e/terraform/volumes.tftpl diff --git a/e2e/ui/package-lock.json b/e2e/ui/package-lock.json index 5fb8472fcf7..d85ee949c6f 100644 --- a/e2e/ui/package-lock.json +++ b/e2e/ui/package-lock.json @@ -1,20 +1,21 @@ { - "name": "ui", + "name": "src", "lockfileVersion": 2, "requires": true, "packages": { "": { "devDependencies": { - "@playwright/test": "^1.50.0" + "@playwright/test": "^1.48.0" } }, "node_modules/@playwright/test": { - "version": "1.50.0", - "resolved": "https://registry.npmjs.org/@playwright/test/-/test-1.50.0.tgz", - "integrity": "sha512-ZGNXbt+d65EGjBORQHuYKj+XhCewlwpnSd/EDuLPZGSiEWmgOJB5RmMCCYGy5aMfTs9wx61RivfDKi8H/hcMvw==", + "version": "1.48.0", + "resolved": "https://registry.npmjs.org/@playwright/test/-/test-1.48.0.tgz", + "integrity": "sha512-W5lhqPUVPqhtc/ySvZI5Q8X2ztBOUgZ8LbAFy0JQgrXZs2xaILrUcNO3rQjwbLPfGK13+rZsDa1FpG+tqYkT5w==", "dev": true, + "license": "Apache-2.0", "dependencies": { - "playwright": "1.50.0" + "playwright": "1.48.0" }, "bin": { "playwright": "cli.js" @@ -29,6 +30,7 @@ "integrity": "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==", "dev": true, "hasInstallScript": true, + "license": "MIT", "optional": true, "os": [ "darwin" @@ -38,12 +40,13 @@ } }, "node_modules/playwright": { - "version": "1.50.0", - "resolved": "https://registry.npmjs.org/playwright/-/playwright-1.50.0.tgz", - "integrity": "sha512-+GinGfGTrd2IfX1TA4N2gNmeIksSb+IAe589ZH+FlmpV3MYTx6+buChGIuDLQwrGNCw2lWibqV50fU510N7S+w==", + "version": "1.48.0", + "resolved": "https://registry.npmjs.org/playwright/-/playwright-1.48.0.tgz", + "integrity": "sha512-qPqFaMEHuY/ug8o0uteYJSRfMGFikhUysk8ZvAtfKmUK3kc/6oNl/y3EczF8OFGYIi/Ex2HspMfzYArk6+XQSA==", "dev": true, + "license": "Apache-2.0", "dependencies": { - "playwright-core": "1.50.0" + "playwright-core": "1.48.0" }, "bin": { "playwright": "cli.js" @@ -56,10 +59,11 @@ } }, "node_modules/playwright-core": { - "version": "1.50.0", - "resolved": "https://registry.npmjs.org/playwright-core/-/playwright-core-1.50.0.tgz", - "integrity": "sha512-CXkSSlr4JaZs2tZHI40DsZUN/NIwgaUPsyLuOAaIZp2CyF2sN5MM5NJsyB188lFSSozFxQ5fPT4qM+f0tH/6wQ==", + "version": "1.48.0", + "resolved": "https://registry.npmjs.org/playwright-core/-/playwright-core-1.48.0.tgz", + "integrity": "sha512-RBvzjM9rdpP7UUFrQzRwR8L/xR4HyC1QXMzGYTbf1vjw25/ya9NRAVnXi/0fvFopjebvyPzsmoK58xxeEOaVvA==", "dev": true, + "license": "Apache-2.0", "bin": { "playwright-core": "cli.js" }, @@ -70,12 +74,12 @@ }, "dependencies": { "@playwright/test": { - "version": "1.50.0", - "resolved": "https://registry.npmjs.org/@playwright/test/-/test-1.50.0.tgz", - "integrity": "sha512-ZGNXbt+d65EGjBORQHuYKj+XhCewlwpnSd/EDuLPZGSiEWmgOJB5RmMCCYGy5aMfTs9wx61RivfDKi8H/hcMvw==", + "version": "1.48.0", + "resolved": "https://registry.npmjs.org/@playwright/test/-/test-1.48.0.tgz", + "integrity": 
"sha512-W5lhqPUVPqhtc/ySvZI5Q8X2ztBOUgZ8LbAFy0JQgrXZs2xaILrUcNO3rQjwbLPfGK13+rZsDa1FpG+tqYkT5w==", "dev": true, "requires": { - "playwright": "1.50.0" + "playwright": "1.48.0" } }, "fsevents": { @@ -86,19 +90,19 @@ "optional": true }, "playwright": { - "version": "1.50.0", - "resolved": "https://registry.npmjs.org/playwright/-/playwright-1.50.0.tgz", - "integrity": "sha512-+GinGfGTrd2IfX1TA4N2gNmeIksSb+IAe589ZH+FlmpV3MYTx6+buChGIuDLQwrGNCw2lWibqV50fU510N7S+w==", + "version": "1.48.0", + "resolved": "https://registry.npmjs.org/playwright/-/playwright-1.48.0.tgz", + "integrity": "sha512-qPqFaMEHuY/ug8o0uteYJSRfMGFikhUysk8ZvAtfKmUK3kc/6oNl/y3EczF8OFGYIi/Ex2HspMfzYArk6+XQSA==", "dev": true, "requires": { "fsevents": "2.3.2", - "playwright-core": "1.50.0" + "playwright-core": "1.48.0" } }, "playwright-core": { - "version": "1.50.0", - "resolved": "https://registry.npmjs.org/playwright-core/-/playwright-core-1.50.0.tgz", - "integrity": "sha512-CXkSSlr4JaZs2tZHI40DsZUN/NIwgaUPsyLuOAaIZp2CyF2sN5MM5NJsyB188lFSSozFxQ5fPT4qM+f0tH/6wQ==", + "version": "1.48.0", + "resolved": "https://registry.npmjs.org/playwright-core/-/playwright-core-1.48.0.tgz", + "integrity": "sha512-RBvzjM9rdpP7UUFrQzRwR8L/xR4HyC1QXMzGYTbf1vjw25/ya9NRAVnXi/0fvFopjebvyPzsmoK58xxeEOaVvA==", "dev": true } } diff --git a/e2e/ui/package.json b/e2e/ui/package.json index 30bc95b27d7..a655fe54b65 100644 --- a/e2e/ui/package.json +++ b/e2e/ui/package.json @@ -1,5 +1,5 @@ { "devDependencies": { - "@playwright/test": "^1.50.0" + "@playwright/test": "^1.48.0" } } diff --git a/e2e/ui/run.sh b/e2e/ui/run.sh index 03ae5505f69..a976393c786 100755 --- a/e2e/ui/run.sh +++ b/e2e/ui/run.sh @@ -33,7 +33,7 @@ EOF } -IMAGE="mcr.microsoft.com/playwright:v1.50.0-jammy" +IMAGE="mcr.microsoft.com/playwright:v1.48.0-noble" pushd $(dirname "${BASH_SOURCE[0]}") > /dev/null run_tests() { diff --git a/e2e/v3/volumes3/host3.go b/e2e/v3/volumes3/host3.go deleted file mode 100644 index b854961fae6..00000000000 --- a/e2e/v3/volumes3/host3.go +++ /dev/null @@ -1,225 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package volumes3 - -import ( - "context" - "fmt" - "os" - "os/exec" - "strings" - "testing" - "time" - - "github.com/hashicorp/nomad/api" - nomadapi "github.com/hashicorp/nomad/api" - "github.com/hashicorp/nomad/e2e/v3/util3" - "github.com/shoenig/test/must" - "github.com/shoenig/test/wait" -) - -// VolumeSubmission holds state around creating and cleaning up a dynamic host -// volume. 
-type VolumeSubmission struct { - t *testing.T - - nomadClient *nomadapi.Client - - // inputs - namespace string - filename string - waitState nomadapi.HostVolumeState - - // behaviors - noCleanup bool - timeout time.Duration - verbose bool - - // outputs - volID string - nodeID string -} - -type Option func(*VolumeSubmission) - -type Cleanup func() - -func Create(t *testing.T, filename string, opts ...Option) (*VolumeSubmission, Cleanup) { - t.Helper() - - sub := &VolumeSubmission{ - t: t, - namespace: api.DefaultNamespace, - filename: filename, - waitState: nomadapi.HostVolumeStateReady, - timeout: 10 * time.Second, - } - - for _, opt := range opts { - opt(sub) - } - - start := time.Now() - sub.setClient() // setup API client if not configured by option - sub.run(start) // create the volume via API - sub.waits(start) // wait on node fingerprint - - return sub, sub.cleanup -} - -// VolumeID returns the volume ID set by the server -func (sub *VolumeSubmission) VolumeID() string { - return sub.volID -} - -// NodeID returns the node ID, which may have been set by the server -func (sub *VolumeSubmission) NodeID() string { - return sub.nodeID -} - -// Get fetches the api.HostVolume from the server for further examination -func (sub *VolumeSubmission) Get() *nomadapi.HostVolume { - vol, _, err := sub.nomadClient.HostVolumes().Get(sub.volID, - &api.QueryOptions{Namespace: sub.namespace}) - must.NoError(sub.t, err) - return vol -} - -func (sub *VolumeSubmission) setClient() { - if sub.nomadClient != nil { - return - } - nomadClient, err := nomadapi.NewClient(nomadapi.DefaultConfig()) - must.NoError(sub.t, err, must.Sprint("failed to create nomad API client")) - sub.nomadClient = nomadClient -} - -func (sub *VolumeSubmission) run(start time.Time) { - sub.t.Helper() - ctx, cancel := context.WithTimeout(context.Background(), sub.timeout) - defer cancel() - - bytes, err := exec.CommandContext(ctx, - "nomad", "volume", "create", - "-namespace", sub.namespace, - "-detach", sub.filename).CombinedOutput() - must.NoError(sub.t, err, must.Sprint("error creating volume")) - out := string(bytes) - split := strings.Split(out, " ") - sub.volID = strings.TrimSpace(split[len(split)-1]) - - sub.logf("[%v] volume %q created", time.Since(start), sub.VolumeID()) -} - -func (sub *VolumeSubmission) waits(start time.Time) { - sub.t.Helper() - must.Wait(sub.t, wait.InitialSuccess( - wait.ErrorFunc(func() error { - vol, _, err := sub.nomadClient.HostVolumes().Get(sub.volID, - &api.QueryOptions{Namespace: sub.namespace}) - if err != nil { - return err - } - sub.nodeID = vol.NodeID - - if vol.State != sub.waitState { - return fmt.Errorf("volume is not yet in %q state: %q", sub.waitState, vol.State) - } - - // if we're waiting for the volume to be ready, let's also verify - // that it's correctly fingerprinted on the node - switch sub.waitState { - case nomadapi.HostVolumeStateReady: - node, _, err := sub.nomadClient.Nodes().Info(sub.nodeID, nil) - if err != nil { - return err - } - _, ok := node.HostVolumes[vol.Name] - if !ok { - return fmt.Errorf("node %q did not fingerprint volume %q", sub.nodeID, sub.volID) - } - } - - return nil - }), - wait.Timeout(sub.timeout), - wait.Gap(50*time.Millisecond), - )) - - sub.logf("[%v] volume %q is %q on node %q", - time.Since(start), sub.volID, sub.waitState, sub.nodeID) -} - -func (sub *VolumeSubmission) cleanup() { - if os.Getenv("NOMAD_TEST_SKIPCLEANUP") == "1" { - return - } - if sub.noCleanup { - return - } - if sub.volID == "" { - return - } - - sub.noCleanup = true // so this 
isn't attempted more than once - ctx, cancel := context.WithTimeout(context.Background(), sub.timeout) - defer cancel() - - sub.logf("deleting volume %q", sub.volID) - err := exec.CommandContext(ctx, - "nomad", "volume", "delete", - "-type", "host", "-namespace", sub.namespace, sub.volID).Run() - must.NoError(sub.t, err) -} - -func (sub *VolumeSubmission) logf(msg string, args ...any) { - sub.t.Helper() - util3.Log3(sub.t, sub.verbose, msg, args...) -} - -// WithClient forces the submission to use the Nomad API client passed from the -// calling test -func WithClient(client *nomadapi.Client) Option { - return func(sub *VolumeSubmission) { - sub.nomadClient = client - } -} - -// WithNamespace sets a specific namespace for the volume and the wait -// query. The namespace should not be set in the spec if you're using this -// option. -func WithNamespace(ns string) Option { - return func(sub *VolumeSubmission) { - sub.namespace = ns - } -} - -// WithTimeout changes the default timeout from 10s -func WithTimeout(timeout time.Duration) Option { - return func(sub *VolumeSubmission) { - sub.timeout = timeout - } -} - -// WithWaitState changes the default state we wait for after creating the volume -// from the default of "ready" -func WithWaitState(state api.HostVolumeState) Option { - return func(sub *VolumeSubmission) { - sub.waitState = state - } -} - -// WithNoCleanup is used for test debugging to skip tearing down the volume -func WithNoCleanup() Option { - return func(sub *VolumeSubmission) { - sub.noCleanup = true - } -} - -// WithVerbose is used for test debugging to write more logs -func WithVerbose() Option { - return func(sub *VolumeSubmission) { - sub.verbose = true - } -} diff --git a/e2e/vaultcompat/cluster_setup_test.go b/e2e/vaultcompat/cluster_setup_test.go index 1f0741b60c0..67eefdd4812 100644 --- a/e2e/vaultcompat/cluster_setup_test.go +++ b/e2e/vaultcompat/cluster_setup_test.go @@ -11,6 +11,16 @@ const ( jwtPath = "nomad_jwt" ) +// roleLegacy is the legacy recommendation for nomad cluster role. +var roleLegacy = map[string]interface{}{ + "disallowed_policies": "nomad-server", + "explicit_max_ttl": 0, // use old name for vault compatibility + "name": "nomad-cluster", + "orphan": false, + "period": 259200, // use old name for vault compatibility + "renewable": true, +} + // authConfigJWT is the configuration for the JWT auth method used by Nomad. func authConfigJWT(jwksURL string) map[string]any { return map[string]any{ diff --git a/e2e/vaultcompat/input/cat.hcl b/e2e/vaultcompat/input/cat.hcl new file mode 100644 index 00000000000..b4db40ac321 --- /dev/null +++ b/e2e/vaultcompat/input/cat.hcl @@ -0,0 +1,25 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +job "cat" { + type = "batch" + group "testcase" { + task "cat" { + driver = "raw_exec" + + config { + command = "cat" + args = ["${NOMAD_SECRETS_DIR}/vault_token"] + } + + vault { + policies = ["default"] + } + } + + restart { + attempts = 0 + mode = "fail" + } + } +} diff --git a/e2e/vaultcompat/input/policy_legacy.hcl b/e2e/vaultcompat/input/policy_legacy.hcl new file mode 100644 index 00000000000..1813675762e --- /dev/null +++ b/e2e/vaultcompat/input/policy_legacy.hcl @@ -0,0 +1,30 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +path "auth/token/create/nomad-cluster" { + capabilities = ["update"] +} + +path "auth/token/roles/nomad-cluster" { + capabilities = ["read"] +} + +path "auth/token/lookup-self" { + capabilities = ["read"] +} + +path "auth/token/lookup" { + capabilities = ["update"] +} + +path "auth/token/revoke-accessor" { + capabilities = ["update"] +} + +path "sys/capabilities-self" { + capabilities = ["update"] +} + +path "auth/token/renew-self" { + capabilities = ["update"] +} diff --git a/e2e/vaultcompat/run_ce_test.go b/e2e/vaultcompat/run_ce_test.go index 5515bf4f098..329d60933e9 100644 --- a/e2e/vaultcompat/run_ce_test.go +++ b/e2e/vaultcompat/run_ce_test.go @@ -26,6 +26,16 @@ func usable(v, minimum *version.Version) bool { } } +func testVaultLegacy(t *testing.T, b build) { + vStop, vc := startVault(t, b) + defer vStop() + setupVaultLegacy(t, vc) + + nStop, nc := startNomad(t, configureNomadVaultLegacy(vc)) + defer nStop() + runJob(t, nc, "input/cat.hcl", "default", validateLegacyAllocs) +} + func testVaultJWT(t *testing.T, b build) { vStop, vc := startVault(t, b) defer vStop() diff --git a/e2e/vaultcompat/vaultcompat_test.go b/e2e/vaultcompat/vaultcompat_test.go index c5792ce8e45..4238791a789 100644 --- a/e2e/vaultcompat/vaultcompat_test.go +++ b/e2e/vaultcompat/vaultcompat_test.go @@ -21,6 +21,7 @@ import ( goversion "github.com/hashicorp/go-version" "github.com/hashicorp/nomad/api" nomadapi "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/testutil" @@ -64,6 +65,9 @@ func testVaultBuild(t *testing.T, b build) { must.NoError(t, err) t.Run("vault("+b.Version+")", func(t *testing.T) { + t.Run("legacy", func(t *testing.T) { + testVaultLegacy(t, b) + }) if version.GreaterThanOrEqual(minJWTVersion) { t.Run("jwt", func(t *testing.T) { @@ -76,6 +80,16 @@ func testVaultBuild(t *testing.T, b build) { }) } +func validateLegacyAllocs(allocs []*nomadapi.AllocationListStub) error { + if n := len(allocs); n != 1 { + return fmt.Errorf("expected 1 alloc, got %d", n) + } + if s := allocs[0].ClientStatus; s != "complete" { + return fmt.Errorf("expected alloc status complete, got %s", s) + } + return nil +} + func validateJWTAllocs(allocs []*nomadapi.AllocationListStub) error { if n := len(allocs); n != 2 { return fmt.Errorf("expected 2 allocs, got %d", n) @@ -167,6 +181,27 @@ func startVault(t *testing.T, b build) (func(), *vaultapi.Client) { return vlt.Stop, vlt.Client } +func setupVaultLegacy(t *testing.T, vc *vaultapi.Client) { + policy, err := os.ReadFile("input/policy_legacy.hcl") + must.NoError(t, err) + + sys := vc.Sys() + must.NoError(t, sys.PutPolicy("nomad-server", string(policy))) + + log := vc.Logical() + log.Write("auth/token/roles/nomad-cluster", roleLegacy) + + token := vc.Auth().Token() + secret, err := token.Create(&vaultapi.TokenCreateRequest{ + Policies: []string{"nomad-server"}, + Period: "72h", + NoParent: true, + }) + must.NoError(t, err, must.Sprint("failed to create vault token")) + must.NotNil(t, secret) + must.NotNil(t, secret.Auth) +} + func setupVaultJWT(t *testing.T, vc *vaultapi.Client, jwksURL string) { logical := vc.Logical() sys := vc.Sys() @@ -243,6 +278,18 @@ func startNomad(t *testing.T, cb func(*testutil.TestServerConfig)) (func(), *nom return ts.Stop, nc } +func configureNomadVaultLegacy(vc *vaultapi.Client) func(*testutil.TestServerConfig) { + return func(c *testutil.TestServerConfig) { + c.Vaults 
= []*testutil.VaultConfig{{ + Enabled: true, + Address: vc.Address(), + Token: vc.Token(), + Role: "nomad-cluster", + AllowUnauthenticated: pointer.Of(true), + }} + } +} + func configureNomadVaultJWT(vc *vaultapi.Client) func(*testutil.TestServerConfig) { return func(c *testutil.TestServerConfig) { c.Vaults = []*testutil.VaultConfig{{ diff --git a/e2e/vaultsecrets/input/acl-role.json b/e2e/vaultsecrets/input/acl-role.json deleted file mode 100644 index 8dadce8afb7..00000000000 --- a/e2e/vaultsecrets/input/acl-role.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "role_type": "jwt", - "bound_audiences": ["vault.io"], - "bound_claims": { - "nomad_namespace": "vault-secrets", - "nomad_job_id": "secrets" - }, - "user_claim": "/nomad_job_id", - "user_claim_json_pointer": true, - "claim_mappings": { - "nomad_namespace": "nomad_namespace", - "nomad_job_id": "nomad_job_id", - "nomad_task": "nomad_task" - }, - "token_type": "service", - "token_policies": ["POLICYID"], - "token_period": "30m", - "token_explicit_max_ttl": 0 -} diff --git a/e2e/vaultsecrets/input/default_wi.nomad.hcl b/e2e/vaultsecrets/input/default_wi.nomad.hcl deleted file mode 100644 index 66905a92b00..00000000000 --- a/e2e/vaultsecrets/input/default_wi.nomad.hcl +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -job "default_wi" { - - constraint { - attribute = "${attr.kernel.name}" - value = "linux" - } - - group "group" { - - task "task" { - - driver = "docker" - - config { - image = "busybox:1" - command = "/bin/sh" - args = ["-c", "sleep 300"] - } - - vault {} - - template { - data = < /tmp/outline.html -$ open /tmp/outline.html -``` - -## Running Enos - -Run the Enos scenario end-to-end: - -``` -$ enos scenario run upgrade --var-file /tmp/enos.vars --timeout 2h -``` - -Enos will not clean up after itself automatically if interrupted. If you have to -interrupt it, you may need to run `enos scenario destroy upgrade --var-file -/tmp/enos.vars ` diff --git a/enos/enos-modules.hcl b/enos/enos-modules.hcl deleted file mode 100644 index a805939f6c6..00000000000 --- a/enos/enos-modules.hcl +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -module "build_artifactory" { - source = "./modules/fetch_artifactory" -} - -module "provision_cluster" { - source = "../e2e/terraform/provision-infra" -} - -module "run_workloads" { - source = "./modules/run_workloads" -} - -module "test_cluster_health" { - source = "./modules/test_cluster_health" -} - -module "upgrade_servers" { - source = "./modules/upgrade_servers" -} - -module "upgrade_clients" { - source = "./modules/upgrade_clients" -} diff --git a/enos/enos-quality.hcl b/enos/enos-quality.hcl deleted file mode 100644 index 5f423045e78..00000000000 --- a/enos/enos-quality.hcl +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
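For convenience, the run and destroy commands shown in the Enos README above can be wrapped so teardown always happens even when a run is interrupted; a minimal sketch, with an illustrative vars-file path:

```
#!/usr/bin/env bash
# Sketch: run the upgrade scenario and always destroy it afterwards, since
# Enos does not clean up after itself when interrupted.
# The vars-file path below is illustrative.
set -euo pipefail

VARS_FILE=/tmp/enos.vars

cleanup() {
  enos scenario destroy upgrade --var-file "$VARS_FILE"
}
trap cleanup EXIT

enos scenario run upgrade --var-file "$VARS_FILE" --timeout 2h
```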
-# SPDX-License-Identifier: BUSL-1.1 - -quality "nomad_agent_info" { - description = "A GET call to /v1/agent/members returns the correct number of running servers and they are all alive" -} - -quality "nomad_agent_info_self" { - description = "A GET call to /v1/agent/self against every server returns the same last_log_index as the leader" -} - -quality "nomad_nodes_status" { - description = "A GET call to /v1/nodes returns the correct number of clients and they are all eligible and ready" -} - -quality "nomad_node_eligibility" { - description = "A GET call to /v1/node/:node-id returns the same node.SchedulingEligibility before and after a server upgrade" -} - -quality "nomad_node_metadata" { - description = "A GET call to /v1/node/:node-id returns the same node.Meta for each client before and after a client upgrade" -} - -quality "nomad_job_status" { - description = "A GET call to /v1/jobs returns the correct number of jobs and they are all running" -} - -quality "nomad_register_job" { - description = "A POST call to /v1/jobs results in a new job running and allocations being started accordingly" -} - -quality "nomad_reschedule_alloc" { - description = "A POST / PUT call to /v1/allocation/:alloc_id/stop results in the stopped allocation being rescheduled" -} - -quality "nomad_restore_snapshot" { - description = "A node can be restored from a snapshot built on a previous version" -} - -quality "nomad_allocs_status" { - description = "A GET call to /v1/allocs returns the correct number of allocations and they are all running" -} - -quality "nomad_alloc_reconect" { - description = "A GET call to /v1/alloc/:alloc_id will return the same alloc.CreateTime for each allocation before and after a client upgrade" -} - diff --git a/enos/enos-scenario-upgrade.hcl b/enos/enos-scenario-upgrade.hcl deleted file mode 100644 index 28aa6fd2d6d..00000000000 --- a/enos/enos-scenario-upgrade.hcl +++ /dev/null @@ -1,380 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -scenario "upgrade" { - description = <<-EOF - The upgrade scenario verifies in-place upgrades between previously released versions of Nomad - against another candidate build. - EOF - - matrix { - arch = ["amd64"] - //edition = ["ce", "ent"] - //os = ["linux", "windows"] - edition = ["ent"] - os = ["linux"] - - exclude { - os = ["windows"] - arch = ["arm64"] - } - } - - providers = [ - provider.aws.default, - ] - - locals { - cluster_name = "mcj-${matrix.os}-${matrix.arch}-${matrix.edition}-${var.product_version}" - linux_count = matrix.os == "linux" ? "4" : "0" - windows_count = matrix.os == "windows" ? "4" : "0" - arch = matrix.arch - clients_count = local.linux_count + local.windows_count - test_product_version = matrix.edition == "ent" ? "${var.product_version}+ent" : "${var.product_version}" - test_upgrade_version = matrix.edition == "ent" ? "${var.upgrade_version}+ent" : "${var.upgrade_version}" - } - - step "copy_initial_binary" { - description = <<-EOF - Determine which Nomad artifact we want to use for the scenario, depending on the - 'arch', 'edition' and 'os' and bring it from the artifactory to the local instance - running enos. 
- EOF - - module = module.build_artifactory - - variables { - artifactory_username = var.artifactory_username - artifactory_token = var.artifactory_token - arch = local.arch - edition = matrix.edition - product_version = var.product_version - os = matrix.os - download_binary_path = "${var.download_binary_path}/${matrix.os}-${matrix.arch}-${matrix.edition}-${var.product_version}" - } - } - - step "provision_cluster" { - depends_on = [step.copy_initial_binary] - - description = <<-EOF - Using the binary from the previous step, provision a Nomad cluster using the e2e - module. - EOF - - module = module.provision_cluster - variables { - name = local.cluster_name - nomad_local_binary = step.copy_initial_binary.nomad_local_binary - server_count = var.server_count - client_count_linux = local.linux_count - client_count_windows_2016 = local.windows_count - nomad_license = var.nomad_license - consul_license = var.consul_license - volumes = false - region = var.aws_region - instance_arch = matrix.arch - } - } - - step "run_initial_workloads" { - depends_on = [step.provision_cluster] - - description = <<-EOF - Verify the health of the cluster by running new workloads - EOF - - module = module.run_workloads - variables { - nomad_addr = step.provision_cluster.nomad_addr - ca_file = step.provision_cluster.ca_file - cert_file = step.provision_cluster.cert_file - key_file = step.provision_cluster.key_file - nomad_token = step.provision_cluster.nomad_token - } - - verifies = [ - quality.nomad_register_job, - ] - } - - step "initial_test_cluster_health" { - depends_on = [step.run_initial_workloads] - - description = <<-EOF - Verify the health of the cluster by checking the status of all servers, nodes, - jobs and allocs and stopping random allocs to check for correct reschedules" - EOF - - module = module.test_cluster_health - variables { - # connecting to the Nomad API - nomad_addr = step.provision_cluster.nomad_addr - ca_file = step.provision_cluster.ca_file - cert_file = step.provision_cluster.cert_file - key_file = step.provision_cluster.key_file - nomad_token = step.provision_cluster.nomad_token - - # configuring assertions - server_count = var.server_count - client_count = local.clients_count - jobs_count = step.run_initial_workloads.jobs_count - alloc_count = step.run_initial_workloads.allocs_count - servers = step.provision_cluster.servers - clients_version = local.test_product_version - servers_version = local.test_product_version - } - - verifies = [ - quality.nomad_agent_info, - quality.nomad_agent_info_self, - quality.nomad_nodes_status, - quality.nomad_job_status, - quality.nomad_allocs_status, - quality.nomad_reschedule_alloc, - ] - } - - step "fetch_upgrade_binary" { - depends_on = [step.provision_cluster, step.initial_test_cluster_health] - - description = <<-EOF - Bring the new upgraded binary from the artifactory to the instance running enos. - EOF - - module = module.build_artifactory - - variables { - artifactory_username = var.artifactory_username - artifactory_token = var.artifactory_token - arch = local.arch - edition = matrix.edition - product_version = var.upgrade_version - os = matrix.os - download_binary = false - } - } - - step "upgrade_servers" { - depends_on = [step.fetch_upgrade_binary] - - description = <<-EOF - Takes the servers one by one, makes a snapshot, updates the binary with the - new one previously fetched and restarts the servers. - - Important: The path where the binary will be placed is hardcoded to match - what the provision-cluster module does. 
It can be configurable in the future - but for now it is: - - * "C:/opt/nomad.exe" for windows - * "/usr/local/bin/nomad" for linux - - To ensure the servers are upgraded one by one, they use the depends_on meta, - there are ONLY 3 SERVERS being upgraded in the module. - EOF - module = module.upgrade_servers - - verifies = [ - quality.nomad_agent_info, - quality.nomad_agent_info_self, - quality.nomad_restore_snapshot - ] - - variables { - # connecting to the Nomad API - nomad_addr = step.provision_cluster.nomad_addr - ca_file = step.provision_cluster.ca_file - cert_file = step.provision_cluster.cert_file - key_file = step.provision_cluster.key_file - nomad_token = step.provision_cluster.nomad_token - - # driving the upgrade - servers = step.provision_cluster.servers - ssh_key_path = step.provision_cluster.ssh_key_file - artifactory_username = var.artifactory_username - artifactory_token = var.artifactory_token - artifact_url = step.fetch_upgrade_binary.artifact_url - artifact_sha = step.fetch_upgrade_binary.artifact_sha - } - } - - step "server_upgrade_test_cluster_health" { - depends_on = [step.upgrade_servers] - - description = <<-EOF - Verify the health of the cluster by checking the status of all servers, nodes, - jobs and allocs and stopping random allocs to check for correct reschedules" - EOF - - module = module.test_cluster_health - variables { - # connecting to the Nomad API - nomad_addr = step.provision_cluster.nomad_addr - ca_file = step.provision_cluster.ca_file - cert_file = step.provision_cluster.cert_file - key_file = step.provision_cluster.key_file - nomad_token = step.provision_cluster.nomad_token - - # configuring assertions - server_count = var.server_count - client_count = local.clients_count - jobs_count = step.run_initial_workloads.jobs_count - alloc_count = step.run_initial_workloads.allocs_count - servers = step.provision_cluster.servers - clients_version = local.test_product_version - servers_version = local.test_upgrade_version - } - - verifies = [ - quality.nomad_agent_info, - quality.nomad_agent_info_self, - quality.nomad_nodes_status, - quality.nomad_job_status, - quality.nomad_allocs_status, - quality.nomad_reschedule_alloc, - ] - } - - /* step "run_workloads" { - depends_on = [step.server_upgrade_test_cluster_health] - - description = <<-EOF - Verify the health of the cluster by running new workloads - EOF - - module = module.run_workloads - variables { - nomad_addr = step.provision_cluster.nomad_addr - ca_file = step.provision_cluster.ca_file - cert_file = step.provision_cluster.cert_file - key_file = step.provision_cluster.key_file - nomad_token = step.provision_cluster.nomad_token - } - - verifies = [ - quality.nomad_register_job, - ] - } - */ - step "upgrade_clients" { - depends_on = [step.server_upgrade_test_cluster_health] - - description = <<-EOF - Takes the clients one by one, writes some dynamic metadata to them, - updates the binary with the new one previously fetched and restarts them. - - Important: The path where the binary will be placed is hardcoded to match - what the provision-cluster module does. It can be configurable in the future - but for now it is: - - * "C:/opt/nomad.exe" for windows - * "/usr/local/bin/nomad" for linux - - To ensure the clients are upgraded one by one, they use the depends_on meta, - there are ONLY 4 CLIENTS being upgraded in the module. 
- EOF - - module = module.upgrade_clients - - verifies = [ - quality.nomad_nodes_status, - quality.nomad_job_status, - quality.nomad_node_metadata - ] - - variables { - # connecting to the Nomad API - nomad_addr = step.provision_cluster.nomad_addr - ca_file = step.provision_cluster.ca_file - cert_file = step.provision_cluster.cert_file - key_file = step.provision_cluster.key_file - nomad_token = step.provision_cluster.nomad_token - - # configuring assertions - clients = step.provision_cluster.clients - ssh_key_path = step.provision_cluster.ssh_key_file - artifactory_username = var.artifactory_username - artifactory_token = var.artifactory_token - artifact_url = step.fetch_upgrade_binary.artifact_url - artifact_sha = step.fetch_upgrade_binary.artifact_sha - } - } - - step "client_upgrade_test_cluster_health" { - depends_on = [step.upgrade_clients] - - description = <<-EOF - Verify the health of the cluster by checking the status of all servers, nodes, - jobs and allocs and stopping random allocs to check for correct reschedules" - EOF - - module = module.test_cluster_health - variables { - # connecting to the Nomad API - nomad_addr = step.provision_cluster.nomad_addr - ca_file = step.provision_cluster.ca_file - cert_file = step.provision_cluster.cert_file - key_file = step.provision_cluster.key_file - nomad_token = step.provision_cluster.nomad_token - - # configuring assertions - server_count = var.server_count - client_count = local.clients_count - jobs_count = step.run_initial_workloads.jobs_count - alloc_count = step.run_initial_workloads.allocs_count - servers = step.provision_cluster.servers - clients_version = local.test_upgrade_version - servers_version = local.test_upgrade_version - } - - verifies = [ - quality.nomad_agent_info, - quality.nomad_agent_info_self, - quality.nomad_nodes_status, - quality.nomad_job_status, - quality.nomad_allocs_status, - quality.nomad_reschedule_alloc, - ] - } - - output "servers" { - value = step.provision_cluster.servers - } - - output "linux_clients" { - value = step.provision_cluster.linux_clients - } - - output "windows_clients" { - value = step.provision_cluster.windows_clients - } - - output "message" { - value = step.provision_cluster.message - } - - output "nomad_addr" { - value = step.provision_cluster.nomad_addr - } - - output "ca_file" { - value = step.provision_cluster.ca_file - } - - output "cert_file" { - value = step.provision_cluster.cert_file - } - - output "key_file" { - value = step.provision_cluster.key_file - } - - output "ssh_key_file" { - value = step.provision_cluster.ssh_key_file - } - - output "nomad_token" { - value = step.provision_cluster.nomad_token - sensitive = true - } -} diff --git a/enos/enos-terraform.hcl b/enos/enos-terraform.hcl deleted file mode 100644 index 618630eefa7..00000000000 --- a/enos/enos-terraform.hcl +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -terraform "default" { - required_version = ">= 1.2.0" - - required_providers { - aws = { - source = "hashicorp/aws" - } - - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - version = ">= 0.4.0" - } - } -} diff --git a/enos/enos-vars.hcl b/enos/enos-vars.hcl deleted file mode 100644 index 18058bb83a6..00000000000 --- a/enos/enos-vars.hcl +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -# Variables for the fetch_artifactory module -variable "artifactory_username" { - type = string - description = "The username to use when connecting to artifactory" - default = null -} - -variable "artifactory_token" { - type = string - description = "The token to use when connecting to artifactory" - default = null - sensitive = true -} - -variable "product_version" { - description = "The version of Nomad we are starting from" - type = string - default = null -} - -variable "upgrade_version" { - description = "The version of Nomad we want to upgrade the cluster to" - type = string - default = null -} - -variable "download_binary_path" { - description = "The path to a local directory where binaries will be downloaded to provision" -} - -# Variables for the provision_cluster module - -variable "nomad_license" { - type = string - description = "If nomad_license is set, deploy a license" - default = "" -} - -variable "consul_license" { - type = string - description = "If consul_license is set, deploy a license" - default = "" -} - -variable "server_count" { - description = "The number of servers to provision." - default = "3" -} - -variable "aws_region" { - description = "The AWS region to deploy to." - default = "us-east-1" -} diff --git a/enos/enos.vars.example.hcl b/enos/enos.vars.example.hcl deleted file mode 100644 index 86ef79781ca..00000000000 --- a/enos/enos.vars.example.hcl +++ /dev/null @@ -1,11 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -artifactory_username = "" -artifactory_token = "" -product_version = "1.8.9" # starting version -upgrade_version = "1.9.4" # version to upgrade to -download_binary_path = "/home/foo/Downloads/nomad" # directory on your machine to download binaries -nomad_license = "" -consul_license = "" -aws_region = "us-east-1" diff --git a/enos/modules/fetch_artifactory/locals.tf b/enos/modules/fetch_artifactory/locals.tf deleted file mode 100644 index 704a8fc240b..00000000000 --- a/enos/modules/fetch_artifactory/locals.tf +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -locals { - - path = var.edition == "ce" ? "nomad/*" : "nomad-enterprise/*" - - artifact_version = var.edition == "ce" ? "${var.product_version}" : "${var.product_version}+ent" - - package_extensions = { - amd64 = { - linux = "_linux_amd64.zip" - windows = "_windows_amd64.zip" - } - - arm64 = { - linux = "_linux_arm64.zip" - } - } - - artifact_name = "nomad_${local.artifact_version}${local.package_extensions[var.arch][var.os]}" - artifact_zip = "${local.artifact_name}.zip" -} diff --git a/enos/modules/fetch_artifactory/main.tf b/enos/modules/fetch_artifactory/main.tf deleted file mode 100644 index 2fb57323b39..00000000000 --- a/enos/modules/fetch_artifactory/main.tf +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - } - } -} - -data "enos_artifactory_item" "nomad" { - username = var.artifactory_username - token = var.artifactory_token - host = var.artifactory_host - repo = var.artifactory_repo - path = local.path - name = local.artifact_name - - properties = tomap({ - "product-name" = var.edition == "ce" ? "nomad" : "nomad-enterprise" - }) -} - -resource "enos_local_exec" "install_binary" { - count = var.download_binary ? 
1 : 0 - - environment = { - URL = data.enos_artifactory_item.nomad.results[0].url - BINARY_PATH = var.download_binary_path - TOKEN = var.artifactory_token - LOCAL_ZIP = local.artifact_zip - } - - scripts = [abspath("${path.module}/scripts/install.sh")] -} diff --git a/enos/modules/fetch_artifactory/outputs.tf b/enos/modules/fetch_artifactory/outputs.tf deleted file mode 100644 index 2422de088f1..00000000000 --- a/enos/modules/fetch_artifactory/outputs.tf +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -output "nomad_local_binary" { - description = "Path where the binary will be placed" - value = var.os == "windows" ? "${var.download_binary_path}/nomad.exe" : "${var.download_binary_path}/nomad" -} - -output "artifact_url" { - description = "URL to fetch the artifact" - value = data.enos_artifactory_item.nomad.results[0].url -} - -output "artifact_sha" { - description = "sha256 to fetch the artifact" - value = data.enos_artifactory_item.nomad.results[0].sha256 -} diff --git a/enos/modules/fetch_artifactory/scripts/install.sh b/enos/modules/fetch_artifactory/scripts/install.sh deleted file mode 100755 index de49644e3c1..00000000000 --- a/enos/modules/fetch_artifactory/scripts/install.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -set -euo pipefail - -wget --header="Authorization: Bearer $TOKEN" -O "$LOCAL_ZIP" "$URL" - -echo "File downloaded to $LOCAL_ZIP" - -mkdir -p "$BINARY_PATH" -unzip -o "$LOCAL_ZIP" -d "$BINARY_PATH" - -echo "File unzipped to $BINARY_PATH" - -rm "$LOCAL_ZIP" diff --git a/enos/modules/fetch_artifactory/variables.tf b/enos/modules/fetch_artifactory/variables.tf deleted file mode 100644 index e9d419bd6f4..00000000000 --- a/enos/modules/fetch_artifactory/variables.tf +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -variable "artifactory_username" { - type = string - description = "The username to use when connecting to artifactory" - default = null -} - -variable "artifactory_token" { - type = string - description = "The token to use when connecting to artifactory" - default = null - sensitive = true -} - -variable "artifactory_host" { - type = string - description = "The artifactory host to search for Nomad artifacts" - default = "https://artifactory.hashicorp.engineering/artifactory" -} - -variable "artifactory_repo" { - type = string - description = "The artifactory repo to search for Nomad artifacts" - default = "hashicorp-crt-staging-local*" -} - -variable "edition" { - type = string - description = "The edition of the binary to search (one of ce or ent)" - - validation { - condition = contains(["ent", "ce"], var.edition) - error_message = "must be one of ent or ce" - } -} - -variable "os" { - type = string - description = "The operative system the binary is needed for" - default = "linux" -} - -variable "product_version" { - description = "The version of Nomad we are testing" - type = string - default = null -} - -variable "arch" { - description = "The artifactory path to search for Nomad artifacts" - type = string -} - -variable "download_binary" { - description = "Used to control if the artifact should be downloaded to the local instance or not" - default = true -} - -variable "download_binary_path" { - description = "A directory path on the local instance where the artifacts will be installed (requires download_binary is true)" - type = string - default = "/home/ubuntu/nomad" -} diff --git a/enos/modules/run_workloads/jobs/docker-service.nomad.hcl b/enos/modules/run_workloads/jobs/docker-service.nomad.hcl deleted file mode 100644 index 2cec24fbd20..00000000000 --- a/enos/modules/run_workloads/jobs/docker-service.nomad.hcl +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 -variable "alloc_count" { - type = number - default = 1 -} - -job "service-docker" { - - group "service-docker" { - count = var.alloc_count - task "alpine" { - driver = "docker" - - config { - image = "alpine:latest" - command = "sh" - args = ["-c", "while true; do sleep 30000; done"] - - } - - resources { - cpu = 100 - memory = 128 - } - } - } -} diff --git a/enos/modules/run_workloads/jobs/raw-exec-service.nomad.hcl b/enos/modules/run_workloads/jobs/raw-exec-service.nomad.hcl deleted file mode 100644 index 0ceeb735972..00000000000 --- a/enos/modules/run_workloads/jobs/raw-exec-service.nomad.hcl +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -variable "alloc_count" { - type = number - default = 1 -} - -job "service-raw" { - - group "service-raw" { - count = var.alloc_count - task "raw" { - driver = "raw_exec" - - config { - command = "bash" - args = ["-c", "./local/runme.sh"] - } - - template { - data = < /dev/null 2>&1; do - echo "Waiting for Nomad API..." - - current_time=$(date +%s) - elapsed_time=$((current_time - start_time)) - if [ "$elapsed_time" -ge "$TIMEOUT" ]; then - echo "Error: Nomad API did not become available within $TIMEOUT seconds." - exit 1 - fi - - sleep "$INTERVAL" -done - -echo "Nomad API is available!" 
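Outside of Enos, the workload specs above can be exercised directly against a running cluster; a rough sketch, assuming the usual connection variables (NOMAD_ADDR, NOMAD_TOKEN, TLS paths) are exported and paths are relative to the run_workloads module:

```
#!/usr/bin/env bash
# Sketch: run the workload specs by hand and check that their allocations
# come up. The alloc_count value is illustrative.
set -euo pipefail

nomad job run -var "alloc_count=3" jobs/docker-service.nomad.hcl
nomad job run -var "alloc_count=3" jobs/raw-exec-service.nomad.hcl

# Each job should report one running allocation per count.
nomad job status service-docker
nomad job status service-raw
```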
diff --git a/enos/modules/run_workloads/variables.tf b/enos/modules/run_workloads/variables.tf deleted file mode 100644 index 6281d988c32..00000000000 --- a/enos/modules/run_workloads/variables.tf +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -variable "nomad_addr" { - description = "The Nomad API HTTP address." - type = string - default = "http://localhost:4646" -} - -variable "ca_file" { - description = "A local file path to a PEM-encoded certificate authority used to verify the remote agent's certificate" - type = string -} - -variable "cert_file" { - description = "A local file path to a PEM-encoded certificate provided to the remote agent. If this is specified, key_file or key_pem is also required" - type = string -} - -variable "key_file" { - description = "A local file path to a PEM-encoded private key. This is required if cert_file or cert_pem is specified." - type = string -} - -variable "nomad_token" { - description = "The Secret ID of an ACL token to make requests with, for ACL-enabled clusters." - type = string - sensitive = true -} - -variable "workloads" { - description = "A map of workloads to provision" - - type = map(object({ - job_spec = string - alloc_count = number - })) - - default = { - service_raw_exec = { job_spec = "jobs/raw-exec-service.nomad.hcl", alloc_count = 3 } - service_docker = { job_spec = "jobs/docker-service.nomad.hcl", alloc_count = 3 } - } -} diff --git a/enos/modules/test_cluster_health/main.tf b/enos/modules/test_cluster_health/main.tf deleted file mode 100644 index f7c356d7b59..00000000000 --- a/enos/modules/test_cluster_health/main.tf +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - } - } -} - -locals { - servers_addr = join(" ", var.servers) - nomad_env = { - NOMAD_ADDR = var.nomad_addr - NOMAD_CACERT = var.ca_file - NOMAD_CLIENT_CERT = var.cert_file - NOMAD_CLIENT_KEY = var.key_file - NOMAD_TOKEN = var.nomad_token - } -} - -resource "enos_local_exec" "wait_for_nomad_api" { - environment = local.nomad_env - - scripts = [abspath("${path.module}/scripts/wait_for_nomad_api.sh")] -} - -resource "enos_local_exec" "run_tests" { - environment = merge( - local.nomad_env, { - SERVER_COUNT = var.server_count - CLIENT_COUNT = var.client_count - JOB_COUNT = var.jobs_count - ALLOC_COUNT = var.alloc_count - SERVERS = local.servers_addr - }) - - scripts = [ - abspath("${path.module}/scripts/servers.sh"), - abspath("${path.module}/scripts/clients.sh"), - abspath("${path.module}/scripts/jobs.sh"), - abspath("${path.module}/scripts/allocs.sh") - ] -} - -resource "enos_local_exec" "verify_versions" { - environment = merge( - local.nomad_env, { - SERVERS_VERSION = var.servers_version - CLIENTS_VERSION = var.clients_version - }) - - scripts = [ - abspath("${path.module}/scripts/versions.sh"), - ] -} - - diff --git a/enos/modules/test_cluster_health/scripts/allocs.sh b/enos/modules/test_cluster_health/scripts/allocs.sh deleted file mode 100755 index f8cc5abe5d9..00000000000 --- a/enos/modules/test_cluster_health/scripts/allocs.sh +++ /dev/null @@ -1,96 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. 
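The health-check scripts that follow are driven entirely by environment variables, so they can also be run by hand against a cluster when debugging; a sketch that mirrors the environment the test_cluster_health module builds for enos_local_exec, with illustrative values:

```
#!/usr/bin/env bash
# Sketch: run the cluster health checks outside of Terraform/Enos.
# All values below are illustrative placeholders.
set -euo pipefail

export NOMAD_ADDR="https://10.0.0.10:4646"
export NOMAD_CACERT="ca.pem" NOMAD_CLIENT_CERT="cli.pem" NOMAD_CLIENT_KEY="cli-key.pem"
export NOMAD_TOKEN="..."
export SERVER_COUNT=3 CLIENT_COUNT=4 JOB_COUNT=2 ALLOC_COUNT=6
export SERVERS="10.0.0.10 10.0.0.11 10.0.0.12"

./scripts/wait_for_nomad_api.sh
./scripts/servers.sh
./scripts/clients.sh
./scripts/jobs.sh
./scripts/allocs.sh
```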
-# SPDX-License-Identifier: BUSL-1.1 - -set -euo pipefail - -error_exit() { - printf 'Error: %s' "${1}" - exit 1 -} - -MAX_WAIT_TIME=120 -POLL_INTERVAL=2 - -elapsed_time=0 - -# Quality: nomad_allocs_status: A GET call to /v1/allocs returns the correct number of allocations and they are all running - -running_allocs= -allocs_length= - -checkAllocsCount() { - local allocs - allocs=$(nomad alloc status -json) || error_exit "Failed to check alloc status" - - running_allocs=$(echo "$allocs" | jq '[.[] | select(.ClientStatus == "running")]') - allocs_length=$(echo "$running_allocs" | jq 'length') \ - || error_exit "Invalid alloc status -json output" - - if [ "$allocs_length" -eq "$ALLOC_COUNT" ]; then - return 0 - fi - - return 1 -} - -while true; do - checkAllocsCount && break - - if [ "$elapsed_time" -ge "$MAX_WAIT_TIME" ]; then - error_exit "Some allocs are not running:\n$(nomad alloc status -json | jq -r '.[] | select(.ClientStatus != "running") | .ID')" - fi - - echo "Running allocs: $running_allocs, expected $ALLOC_COUNT. Waiting for $elapsed_time Retrying in $POLL_INTERVAL seconds..." - sleep $POLL_INTERVAL - elapsed_time=$((elapsed_time + POLL_INTERVAL)) -done - -echo "All ALLOCS are running." - -# Quality: nomad_reschedule_alloc: A POST / PUT call to /v1/allocation/:alloc_id/stop results in the stopped allocation being rescheduled - -random_index=$((RANDOM % allocs_length)) -random_alloc_id=$(echo "$running_allocs" | jq -r ".[${random_index}].ID") - -nomad alloc stop "$random_alloc_id" \ - || error_exit "Failed to stop allocation $random_alloc_id" - -echo "Waiting for allocation $random_alloc_id to reach 'complete' status..." -elapsed_time=0 - -while true; do - alloc_status=$(nomad alloc status -json "$random_alloc_id" | jq -r '.ClientStatus') - if [ "$alloc_status" == "complete" ]; then - break - fi - - if [ "$elapsed_time" -ge "$MAX_WAIT_TIME" ]; then - error_exit "Allocation $random_alloc_id did not reach 'complete' status within $MAX_WAIT_TIME seconds." - fi - - echo "Current status: $alloc_status, not 'complete'. Waiting for $elapsed_time Retrying in $POLL_INTERVAL seconds..." - sleep $POLL_INTERVAL - elapsed_time=$((elapsed_time + POLL_INTERVAL)) -done - -echo "Waiting for all the allocations to be running again" -elapsed_time=0 - -while true; do - # reset - running_allocs= - allocs_length= - - checkAllocsCount && break - - if [ "$elapsed_time" -ge "$MAX_WAIT_TIME" ]; then - error_exit "Expected $ALLOC_COUNT running allocations, found $running_allocs after $elapsed_time seconds" - fi - - echo "Expected $ALLOC_COUNT running allocations, found $running_allocs Retrying in $POLL_INTERVAL seconds..." - sleep $POLL_INTERVAL - elapsed_time=$((elapsed_time + POLL_INTERVAL)) -done - -echo "Alloc successfully rescheduled" diff --git a/enos/modules/test_cluster_health/scripts/clients.sh b/enos/modules/test_cluster_health/scripts/clients.sh deleted file mode 100755 index 3a5e480ff70..00000000000 --- a/enos/modules/test_cluster_health/scripts/clients.sh +++ /dev/null @@ -1,57 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. 
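The allocation checks above go through the CLI; the same nomad_allocs_status quality can be spot-checked at the API level, a sketch with an illustrative expected count:

```
#!/usr/bin/env bash
# Sketch: count running allocations straight from the API, mirroring what
# allocs.sh asserts. ALLOC_COUNT is illustrative.
set -euo pipefail

ALLOC_COUNT=6
running=$(nomad operator api /v1/allocations |
  jq '[.[] | select(.ClientStatus == "running")] | length')

if [ "$running" -ne "$ALLOC_COUNT" ]; then
  echo "expected $ALLOC_COUNT running allocations, found $running" >&2
  exit 1
fi
echo "all $running allocations are running"
```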
-# SPDX-License-Identifier: BUSL-1.1 - -set -euo pipefail - -error_exit() { - printf 'Error: %s' "${1}" - exit 1 -} - -# Quality: "nomad_CLIENTS_status: A GET call to /v1/nodes returns the correct number of clients and they are all eligible and ready" - -MAX_WAIT_TIME=20 # Maximum wait time in seconds -POLL_INTERVAL=2 # Interval between status checks - -elapsed_time=0 -ready_clients= -last_error= - -checkReadyClients() { - local clients_length - - ready_clients=$(nomad node status -json | jq '[.[] | select(.Status == "ready")]') || - error_exit "Could not query node status" - - clients_length=$(echo "$ready_clients" | jq 'length') - if [ "$clients_length" -eq "$CLIENT_COUNT" ]; then - last_error= - return 0 - fi - - last_error="Unexpected number of ready clients: $clients_length" - return 1 -} - -checkEligibleClients() { - echo "$ready_clients" | jq -e ' - map(select(.SchedulingEligibility != "eligible")) | length == 0' && return 0 - - last_error=$(echo "$ready_clients" | jq -r ' - map(select(.SchedulingEligibility != "eligible")) | "\(.[].ID) is ineligible"') - return 1 -} - -while true; do - checkReadyClients && checkEligibleClients && break - - if [ "$elapsed_time" -ge "$MAX_WAIT_TIME" ]; then - error_exit "$last_error" - fi - - sleep "$POLL_INTERVAL" - elapsed_time=$((elapsed_time + POLL_INTERVAL)) -done - -echo "All clients are eligible and running." diff --git a/enos/modules/test_cluster_health/scripts/jobs.sh b/enos/modules/test_cluster_health/scripts/jobs.sh deleted file mode 100755 index 167a6650f1b..00000000000 --- a/enos/modules/test_cluster_health/scripts/jobs.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -set -euo pipefail - -error_exit() { - printf 'Error: %s' "${1}" - exit 1 -} - -# Quality: nomad_job_status: A GET call to /v1/jobs returns the correct number of jobs and they are all running. - -jobs_length=$(nomad job status| awk '$4 == "running" {count++} END {print count+0}') - -if [ -z "$jobs_length" ]; then - error_exit "No jobs found" -fi - -if [ "$jobs_length" -ne "$JOB_COUNT" ]; then - error_exit "The number of running jobs ($jobs_length) does not match the expected count ($JOB_COUNT)\n$(nomad job status | awk 'NR > 1 && $4 != "running" {print $4}')" -fi - -echo "All JOBS are running." diff --git a/enos/modules/test_cluster_health/scripts/servers.sh b/enos/modules/test_cluster_health/scripts/servers.sh deleted file mode 100755 index 39d6953897e..00000000000 --- a/enos/modules/test_cluster_health/scripts/servers.sh +++ /dev/null @@ -1,92 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -set -euo pipefail - -error_exit() { - printf 'Error: %s' "${1}" - exit 1 -} - -MAX_WAIT_TIME=40 -POLL_INTERVAL=2 - -elapsed_time=0 -last_error= -leader_last_index= -leader_last_term= - -# Quality: nomad_agent_info: A GET call to /v1/agent/members returns the correct number of running servers and they are all alive - -checkAutopilotHealth() { - local autopilotHealth servers_healthy leader - autopilotHealth=$(nomad operator autopilot health -json) || return 1 - servers_healthy=$(echo "$autopilotHealth" | - jq -r '[.Servers[] | select(.Healthy == true) | .ID] | length') - - if [ "$servers_healthy" -eq 0 ]; then - error_exit "No servers found." 
- fi - - if [ "$servers_healthy" -eq "$SERVER_COUNT" ]; then - leader=$(echo "$autopilotHealth" | jq -r '.Servers[] | select(.Leader == true)') - leader_last_index=$(echo "$leader" | jq -r '.LastIndex') - leader_last_term=$(echo "$leader" | jq -r '.LastTerm') - return 0 - fi - - last_error="Expected $SERVER_COUNT healthy servers but have $servers_healthy" - return 1 -} - -while true; do - checkAutopilotHealth && break - - if [ "$elapsed_time" -ge "$MAX_WAIT_TIME" ]; then - error_exit "$last_error after $elapsed_time seconds." - fi - - echo "$last_error after $elapsed_time seconds. Retrying in $POLL_INTERVAL seconds..." - sleep "$POLL_INTERVAL" - elapsed_time=$((elapsed_time + POLL_INTERVAL)) -done - -# Quality: nomad_agent_info_self: A GET call to /v1/agent/self against every server returns the same last_log_index as the leader" -# We use the leader's last log index to use as teh measure for the other servers. - -checkServerHealth() { - local ip node_info - ip=$1 - echo "Checking server health for $ip" - - node_info=$(nomad agent-info -address "https://$ip:4646" -json) \ - || error_exit "Unable to get info for node at $ip" - - last_log_index=$(echo "$node_info" | jq -r '.stats.raft.last_log_index') - last_log_term=$(echo "$node_info" | jq -r '.stats.raft.last_log_term') - - if [ "$last_log_index" -ge "$leader_last_index" ] && - [ "$last_log_term" -ge "$leader_last_term" ]; then - return 0 - fi - - last_error="Expected node at $ip to have last log index $leader_last_index and last term $leader_last_term, but found $last_log_index and $last_log_term" - return 1 -} - -for ip in $SERVERS; do - while true; do - checkServerHealth "$ip" && break - - if [ "$elapsed_time" -ge "$MAX_WAIT_TIME" ]; then - error_exit "$last_error after $elapsed_time seconds." - fi - - echo "$last_error after $elapsed_time seconds. Retrying in $POLL_INTERVAL seconds..." - sleep "$POLL_INTERVAL" - elapsed_time=$((elapsed_time + POLL_INTERVAL)) - done -done - -echo "All servers are alive and up to date." diff --git a/enos/modules/test_cluster_health/scripts/versions.sh b/enos/modules/test_cluster_health/scripts/versions.sh deleted file mode 100755 index 54f580c3eaf..00000000000 --- a/enos/modules/test_cluster_health/scripts/versions.sh +++ /dev/null @@ -1,52 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -set -euo pipefail - -error_exit() { - printf 'Error: %s' "${1}" - exit 1 -} - -# Servers version -server_versions=$(nomad server members -json | jq -r '[.[] | select(.Status == "alive") | .Tags.build] | unique') - -if [ "$(echo "$server_versions" | jq 'length')" -eq 0 ]; then - error_exit "Unable to get servers version" -fi - -if [ "$(echo "$server_versions" | jq 'length')" -ne 1 ]; then - error_exit "Servers are running different versions: $(echo "$server_versions" | jq -c '.')" -fi - -final_version=$(echo "$server_versions" | jq -r '.[0]'| xargs) -SERVERS_VERSION=$(echo "$SERVERS_VERSION" | xargs) - -if [ "$final_version" != "$SERVERS_VERSION" ]; then - error_exit "Servers are not running the correct version. 
Found: $final_version, Expected: $SERVERS_VERSION" -fi - -echo "All servers are running Nomad version $SERVERS_VERSION" - -# Clients version -clients_versions=$(nomad node status -json | jq -r '[.[] | select(.Status == "ready") | .Version] | unique') - - -if [ "$(echo "$clients_versions" | jq 'length')" -eq 0 ]; then - error_exit "Unable to get clients version" -fi - - -if [ "$(echo "$clients_versions" | jq 'length')" -ne 1 ]; then - error_exit "Clients are running different versions: $(echo "$clients_versions" | jq -c '.')" -fi - -final_version=$(echo "$clients_versions" | jq -r '.[0]'| xargs) -CLIENTS_VERSION=$(echo "$CLIENTS_VERSION" | xargs) - -if [ "$final_version" != "$CLIENTS_VERSION" ]; then - error_exit "Clients are not running the correct version. Found: $final_version, Expected: $CLIENTS_VERSION" -fi - -echo "All clients are running Nomad version $CLIENTS_VERSION" diff --git a/enos/modules/test_cluster_health/scripts/wait_for_nomad_api.sh b/enos/modules/test_cluster_health/scripts/wait_for_nomad_api.sh deleted file mode 100755 index cf38b0c6ab1..00000000000 --- a/enos/modules/test_cluster_health/scripts/wait_for_nomad_api.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -set -euo pipefail - -TIMEOUT=10 -INTERVAL=2 - -start_time=$(date +%s) - -while ! nomad server members > /dev/null 2>&1; do - echo "Waiting for Nomad API..." - - current_time=$(date +%s) - elapsed_time=$((current_time - start_time)) - if [ "$elapsed_time" -ge "$TIMEOUT" ]; then - echo "Error: Nomad API did not become available within $TIMEOUT seconds." - exit 1 - fi - - sleep "$INTERVAL" -done - -echo "Nomad API is available!" diff --git a/enos/modules/test_cluster_health/variables.tf b/enos/modules/test_cluster_health/variables.tf deleted file mode 100644 index f3f094ae20c..00000000000 --- a/enos/modules/test_cluster_health/variables.tf +++ /dev/null @@ -1,63 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -variable "nomad_addr" { - description = "The Nomad API HTTP address." - type = string - default = "http://localhost:4646" -} - -variable "ca_file" { - description = "A local file path to a PEM-encoded certificate authority used to verify the remote agent's certificate" - type = string -} - -variable "cert_file" { - description = "A local file path to a PEM-encoded certificate provided to the remote agent. If this is specified, key_file or key_pem is also required" - type = string -} - -variable "key_file" { - description = "A local file path to a PEM-encoded private key. This is required if cert_file or cert_pem is specified." - type = string -} - -variable "nomad_token" { - description = "The Secret ID of an ACL token to make requests with, for ACL-enabled clusters." - type = string -} - -variable "server_count" { - description = "The expected number of servers." - type = number -} - -variable "client_count" { - description = "The expected number of Ubuntu clients." 
- type = number -} - -variable "jobs_count" { - description = "The number of jobs that should be running in the cluster" - type = number -} - -variable "alloc_count" { - description = "Number of allocation that should be running in the cluster" - type = number -} - -variable "clients_version" { - description = "Binary version running on the clients" - type = string -} - -variable "servers_version" { - description = "Binary version running on the servers" - type = string -} - -variable "servers" { - description = "List of public IP address of the nomad servers" - type = list -} diff --git a/enos/modules/upgrade_clients/main.tf b/enos/modules/upgrade_clients/main.tf deleted file mode 100644 index a0ae56a39c9..00000000000 --- a/enos/modules/upgrade_clients/main.tf +++ /dev/null @@ -1,211 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - } - } -} - -locals { - nomad_env = { - NOMAD_ADDR = var.nomad_addr - NOMAD_CACERT = var.ca_file - NOMAD_CLIENT_CERT = var.cert_file - NOMAD_CLIENT_KEY = var.key_file - NOMAD_TOKEN = var.nomad_token - } - - artifactory = { - username = var.artifactory_username - token = var.artifactory_token - url = var.artifact_url - sha256 = var.artifact_sha - } - - tls = { - ca_file = var.ca_file - cert_file = var.cert_file - key_file = var.key_file - } -} - -resource "enos_local_exec" "wait_for_nomad_api" { - environment = local.nomad_env - - scripts = [abspath("${path.module}/scripts/wait_for_nomad_api.sh")] -} - -#//////////////////////////////////////////////////////////////////////////////// -#// Upgrading the first client -#//////////////////////////////////////////////////////////////////////////////// - -resource "enos_local_exec" "set_metadata_on_first_client" { - depends_on = [enos_local_exec.wait_for_nomad_api] - - environment = merge( - local.nomad_env, - { - CLIENT_IP = var.clients[0] - } - ) - - scripts = [abspath("${path.module}/scripts/set_metadata.sh")] -} - -module upgrade_first_client { - depends_on = [enos_local_exec.set_metadata_on_first_client] - - source = "../upgrade_instance" - - nomad_addr = var.nomad_addr - tls = local.tls - nomad_token = var.nomad_token - platform = var.platform - instance_address = var.clients[0] - ssh_key_path = var.ssh_key_path - artifactory_release = local.artifactory -} - -resource "enos_local_exec" "verify_metadata_from_first_client" { - depends_on = [module.upgrade_first_client] - - environment = merge( - local.nomad_env, - { - CLIENT_IP = var.clients[0] - }) - - scripts = [abspath("${path.module}/scripts/verify_metadata.sh")] -} - -#//////////////////////////////////////////////////////////////////////////////// -#// Upgrading the second client -#//////////////////////////////////////////////////////////////////////////////// - -resource "enos_local_exec" "set_metadata_on_second_client" { - depends_on = [enos_local_exec.verify_metadata_from_first_client] - - environment = merge( - local.nomad_env, - { - CLIENT_IP = var.clients[1] - } - ) - - scripts = [abspath("${path.module}/scripts/set_metadata.sh")] -} - -module upgrade_second_client { - depends_on = [enos_local_exec.set_metadata_on_second_client] - - source = "../upgrade_instance" - - nomad_addr = var.nomad_addr - tls = local.tls - nomad_token = var.nomad_token - platform = var.platform - instance_address = var.clients[1] - ssh_key_path = var.ssh_key_path - artifactory_release = local.artifactory -} - -resource "enos_local_exec" 
"verify_metadata_from_second_client" { - depends_on = [module.upgrade_second_client] - - environment = merge( - local.nomad_env, - { - CLIENT_IP = var.clients[1] - }) - - scripts = [abspath("${path.module}/scripts/verify_metadata.sh")] -} - -#//////////////////////////////////////////////////////////////////////////////// -#// Upgrading the third client -#//////////////////////////////////////////////////////////////////////////////// - -resource "enos_local_exec" "set_metadata_on_third_client" { - depends_on = [enos_local_exec.verify_metadata_from_second_client] - - environment = merge( - local.nomad_env, - { - CLIENT_IP = var.clients[2] - } - ) - - scripts = [abspath("${path.module}/scripts/set_metadata.sh")] -} - -module upgrade_third_client { - depends_on = [enos_local_exec.set_metadata_on_third_client] - - source = "../upgrade_instance" - - nomad_addr = var.nomad_addr - tls = local.tls - nomad_token = var.nomad_token - platform = var.platform - instance_address = var.clients[2] - ssh_key_path = var.ssh_key_path - artifactory_release = local.artifactory -} - -resource "enos_local_exec" "verify_metadata_from_third_client" { - depends_on = [module.upgrade_third_client] - - environment = merge( - local.nomad_env, - { - CLIENT_IP = var.clients[2] - }) - - scripts = [abspath("${path.module}/scripts/verify_metadata.sh")] -} - -#//////////////////////////////////////////////////////////////////////////////// -#// Upgrading the forth client -#//////////////////////////////////////////////////////////////////////////////// - -resource "enos_local_exec" "set_metadata_on_forth_client" { - depends_on = [enos_local_exec.verify_metadata_from_third_client] - - environment = merge( - local.nomad_env, - { - CLIENT_IP = var.clients[3] - } - ) - - scripts = [abspath("${path.module}/scripts/set_metadata.sh")] -} - -module upgrade_forth_client { - depends_on = [enos_local_exec.set_metadata_on_forth_client] - - source = "../upgrade_instance" - - nomad_addr = var.nomad_addr - tls = local.tls - nomad_token = var.nomad_token - platform = var.platform - instance_address = var.clients[3] - ssh_key_path = var.ssh_key_path - artifactory_release = local.artifactory -} - -resource "enos_local_exec" "verify_metadata_from_forth_client" { - depends_on = [module.upgrade_forth_client] - - environment = merge( - local.nomad_env, - { - CLIENT_IP = var.clients[3] - }) - - scripts = [abspath("${path.module}/scripts/verify_metadata.sh")] -} diff --git a/enos/modules/upgrade_clients/scripts/set_metadata.sh b/enos/modules/upgrade_clients/scripts/set_metadata.sh deleted file mode 100755 index 45fb65981fd..00000000000 --- a/enos/modules/upgrade_clients/scripts/set_metadata.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -set -euo pipefail - -if ! client_id=$(nomad node status -address "http://$CLIENT_IP:4646" -self -json | jq '.ID' | tr -d '"'); then - echo "No client found at $CLIENT_IP" - exit 1 -fi - -if ! 
nomad node meta apply \ - -node-id "$client_id" node_ip="$CLIENT_IP" nomad_addr="$NOMAD_ADDR"; then - echo "Failed to set metadata for node: $client_id at $CLIENT_IP" - exit 1 -fi - -echo "Metadata updated in $client_id at $CLIENT_IP" diff --git a/enos/modules/upgrade_clients/scripts/verify_metadata.sh b/enos/modules/upgrade_clients/scripts/verify_metadata.sh deleted file mode 100755 index 898718b6960..00000000000 --- a/enos/modules/upgrade_clients/scripts/verify_metadata.sh +++ /dev/null @@ -1,68 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -set -euo pipefail - -error_exit() { - printf 'Error: %s' "${1}" - exit 1 -} - -MAX_WAIT_TIME=10 # Maximum wait time in seconds -POLL_INTERVAL=2 # Interval between status checks - -elapsed_time=0 -last_error= -client_id= - -checkClientReady() { - local client client_status - echo "Checking client health for $CLIENT_IP" - - client=$(nomad node status -address "https://$CLIENT_IP:4646" -self -json) || - error_exit "Unable to get info for node at $CLIENT_IP" - - client_status=$(echo "$client" | jq -r '.Status') - if [ "$client_status" == "ready" ]; then - client_id=$(echo "$client" | jq '.ID' | tr -d '"') - last_error= - return 0 - fi - - last_error="Node at $CLIENT_IP is ${client_status}, not ready" - return 1 -} - -while true; do - checkClientReady && break - if [ "$elapsed_time" -ge "$MAX_WAIT_TIME" ]; then - error_exit "$last_error within $elapsed_time seconds." - exit 1 - fi - - echo "$last_error within $elapsed_time seconds. Retrying in $POLL_INTERVAL seconds..." - sleep "$POLL_INTERVAL" - elapsed_time=$((elapsed_time + POLL_INTERVAL)) -done - -# Quality: "nomad_node_metadata: A GET call to /v1/node/:node-id returns the same node.Meta for each node before and after a node upgrade" - -if ! client_meta=$(nomad node meta read -json -node-id "$client_id"); then - echo "Failed to read metadata for node: $client_id" - exit 1 -fi - -meta_node_ip=$(echo "$client_meta" | jq -r '.Dynamic.node_ip' ) -if [ "$meta_node_ip" != "$CLIENT_IP" ]; then - echo "Wrong value returned for node_ip: $meta_node_ip" - exit 1 -fi - -meta_nomad_addr=$(echo "$client_meta" | jq -r '.Dynamic.nomad_addr' ) -if [ "$meta_nomad_addr" != "$NOMAD_ADDR" ]; then - echo "Wrong value returned for nomad_addr: $meta_nomad_addr" - exit 1 -fi - -echo "Metadata correct in $client_id at $CLIENT_IP" diff --git a/enos/modules/upgrade_clients/scripts/wait_for_nomad_api.sh b/enos/modules/upgrade_clients/scripts/wait_for_nomad_api.sh deleted file mode 100755 index 4e325446e09..00000000000 --- a/enos/modules/upgrade_clients/scripts/wait_for_nomad_api.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -set -xeuo pipefail - -TIMEOUT=10 -INTERVAL=2 - -start_time=$(date +%s) - -while ! nomad server members > /dev/null 2>&1; do - echo "Waiting for Nomad API..." - - current_time=$(date +%s) - elapsed_time=$((current_time - start_time)) - if [ "$elapsed_time" -ge "$TIMEOUT" ]; then - echo "Error: Nomad API did not become available within $TIMEOUT seconds." - exit 1 - fi - - sleep "$INTERVAL" -done - -echo "Nomad API is available!" diff --git a/enos/modules/upgrade_clients/variables.tf b/enos/modules/upgrade_clients/variables.tf deleted file mode 100644 index 71f0bdbb17c..00000000000 --- a/enos/modules/upgrade_clients/variables.tf +++ /dev/null @@ -1,73 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -variable "name" { - description = "Used to name various infrastructure components, must be unique per cluster" - default = "nomad-e2e" -} - -variable "nomad_addr" { - description = "The Nomad API HTTP address." - type = string - default = "http://localhost:4646" -} - -variable "ca_file" { - description = "A local file path to a PEM-encoded certificate authority used to verify the remote agent's certificate" - type = string -} - -variable "cert_file" { - description = "A local file path to a PEM-encoded certificate provided to the remote agent. If this is specified, key_file or key_pem is also required" - type = string -} - -variable "key_file" { - description = "A local file path to a PEM-encoded private key. This is required if cert_file or cert_pem is specified." - type = string -} - -variable "nomad_token" { - description = "The Secret ID of an ACL token to make requests with, for ACL-enabled clusters." - type = string - sensitive = true -} - -variable "platform" { - description = "Operative system of the instance to upgrade" - type = string - default = "linux" -} - -variable "ssh_key_path" { - description = "Path to the ssh private key that can be used to connect to the instance where the server is running" - type = string -} - -variable "clients" { - description = "List of public IP address of the nomad clients that will be updated" - type = list -} - -variable "artifactory_username" { - type = string - description = "The username to use when connecting to artifactory" - default = null -} - -variable "artifactory_token" { - type = string - description = "The token to use when connecting to artifactory" - default = null - sensitive = true -} - -variable "artifact_url" { - type = string - description = "The fully qualified Artifactory item URL" -} - -variable "artifact_sha" { - type = string - description = "The Artifactory item SHA 256 sum" -} diff --git a/enos/modules/upgrade_instance/.gitignore b/enos/modules/upgrade_instance/.gitignore deleted file mode 100644 index 01ae18969c9..00000000000 --- a/enos/modules/upgrade_instance/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -// Don't commit cluster snapshots -*.snap diff --git a/enos/modules/upgrade_instance/main.tf b/enos/modules/upgrade_instance/main.tf deleted file mode 100644 index ac1cf132587..00000000000 --- a/enos/modules/upgrade_instance/main.tf +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - } - } -} - -locals { - binary_destination = var.platform == "windows" ? "C:/opt/" : "/usr/local/bin/" - ssh_user = var.platform == "windows" ? "Administrator" : "ubuntu" - ssh_config = { - host = var.instance_address - private_key_path = var.ssh_key_path - user = local.ssh_user - } -} - -resource "enos_bundle_install" "nomad" { - destination = local.binary_destination - - artifactory = var.artifactory_release - - transport = { - ssh = local.ssh_config - } -} - -resource "enos_remote_exec" "restart_linux_services" { - count = var.platform == "linux" ? 1 : 0 - depends_on = [enos_bundle_install.nomad] - - - transport = { - ssh = local.ssh_config - } - - inline = [ - "sudo systemctl restart nomad", - ] -} - -resource "enos_remote_exec" "restart_windows_services" { - count = var.platform == "windows" ? 
1 : 0 - depends_on = [enos_bundle_install.nomad] - - transport = { - ssh = local.ssh_config - } - - inline = [ - "powershell Restart-Service Nomad" - ] -} diff --git a/enos/modules/upgrade_instance/variables.tf b/enos/modules/upgrade_instance/variables.tf deleted file mode 100644 index 186ad5ab367..00000000000 --- a/enos/modules/upgrade_instance/variables.tf +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -variable "nomad_addr" { - description = "The Nomad API HTTP address of the instance being upgraded." - type = string - default = "http://localhost:4646" -} - -variable "nomad_token" { - description = "The Secret ID of an ACL token to make requests with, for ACL-enabled clusters." - type = string -} - -variable "platform" { - description = "Operating system of the instance to upgrade" - type = string - default = "linux" -} - -variable "instance_address" { - description = "Public IP address of the instance that will be updated" - type = string -} - -variable "ssh_key_path" { - description = "Path to the ssh private key that can be used to connect to the instance where the server is running" - type = string -} - -variable "artifactory_release" { - type = object({ - username = string - token = string - url = string - sha256 = string - }) - description = "The Artifactory release information to install Nomad artifacts from Artifactory" - default = null -} - -variable "tls" { - type = object({ - ca_file = string - cert_file = string - key_file = string - }) - description = "Paths to tls keys and certificates for Nomad CLI" - default = null -} diff --git a/enos/modules/upgrade_servers/main.tf b/enos/modules/upgrade_servers/main.tf deleted file mode 100644 index c766c1aa9e8..00000000000 --- a/enos/modules/upgrade_servers/main.tf +++ /dev/null @@ -1,157 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - } - } -} - -locals { - nomad_env = { - NOMAD_ADDR = var.nomad_addr - NOMAD_CACERT = var.ca_file - NOMAD_CLIENT_CERT = var.cert_file - NOMAD_CLIENT_KEY = var.key_file - NOMAD_TOKEN = var.nomad_token - SERVERS = join(" ", var.servers) - } - - artifactory = { - username = var.artifactory_username - token = var.artifactory_token - url = var.artifact_url - sha256 = var.artifact_sha - } - - tls = { - ca_file = var.ca_file - cert_file = var.cert_file - key_file = var.key_file - } -} - -resource "random_pet" "upgrade" { -} - -resource "enos_local_exec" "wait_for_leader" { - environment = local.nomad_env - - scripts = [abspath("${path.module}/scripts/wait_for_stable_cluster.sh")] -} - -//////////////////////////////////////////////////////////////////////////////// -// Upgrading the first server -//////////////////////////////////////////////////////////////////////////////// -// Taking a snapshot forces the cluster to store a new snapshot that will be -// used to restore the cluster after the restart, because it will be the most -// recent available, the resulting file wont be used.. 
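Before each server restart the module forces a fresh Raft snapshot (see the resources below); when checking the nomad_restore_snapshot quality by hand, the saved file can be inspected and, if needed, restored with the snapshot subcommands. A sketch with illustrative address and file names:

```
#!/usr/bin/env bash
# Sketch: take a snapshot from one server, sanity-check it, and (only if the
# cluster ever needs rebuilding) restore it. Address and file name are
# illustrative.
set -euo pipefail

SERVER_ADDR="https://10.0.0.10:4646"

nomad operator snapshot save -stale -address "$SERVER_ADDR" upgrade-0.snap

# Print index/term metadata to confirm the snapshot is readable.
nomad operator snapshot inspect upgrade-0.snap

# Restore is destructive; keep it commented unless recovery is required.
# nomad operator snapshot restore upgrade-0.snap
```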
-resource "enos_local_exec" "take_first_cluster_snapshot" { - depends_on = [enos_local_exec.wait_for_leader] - - environment = local.nomad_env - - inline = [ - "nomad operator snapshot save -stale -address https://${var.servers[0]}:4646 ${random_pet.upgrade.id}-0.snap", - ] -} - -module upgrade_first_server { - depends_on = [enos_local_exec.take_first_cluster_snapshot] - - source = "../upgrade_instance" - - nomad_addr = var.nomad_addr - tls = local.tls - nomad_token = var.nomad_token - platform = var.platform - instance_address = var.servers[0] - ssh_key_path = var.ssh_key_path - artifactory_release = local.artifactory -} - -resource "enos_local_exec" "first_leader_verification" { - depends_on = [module.upgrade_first_server] - - environment = local.nomad_env - - scripts = [abspath("${path.module}/scripts/wait_for_stable_cluster.sh")] -} - -//////////////////////////////////////////////////////////////////////////////// -// Upgrading the second server -//////////////////////////////////////////////////////////////////////////////// -// Taking a snapshot forces the cluster to store a new snapshot that will be -// used to restore the cluster after the restart, because it will be the most -// recent available, the resulting file wont be used.. -resource "enos_local_exec" "take_second_cluster_snapshot" { - depends_on = [enos_local_exec.first_leader_verification] - - environment = local.nomad_env - - inline = [ - "nomad operator snapshot save -stale -address https://${var.servers[1]}:4646 ${random_pet.upgrade.id}-1.snap", - ] -} - -module upgrade_second_server { - depends_on = [enos_local_exec.take_second_cluster_snapshot] - - source = "../upgrade_instance" - - nomad_addr = var.nomad_addr - tls = local.tls - nomad_token = var.nomad_token - platform = var.platform - instance_address = var.servers[1] - ssh_key_path = var.ssh_key_path - artifactory_release = local.artifactory -} - -resource "enos_local_exec" "second_leader_verification" { - depends_on = [module.upgrade_second_server] - - environment = local.nomad_env - - scripts = [abspath("${path.module}/scripts/wait_for_stable_cluster.sh")] -} - -//////////////////////////////////////////////////////////////////////////////// -// Upgrading the third server -//////////////////////////////////////////////////////////////////////////////// -// Taking a snapshot forces the cluster to store a new snapshot that will be -// used to restore the cluster after the restart, because it will be the most -// recent available, the resulting file wont be used. 
-resource "enos_local_exec" "take_third_cluster_snapshot" { - depends_on = [enos_local_exec.first_leader_verification] - - environment = local.nomad_env - - inline = [ - "nomad operator snapshot save -stale -address https://${var.servers[2]}:4646 ${random_pet.upgrade.id}-1.snap", - ] -} - -module upgrade_third_server { - depends_on = [enos_local_exec.take_third_cluster_snapshot] - - source = "../upgrade_instance" - - nomad_addr = var.nomad_addr - tls = local.tls - nomad_token = var.nomad_token - platform = var.platform - instance_address = var.servers[2] - ssh_key_path = var.ssh_key_path - artifactory_release = local.artifactory -} - -resource "enos_local_exec" "last_leader_verification" { - depends_on = [module.upgrade_third_server] - - environment = local.nomad_env - - scripts = [abspath("${path.module}/scripts/wait_for_stable_cluster.sh")] -} diff --git a/enos/modules/upgrade_servers/scripts/wait_for_stable_cluster.sh b/enos/modules/upgrade_servers/scripts/wait_for_stable_cluster.sh deleted file mode 100755 index fbe93181aee..00000000000 --- a/enos/modules/upgrade_servers/scripts/wait_for_stable_cluster.sh +++ /dev/null @@ -1,83 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -set -euo pipefail - -error_exit() { - printf 'Error: %s' "${1}" - exit 1 -} - -MAX_WAIT_TIME=10 #40 -POLL_INTERVAL=2 - -elapsed_time=0 -last_config_index= -last_error= - -checkRaftConfiguration() { - local raftConfig leader - raftConfig=$(nomad operator api /v1/operator/raft/configuration) || return 1 - leader=$(echo "$raftConfig" | jq -r '[.Servers[] | select(.Leader == true)']) - - echo "$raftConfig" | jq '.' - echo "$leader" - if [ "$(echo "$leader" | jq 'length')" -eq 1 ]; then - last_config_index=$(echo "$raftConfig" | jq -r '.Index') - echo "last_config_index: $last_config_index" - return 0 - fi - - last_error="No leader found" - return 1 -} - -while true; do - checkRaftConfiguration && break - if [ "$elapsed_time" -ge "$MAX_WAIT_TIME" ]; then - error_exit "${last_error} after $elapsed_time seconds." - fi - - echo "${last_error} after $elapsed_time seconds. Retrying in $POLL_INTERVAL seconds..." - sleep "$POLL_INTERVAL" - elapsed_time=$((elapsed_time + POLL_INTERVAL)) -done - - -# reset timer -elapsed_time=0 -last_log_index= - -checkServerHealth() { - local ip node_info - ip=$1 - echo "Checking server health for $ip" - - node_info=$(nomad agent-info -address "https://$ip:4646" -json) \ - || error_exit "Unable to get info for node at $ip" - - last_log_index=$(echo "$node_info" | jq -r '.stats.raft.last_log_index') - if [ "$last_log_index" -ge "$last_config_index" ]; then - return 0 - fi - - last_error="Expected node at $ip to have last log index at least $last_config_index but found $last_log_index" - return 1 -} - -for ip in $SERVERS; do - while true; do - checkServerHealth "$ip" && break - - if [ "$elapsed_time" -ge "$MAX_WAIT_TIME" ]; then - error_exit "$last_error after $elapsed_time seconds." - fi - - echo "${last_error} after $elapsed_time seconds. Retrying in $POLL_INTERVAL seconds..." - sleep "$POLL_INTERVAL" - elapsed_time=$((elapsed_time + POLL_INTERVAL)) - done -done - -echo "All servers are alive and up to date." diff --git a/enos/modules/upgrade_servers/variables.tf b/enos/modules/upgrade_servers/variables.tf deleted file mode 100644 index 7b1a6eaad4a..00000000000 --- a/enos/modules/upgrade_servers/variables.tf +++ /dev/null @@ -1,73 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -variable "name" { - description = "Used to name various infrastructure components, must be unique per cluster" - default = "nomad-e2e" -} - -variable "nomad_addr" { - description = "The Nomad API HTTP address." - type = string - default = "http://localhost:4646" -} - -variable "ca_file" { - description = "A local file path to a PEM-encoded certificate authority used to verify the remote agent's certificate" - type = string -} - -variable "cert_file" { - description = "A local file path to a PEM-encoded certificate provided to the remote agent. If this is specified, key_file or key_pem is also required" - type = string -} - -variable "key_file" { - description = "A local file path to a PEM-encoded private key. This is required if cert_file or cert_pem is specified." - type = string -} - -variable "nomad_token" { - description = "The Secret ID of an ACL token to make requests with, for ACL-enabled clusters." - type = string - sensitive = true -} - -variable "platform" { - description = "Operative system of the instance to upgrade" - type = string - default = "linux" -} - -variable "ssh_key_path" { - description = "Path to the ssh private key that can be used to connect to the instance where the server is running" - type = string -} - -variable "servers" { - description = "List of public IP address of the nomad servers that will be updated" - type = list -} - -variable "artifactory_username" { - type = string - description = "The username to use when connecting to artifactory" - default = null -} - -variable "artifactory_token" { - type = string - description = "The token to use when connecting to artifactory" - default = null - sensitive = true -} - -variable "artifact_url" { - type = string - description = "The fully qualified Artifactory item URL" -} - -variable "artifact_sha" { - type = string - description = "The Artifactory item SHA 256 sum" -} diff --git a/go.mod b/go.mod index dd350244451..a2465a5a0fd 100644 --- a/go.mod +++ b/go.mod @@ -5,6 +5,7 @@ go 1.23 // Pinned dependencies are noted in github.com/hashicorp/nomad/issues/11826. 
replace ( github.com/Microsoft/go-winio => github.com/endocrimes/go-winio v0.4.13-0.20190628114223-fb47a8b41948 + github.com/armon/go-metrics => github.com/armon/go-metrics v0.0.0-20230509193637-d9ca9af9f1f9 github.com/hashicorp/hcl => github.com/hashicorp/hcl v1.0.1-0.20201016140508-a07e7d50bbee ) @@ -16,8 +17,11 @@ require ( github.com/Masterminds/sprig/v3 v3.3.0 github.com/Microsoft/go-winio v0.6.1 github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e + github.com/armon/go-metrics v0.5.3 + github.com/aws/aws-sdk-go-v2 v1.36.1 github.com/aws/aws-sdk-go-v2/config v1.29.6 github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.28 + github.com/aws/aws-sdk-go-v2/service/ecs v1.53.0 github.com/aws/smithy-go v1.22.2 github.com/container-storage-interface/spec v1.10.0 github.com/containerd/go-cni v1.1.12 @@ -63,7 +67,6 @@ require ( github.com/hashicorp/go-kms-wrapping/wrappers/gcpckms/v2 v2.0.12 github.com/hashicorp/go-kms-wrapping/wrappers/transit/v2 v2.0.12 github.com/hashicorp/go-memdb v1.3.4 - github.com/hashicorp/go-metrics v0.5.4 github.com/hashicorp/go-msgpack/v2 v2.1.2 github.com/hashicorp/go-multierror v1.1.1 github.com/hashicorp/go-netaddrs v0.1.0 @@ -82,10 +85,10 @@ require ( github.com/hashicorp/memberlist v0.5.3 github.com/hashicorp/net-rpc-msgpackrpc/v2 v2.0.0 github.com/hashicorp/nomad/api v0.0.0-20230103221135-ce00d683f9be - github.com/hashicorp/raft v1.7.2 + github.com/hashicorp/raft v1.7.1 github.com/hashicorp/raft-autopilot v0.1.6 - github.com/hashicorp/raft-boltdb/v2 v2.3.1 - github.com/hashicorp/serf v0.10.2 + github.com/hashicorp/raft-boltdb/v2 v2.3.0 + github.com/hashicorp/serf v0.10.2-0.20240320153621-5d32001edfaa github.com/hashicorp/vault/api v1.15.0 github.com/hashicorp/yamux v0.1.2 github.com/hpcloud/tail v1.0.1-0.20170814160653-37f427138745 @@ -180,10 +183,8 @@ require ( github.com/agext/levenshtein v1.2.1 // indirect github.com/apparentlymart/go-cidr v1.0.1 // indirect github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect - github.com/armon/go-metrics v0.4.1 // indirect github.com/armon/go-radix v1.0.0 // indirect github.com/aws/aws-sdk-go v1.55.6 // indirect - github.com/aws/aws-sdk-go-v2 v1.36.1 // indirect github.com/aws/aws-sdk-go-v2/credentials v1.17.59 // indirect github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.32 // indirect github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.32 // indirect @@ -243,6 +244,7 @@ require ( github.com/gorilla/mux v1.8.1 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-immutable-radix v1.3.1 // indirect + github.com/hashicorp/go-metrics v0.5.4 // indirect github.com/hashicorp/go-msgpack v1.1.6-0.20240304204939-8824e8ccc35f // indirect github.com/hashicorp/go-retryablehttp v0.7.7 // indirect github.com/hashicorp/go-rootcerts v1.0.2 // indirect @@ -253,8 +255,7 @@ require ( github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.3 // indirect github.com/hashicorp/go-set/v2 v2.1.0 // indirect github.com/hashicorp/golang-lru v1.0.2 // indirect - github.com/hashicorp/mdns v1.0.5 // indirect - github.com/hashicorp/raft-boltdb v0.0.0-20250113192317-e8660f88bcc9 // indirect + github.com/hashicorp/mdns v1.0.4 // indirect github.com/hashicorp/vault/api/auth/kubernetes v0.5.0 // indirect github.com/hashicorp/vic v1.5.1-0.20190403131502-bbfe86ec9443 // indirect github.com/huandu/xstrings v1.5.0 // indirect diff --git a/go.sum b/go.sum index f51ae984071..7ce776ab50a 100644 --- a/go.sum +++ b/go.sum @@ -684,7 +684,6 @@ github.com/BurntSushi/toml v0.3.1/go.mod 
h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/datadog-go v3.2.0+incompatible h1:qSG2N4FghB1He/r2mFrWKCaL7dXCilEuNEeAn20fdD4= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0 h1:3c8yed4lgqTt+oTQ+JNMDo+F4xprBf+O/il4ZC0nRLw= @@ -737,9 +736,8 @@ github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e h1:QEF07wC0T1rKkctt1RINW/+RMTVmiwxETico2l3gxJA= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg= -github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= -github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= +github.com/armon/go-metrics v0.0.0-20230509193637-d9ca9af9f1f9 h1:51N4T44k8crLrlHy1zgBKGdYKjzjquaXw/RPbq/bH+o= +github.com/armon/go-metrics v0.0.0-20230509193637-d9ca9af9f1f9/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= @@ -762,6 +760,8 @@ github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.32 h1:m1GeXHVMJsRsUAqG6H github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.32/go.mod h1:IitoQxGfaKdVLNg0hD8/DXmAqNy0H4K2H2Sf91ti8sI= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 h1:Pg9URiobXy85kgFev3og2CuOZ8JZUBENF+dcgWBaYNk= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= +github.com/aws/aws-sdk-go-v2/service/ecs v1.53.0 h1:TCQZX4ztlcWXAcZouKh9qJMcVaH/qTidFTfsvJwUI30= +github.com/aws/aws-sdk-go-v2/service/ecs v1.53.0/go.mod h1:Ghi1OWUv4+VMEULWiHsKH2gNA3KAcMoLWsvU0eRXvIA= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 h1:D4oz8/CzT9bAEYtVhSBmFj2dNOtaHOtMKc2vHBwYizA= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2/go.mod h1:Za3IHqTQ+yNcRHxu1OFucBh0ACZT4j4VQFF0BqpZcLY= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.13 h1:SYVGSFQHlchIcy6e7x12bsrxClCXSP5et8cqVhL8cuw= @@ -1278,25 +1278,24 @@ github.com/hashicorp/hcl/v2 v2.20.2-0.20240517235513-55d9c02d147d/go.mod h1:62ZY github.com/hashicorp/hil v0.0.0-20210521165536-27a72121fd40 h1:ExwaL+hUy1ys2AWDbsbh/lxQS2EVCYxuj0LoyLTdB3Y= github.com/hashicorp/hil v0.0.0-20210521165536-27a72121fd40/go.mod h1:n2TSygSNwsLJ76m8qFXTSc7beTb+auJxYdqrnoqwZWE= github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY= -github.com/hashicorp/mdns v1.0.5 h1:1M5hW1cunYeoXOqHwEb/GBDDHAFo0Yqb/uz/beC6LbE= -github.com/hashicorp/mdns v1.0.5/go.mod 
h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= +github.com/hashicorp/mdns v1.0.4 h1:sY0CMhFmjIPDMlTB+HfymFHCaYLhgifZ0QhjaYKD/UQ= +github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= github.com/hashicorp/memberlist v0.5.3 h1:tQ1jOCypD0WvMemw/ZhhtH+PWpzcftQvgCorLu0hndk= github.com/hashicorp/memberlist v0.5.3/go.mod h1:h60o12SZn/ua/j0B6iKAZezA4eDaGsIuPO70eOaJ6WE= github.com/hashicorp/net-rpc-msgpackrpc/v2 v2.0.0 h1:kBpVVl1sl3MaSrs97e0+pDQhSrqJv9gVbSUrPpVfl1w= github.com/hashicorp/net-rpc-msgpackrpc/v2 v2.0.0/go.mod h1:6pdNz0vo0mF0GvhwDG56O3N18qBrAz/XRIcfINfTbwo= -github.com/hashicorp/raft v1.1.0/go.mod h1:4Ak7FSPnuvmb0GV6vgIAJ4vYT4bek9bb6Q+7HVbyzqM= github.com/hashicorp/raft v1.2.0/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8= -github.com/hashicorp/raft v1.7.2 h1:pyvxhfJ4R8VIAlHKvLoKQWElZspsCVT6YWuxVxsPAgc= -github.com/hashicorp/raft v1.7.2/go.mod h1:DfvCGFxpAUPE0L4Uc8JLlTPtc3GzSbdH0MTJCLgnmJQ= +github.com/hashicorp/raft v1.7.1 h1:ytxsNx4baHsRZrhUcbt3+79zc4ly8qm7pi0393pSchY= +github.com/hashicorp/raft v1.7.1/go.mod h1:hUeiEwQQR/Nk2iKDD0dkEhklSsu3jcAcqvPzPoZSAEM= github.com/hashicorp/raft-autopilot v0.1.6 h1:C1q3RNF2FfXNZfHWbvVAu0QixaQK8K5pX4O5lh+9z4I= github.com/hashicorp/raft-autopilot v0.1.6/go.mod h1:Af4jZBwaNOI+tXfIqIdbcAnh/UyyqIMj/pOISIfhArw= github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea/go.mod h1:pNv7Wc3ycL6F5oOWn+tPGo2gWD4a5X+yp/ntwdKLjRk= -github.com/hashicorp/raft-boltdb v0.0.0-20250113192317-e8660f88bcc9 h1:DtRY4x+oreq0BTrrfF66XeCg6DPJuR2AL4Ejeipau/A= -github.com/hashicorp/raft-boltdb v0.0.0-20250113192317-e8660f88bcc9/go.mod h1:FLQZr+lEOtW/5JZQCqRihQOrmyqWRqpJ+pP1gjb8XTE= -github.com/hashicorp/raft-boltdb/v2 v2.3.1 h1:ackhdCNPKblmOhjEU9+4lHSJYFkJd6Jqyvj6eW9pwkc= -github.com/hashicorp/raft-boltdb/v2 v2.3.1/go.mod h1:n4S+g43dXF1tqDT+yzcXHhXM6y7MrlUd3TTwGRcUvQE= -github.com/hashicorp/serf v0.10.2 h1:m5IORhuNSjaxeljg5DeQVDlQyVkhRIjJDimbkCa8aAc= -github.com/hashicorp/serf v0.10.2/go.mod h1:T1CmSGfSeGfnfNy/w0odXQUR1rfECGd2Qdsp84DjOiY= +github.com/hashicorp/raft-boltdb v0.0.0-20230125174641-2a8082862702 h1:RLKEcCuKcZ+qp2VlaaZsYZfLOmIiuJNpEi48Rl8u9cQ= +github.com/hashicorp/raft-boltdb v0.0.0-20230125174641-2a8082862702/go.mod h1:nTakvJ4XYq45UXtn0DbwR4aU9ZdjlnIenpbs6Cd+FM0= +github.com/hashicorp/raft-boltdb/v2 v2.3.0 h1:fPpQR1iGEVYjZ2OELvUHX600VAK5qmdnDEv3eXOwZUA= +github.com/hashicorp/raft-boltdb/v2 v2.3.0/go.mod h1:YHukhB04ChJsLHLJEUD6vjFyLX2L3dsX3wPBZcX4tmc= +github.com/hashicorp/serf v0.10.2-0.20240320153621-5d32001edfaa h1:UXgK+AZPfeQ1vOXXXfBj7C7mZpWUgRFcMAKpyyYrYgU= +github.com/hashicorp/serf v0.10.2-0.20240320153621-5d32001edfaa/go.mod h1:RiISHML4PEb0ZN6S6uNW04TO8D6EUtTIOpCzzDnZeGk= github.com/hashicorp/vault/api v1.10.0/go.mod h1:jo5Y/ET+hNyz+JnKDt8XLAdKs+AM0G5W0Vp1IrFI8N8= github.com/hashicorp/vault/api v1.15.0 h1:O24FYQCWwhwKnF7CuSqP30S51rTV7vz1iACXE/pj5DA= github.com/hashicorp/vault/api v1.15.0/go.mod h1:+5YTO09JGn0u+b6ySD/LLVf8WkJCPLAL2Vkmrn2+CM8= @@ -1537,7 +1536,6 @@ github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSg github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= github.com/prometheus/client_golang 
v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= @@ -1552,7 +1550,6 @@ github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6T github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= @@ -1561,7 +1558,6 @@ github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9 github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= @@ -1839,7 +1835,6 @@ golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= diff --git a/helper/funcs.go b/helper/funcs.go index 2695540ffa7..e251328f697 100644 --- a/helper/funcs.go +++ b/helper/funcs.go @@ -9,7 +9,6 @@ import ( "maps" "math" "net/http" - "os" "path/filepath" "reflect" "regexp" @@ -526,50 +525,3 @@ func Merge[T comparable](a, b T) T { } return a } - -// FlattenMultierror takes a multierror and unwraps it if there's only one error -// in the output, otherwise returning the multierror or nil. 
-func FlattenMultierror(err error) error { - mErr, ok := err.(*multierror.Error) - if !ok { - return err - } - // note: mErr is a pointer so we still need to nil-check even after the cast - if mErr == nil { - return nil - } - if mErr.Len() == 1 { - return mErr.Errors[0] - } - return mErr.ErrorOrNil() -} - -// FindExecutableFiles looks in the provided path for executables and returns -// a map where keys are filenames and values are the absolute path. -func FindExecutableFiles(path string) (map[string]string, error) { - executables := make(map[string]string) - entries, err := os.ReadDir(path) - if err != nil { - return executables, err - } - for _, e := range entries { - i, err := e.Info() - if err != nil { - return executables, err - } - if !IsExecutable(i) { - continue - } - p := filepath.Join(path, i.Name()) - abs, err := filepath.Abs(p) - if err != nil { - return executables, err - } - executables[i.Name()] = abs - } - return executables, nil -} - -func IsExecutable(i os.FileInfo) bool { - return !i.IsDir() && i.Mode()&0o111 != 0 -} diff --git a/helper/funcs_test.go b/helper/funcs_test.go index 86e6fd4c8e8..4e1947f28d4 100644 --- a/helper/funcs_test.go +++ b/helper/funcs_test.go @@ -4,14 +4,12 @@ package helper import ( - "errors" "fmt" "maps" "reflect" "sort" "testing" - multierror "github.com/hashicorp/go-multierror" "github.com/hashicorp/go-set/v3" "github.com/shoenig/test/must" "github.com/stretchr/testify/require" @@ -485,46 +483,3 @@ func Test_SliceSetEq(t *testing.T) { must.True(t, SliceSetEq(a, b)) }) } - -func TestFlattenMultiError(t *testing.T) { - - err := FlattenMultierror(nil) - must.Nil(t, err) - - err0 := errors.New("oh no!") - err = FlattenMultierror(err0) - must.Eq(t, `oh no!`, err.Error()) - - var mErr0 *multierror.Error - err = FlattenMultierror(mErr0) - must.Nil(t, err) - - mErr0 = multierror.Append(mErr0, func() error { - return nil - }()) - err = FlattenMultierror(mErr0) - must.Nil(t, err) - - var mErr1 *multierror.Error - mErr1 = multierror.Append(mErr1, func() error { - var mErr *multierror.Error - mErr = multierror.Append(mErr, errors.New("inner1")) - return mErr - }()) - err = FlattenMultierror(mErr1) - must.Eq(t, `inner1`, err.Error()) - - var mErr2 *multierror.Error - mErr2 = multierror.Append(mErr2, func() error { - var mErr *multierror.Error - mErr = multierror.Append(mErr, errors.New("inner1")) - mErr = multierror.Append(mErr, errors.New("inner2")) - return mErr - }()) - err = FlattenMultierror(mErr2) - must.Eq(t, `2 errors occurred: - * inner1 - * inner2 - -`, err.Error()) -} diff --git a/helper/pluginutils/loader/init.go b/helper/pluginutils/loader/init.go index 9bcbc6081aa..7d3003627db 100644 --- a/helper/pluginutils/loader/init.go +++ b/helper/pluginutils/loader/init.go @@ -264,15 +264,10 @@ func (l *PluginLoader) fingerprintPlugins(plugins []os.FileInfo, configs map[str fingerprinted := make(map[PluginID]*pluginInfo, len(plugins)) for _, p := range plugins { name := cleanPluginExecutable(p.Name()) - - // Use the cleaned plugin name to check whether it is configured by the - // operator for use. If it is not, skip loading it and log a message, so - // operators can easily see this. c, ok := configs[name] if !ok { - l.logger.Warn("plugin not referenced in the agent configuration file, loading skipped", - "plugin", name) - continue + // COMPAT(1.7): Skip executing unconfigured plugins in 1.8 or later. 
+ l.logger.Warn("plugin not referenced in the agent configuration file, future versions of Nomad will not load this plugin until the agent configuration is updated", "plugin", name) } info, err := l.fingerprintPlugin(p, c) if err != nil { diff --git a/helper/pluginutils/loader/loader_test.go b/helper/pluginutils/loader/loader_test.go index 5b0f4ce01ab..939ee7b7104 100644 --- a/helper/pluginutils/loader/loader_test.go +++ b/helper/pluginutils/loader/loader_test.go @@ -102,9 +102,8 @@ func TestPluginLoader_External(t *testing.T) { ci.Parallel(t) require := require.New(t) - // Create three plugins. Only two will be referenced within the agent config - // meaning the third should not be loaded. - plugins := []string{"mock-device", "mock-device-2", "mock-device-3"} + // Create two plugins + plugins := []string{"mock-device", "mock-device-2"} pluginVersions := []string{"v0.0.1", "v0.0.2"} h := newHarness(t, plugins) @@ -134,8 +133,6 @@ func TestPluginLoader_External(t *testing.T) { require.NoError(err) // Get the catalog and assert we have the two plugins - // - // Note: mock-device-3 is ignored because it does not have a related config. c := l.Catalog() require.Len(c, 1) require.Contains(c, base.PluginTypeDevice) diff --git a/helper/raftutil/msgtypes.go b/helper/raftutil/msgtypes.go index d70e2aea211..615881173c9 100644 --- a/helper/raftutil/msgtypes.go +++ b/helper/raftutil/msgtypes.go @@ -68,7 +68,4 @@ var msgTypeNames = map[structs.MessageType]string{ structs.WrappedRootKeysUpsertRequestType: "WrappedRootKeysUpsertRequestType", structs.NamespaceUpsertRequestType: "NamespaceUpsertRequestType", structs.NamespaceDeleteRequestType: "NamespaceDeleteRequestType", - structs.HostVolumeRegisterRequestType: "HostVolumeRegisterRequestType", - structs.HostVolumeDeleteRequestType: "HostVolumeDeleteRequestType", - structs.TaskGroupHostVolumeClaimDeleteRequestType: "TaskGroupHostVolumeClaimDeleteRequestType", } diff --git a/helper/users/lookup_windows_test.go b/helper/users/lookup_windows_test.go index 4ef11b67c15..0c8e5d70d47 100644 --- a/helper/users/lookup_windows_test.go +++ b/helper/users/lookup_windows_test.go @@ -11,7 +11,6 @@ import ( "path/filepath" "testing" - "github.com/hashicorp/nomad/helper/testlog" "github.com/shoenig/test/must" ) diff --git a/lib/auth/jwt/validator.go b/lib/auth/jwt/validator.go index 9f5050de76b..b1ebaacf8ce 100644 --- a/lib/auth/jwt/validator.go +++ b/lib/auth/jwt/validator.go @@ -10,8 +10,8 @@ import ( "slices" "time" + "github.com/armon/go-metrics" "github.com/hashicorp/cap/jwt" - metrics "github.com/hashicorp/go-metrics/compat" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/nomad/structs" ) diff --git a/nomad/acl_endpoint.go b/nomad/acl_endpoint.go index 21815e0b631..2551421cc30 100644 --- a/nomad/acl_endpoint.go +++ b/nomad/acl_endpoint.go @@ -14,10 +14,10 @@ import ( "strings" "time" + "github.com/armon/go-metrics" capOIDC "github.com/hashicorp/cap/oidc" "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-memdb" - metrics "github.com/hashicorp/go-metrics/compat" "github.com/hashicorp/go-set/v3" policy "github.com/hashicorp/nomad/acl" diff --git a/nomad/alloc_endpoint.go b/nomad/alloc_endpoint.go index d0b47640fd6..9e8678eb174 100644 --- a/nomad/alloc_endpoint.go +++ b/nomad/alloc_endpoint.go @@ -8,9 +8,9 @@ import ( "net/http" "time" + "github.com/armon/go-metrics" "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-memdb" - metrics "github.com/hashicorp/go-metrics/compat" "github.com/hashicorp/nomad/acl" 
"github.com/hashicorp/nomad/helper/pointer" diff --git a/nomad/auth/auth.go b/nomad/auth/auth.go index a2d88a5074c..38fd6445562 100644 --- a/nomad/auth/auth.go +++ b/nomad/auth/auth.go @@ -12,8 +12,8 @@ import ( "strings" "time" + "github.com/armon/go-metrics" "github.com/hashicorp/go-hclog" - metrics "github.com/hashicorp/go-metrics/compat" "github.com/hashicorp/nomad/acl" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/nomad/state" diff --git a/nomad/autopilot.go b/nomad/autopilot.go index a4c7401c9de..0d5b59360f6 100644 --- a/nomad/autopilot.go +++ b/nomad/autopilot.go @@ -8,7 +8,7 @@ import ( "fmt" "strconv" - metrics "github.com/hashicorp/go-metrics/compat" + metrics "github.com/armon/go-metrics" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/raft" diff --git a/nomad/blocked_evals.go b/nomad/blocked_evals.go index 7368bf30b44..57706c195b2 100644 --- a/nomad/blocked_evals.go +++ b/nomad/blocked_evals.go @@ -7,8 +7,8 @@ import ( "sync" "time" + "github.com/armon/go-metrics" "github.com/hashicorp/go-hclog" - metrics "github.com/hashicorp/go-metrics/compat" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/nomad/structs" ) diff --git a/nomad/client_alloc_endpoint.go b/nomad/client_alloc_endpoint.go index e3d9eb571b9..9ed17e5fa0a 100644 --- a/nomad/client_alloc_endpoint.go +++ b/nomad/client_alloc_endpoint.go @@ -11,8 +11,8 @@ import ( "net/http" "time" + "github.com/armon/go-metrics" "github.com/hashicorp/go-hclog" - metrics "github.com/hashicorp/go-metrics/compat" "github.com/hashicorp/go-msgpack/v2/codec" "github.com/hashicorp/nomad/acl" diff --git a/nomad/client_csi_endpoint.go b/nomad/client_csi_endpoint.go index 4052b380d26..8f2faf943f1 100644 --- a/nomad/client_csi_endpoint.go +++ b/nomad/client_csi_endpoint.go @@ -10,9 +10,9 @@ import ( "strings" "time" + metrics "github.com/armon/go-metrics" log "github.com/hashicorp/go-hclog" memdb "github.com/hashicorp/go-memdb" - metrics "github.com/hashicorp/go-metrics/compat" cstructs "github.com/hashicorp/nomad/client/structs" "github.com/hashicorp/nomad/nomad/structs" diff --git a/nomad/client_fs_endpoint.go b/nomad/client_fs_endpoint.go index 22d1cdd7f4d..165d88b70a6 100644 --- a/nomad/client_fs_endpoint.go +++ b/nomad/client_fs_endpoint.go @@ -11,8 +11,8 @@ import ( "strings" "time" + metrics "github.com/armon/go-metrics" log "github.com/hashicorp/go-hclog" - metrics "github.com/hashicorp/go-metrics/compat" cstructs "github.com/hashicorp/nomad/client/structs" "github.com/hashicorp/nomad/helper/pointer" diff --git a/nomad/client_host_volume_endpoint.go b/nomad/client_host_volume_endpoint.go deleted file mode 100644 index 5cf2a3fa03b..00000000000 --- a/nomad/client_host_volume_endpoint.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -package nomad - -import ( - "fmt" - "time" - - log "github.com/hashicorp/go-hclog" - metrics "github.com/hashicorp/go-metrics/compat" - cstructs "github.com/hashicorp/nomad/client/structs" - "github.com/hashicorp/nomad/nomad/structs" -) - -// ClientHostVolume is the client RPC endpoint for host volumes -type ClientHostVolume struct { - srv *Server - ctx *RPCContext - logger log.Logger -} - -func NewClientHostVolumeEndpoint(srv *Server, ctx *RPCContext) *ClientHostVolume { - return &ClientHostVolume{srv: srv, ctx: ctx, logger: srv.logger.Named("client_host_volume")} -} - -func (c *ClientHostVolume) Create(args *cstructs.ClientHostVolumeCreateRequest, reply *cstructs.ClientHostVolumeCreateResponse) error { - defer metrics.MeasureSince([]string{"nomad", "client_host_node", "create"}, time.Now()) - return c.sendVolumeRPC( - args.NodeID, - "HostVolume.Create", - "ClientHostVolume.Create", - structs.RateMetricWrite, - args, - reply, - ) -} - -func (c *ClientHostVolume) Register(args *cstructs.ClientHostVolumeRegisterRequest, reply *cstructs.ClientHostVolumeRegisterResponse) error { - defer metrics.MeasureSince([]string{"nomad", "client_host_node", "register"}, time.Now()) - return c.sendVolumeRPC( - args.NodeID, - "HostVolume.Register", - "ClientHostVolume.Register", - structs.RateMetricWrite, - args, - reply, - ) -} - -func (c *ClientHostVolume) Delete(args *cstructs.ClientHostVolumeDeleteRequest, reply *cstructs.ClientHostVolumeDeleteResponse) error { - defer metrics.MeasureSince([]string{"nomad", "client_host_volume", "delete"}, time.Now()) - return c.sendVolumeRPC( - args.NodeID, - "HostVolume.Delete", - "ClientHostVolume.Delete", - structs.RateMetricWrite, - args, - reply, - ) -} - -func (c *ClientHostVolume) sendVolumeRPC(nodeID, method, fwdMethod, op string, args any, reply any) error { - // client requests aren't RequestWithIdentity, so we use a placeholder here - // to populate the identity data for metrics - identityReq := &structs.GenericRequest{} - aclObj, err := c.srv.AuthenticateServerOnly(c.ctx, identityReq) - c.srv.MeasureRPCRate("client_host_volume", op, identityReq) - - if err != nil || !aclObj.AllowServerOp() { - return structs.ErrPermissionDenied - } - - // Make sure Node is valid and new enough to support RPC - snap, err := c.srv.State().Snapshot() - if err != nil { - return err - } - - _, err = getNodeForRpc(snap, nodeID) - if err != nil { - return err - } - - // Get the connection to the client - state, ok := c.srv.getNodeConn(nodeID) - if !ok { - return findNodeConnAndForward(c.srv, nodeID, fwdMethod, args, reply) - } - - // Make the RPC - if err := NodeRpc(state.Session, method, args, reply); err != nil { - return fmt.Errorf("%s error: %w", method, err) - } - return nil -} diff --git a/nomad/client_meta_endpoint.go b/nomad/client_meta_endpoint.go index 1fadd40c73a..dbf577b6d38 100644 --- a/nomad/client_meta_endpoint.go +++ b/nomad/client_meta_endpoint.go @@ -6,8 +6,8 @@ package nomad import ( "time" + metrics "github.com/armon/go-metrics" log "github.com/hashicorp/go-hclog" - metrics "github.com/hashicorp/go-metrics/compat" "github.com/hashicorp/nomad/nomad/structs" nstructs "github.com/hashicorp/nomad/nomad/structs" ) diff --git a/nomad/client_stats_endpoint.go b/nomad/client_stats_endpoint.go index 4393a8ca0fa..d74bcdedbdc 100644 --- a/nomad/client_stats_endpoint.go +++ b/nomad/client_stats_endpoint.go @@ -6,8 +6,8 @@ package nomad import ( "time" + metrics "github.com/armon/go-metrics" log "github.com/hashicorp/go-hclog" - 
metrics "github.com/hashicorp/go-metrics/compat" nstructs "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/client/structs" diff --git a/nomad/consul.go b/nomad/consul.go index 2feb99d936e..19a8eeed0da 100644 --- a/nomad/consul.go +++ b/nomad/consul.go @@ -13,9 +13,9 @@ import ( "sync" "time" + "github.com/armon/go-metrics" "github.com/hashicorp/consul/api" "github.com/hashicorp/go-hclog" - metrics "github.com/hashicorp/go-metrics/compat" "github.com/hashicorp/nomad/command/agent/consul" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/nomad/structs" diff --git a/nomad/csi_endpoint.go b/nomad/csi_endpoint.go index fc86e761745..6f7bd2caa87 100644 --- a/nomad/csi_endpoint.go +++ b/nomad/csi_endpoint.go @@ -11,10 +11,10 @@ import ( "strings" "time" + "github.com/armon/go-metrics" "github.com/dustin/go-humanize" "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-memdb" - metrics "github.com/hashicorp/go-metrics/compat" "github.com/hashicorp/go-multierror" "github.com/hashicorp/nomad/acl" diff --git a/nomad/deployment_endpoint.go b/nomad/deployment_endpoint.go index 13291b6de3d..e775f07a7c0 100644 --- a/nomad/deployment_endpoint.go +++ b/nomad/deployment_endpoint.go @@ -8,9 +8,9 @@ import ( "net/http" "time" + "github.com/armon/go-metrics" "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-memdb" - metrics "github.com/hashicorp/go-metrics/compat" "github.com/hashicorp/nomad/acl" "github.com/hashicorp/nomad/nomad/state" diff --git a/nomad/eval_broker.go b/nomad/eval_broker.go index 3205eb55245..8d99eac45ef 100644 --- a/nomad/eval_broker.go +++ b/nomad/eval_broker.go @@ -13,7 +13,7 @@ import ( "sync" "time" - metrics "github.com/hashicorp/go-metrics/compat" + metrics "github.com/armon/go-metrics" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/broker" diff --git a/nomad/eval_endpoint.go b/nomad/eval_endpoint.go index a0d5e3159bf..018115355f1 100644 --- a/nomad/eval_endpoint.go +++ b/nomad/eval_endpoint.go @@ -9,10 +9,10 @@ import ( "net/http" "time" + "github.com/armon/go-metrics" "github.com/hashicorp/go-bexpr" "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-memdb" - metrics "github.com/hashicorp/go-metrics/compat" "github.com/hashicorp/go-multierror" "github.com/hashicorp/go-version" diff --git a/nomad/event_endpoint.go b/nomad/event_endpoint.go index 13146ff95fb..8d8256e1686 100644 --- a/nomad/event_endpoint.go +++ b/nomad/event_endpoint.go @@ -271,18 +271,6 @@ func validateNsOp(namespace string, topics map[structs.Topic][]string, aclObj *a if ok := aclObj.AllowNsOp(namespace, acl.NamespaceCapabilityReadJob); !ok { return structs.ErrPermissionDenied } - case structs.TopicHostVolume: - if ok := aclObj.AllowNsOp(namespace, acl.NamespaceCapabilityHostVolumeRead); !ok { - return structs.ErrPermissionDenied - } - case structs.TopicCSIVolume: - if ok := aclObj.AllowNsOp(namespace, acl.NamespaceCapabilityCSIReadVolume); !ok { - return structs.ErrPermissionDenied - } - case structs.TopicCSIPlugin: - if ok := aclObj.AllowNsOp(namespace, acl.NamespaceCapabilityReadJob); !ok { - return structs.ErrPermissionDenied - } case structs.TopicNode: if ok := aclObj.AllowNodeRead(); !ok { return structs.ErrPermissionDenied diff --git a/nomad/event_endpoint_test.go b/nomad/event_endpoint_test.go index b75941445c1..58a37e74ada 100644 --- a/nomad/event_endpoint_test.go +++ b/nomad/event_endpoint_test.go @@ -409,66 +409,6 @@ func TestEventStream_validateNsOp(t *testing.T) { Management: false, ExpectedErr: structs.ErrPermissionDenied, }, - 
{ - Name: "read host volumes - correct policy and ns", - Topics: map[structs.Topic][]string{ - structs.TopicHostVolume: {"*"}, - }, - Policy: mock.NamespacePolicy("foo", "", []string{acl.NamespaceCapabilityHostVolumeRead}), - Namespace: "foo", - Management: false, - ExpectedErr: nil, - }, - { - Name: "read host volumes - incorrect policy or ns", - Topics: map[structs.Topic][]string{ - structs.TopicHostVolume: {"*"}, - }, - Policy: mock.NamespacePolicy("foo", "", []string{acl.NamespaceCapabilityReadJob}), - Namespace: "foo", - Management: false, - ExpectedErr: structs.ErrPermissionDenied, - }, - { - Name: "read csi volumes - correct policy and ns", - Topics: map[structs.Topic][]string{ - structs.TopicCSIVolume: {"*"}, - }, - Policy: mock.NamespacePolicy("foo", "", []string{acl.NamespaceCapabilityCSIReadVolume}), - Namespace: "foo", - Management: false, - ExpectedErr: nil, - }, - { - Name: "read csi volumes - incorrect policy or ns", - Topics: map[structs.Topic][]string{ - structs.TopicCSIVolume: {"*"}, - }, - Policy: mock.NamespacePolicy("foo", "", []string{acl.NamespaceCapabilityReadJob}), - Namespace: "foo", - Management: false, - ExpectedErr: structs.ErrPermissionDenied, - }, - { - Name: "read csi plugin - correct policy and ns", - Topics: map[structs.Topic][]string{ - structs.TopicCSIPlugin: {"*"}, - }, - Policy: mock.NamespacePolicy("foo", "", []string{acl.NamespaceCapabilityReadJob}), - Namespace: "foo", - Management: false, - ExpectedErr: nil, - }, - { - Name: "read csi plugin - incorrect policy or ns", - Topics: map[structs.Topic][]string{ - structs.TopicCSIPlugin: {"*"}, - }, - Policy: mock.NamespacePolicy("foo", "", []string{acl.NamespaceCapabilityReadJob}), - Namespace: "bar", - Management: false, - ExpectedErr: structs.ErrPermissionDenied, - }, } for _, tc := range cases { diff --git a/nomad/fsm.go b/nomad/fsm.go index 8ba018ea34b..2ae3a2341b0 100644 --- a/nomad/fsm.go +++ b/nomad/fsm.go @@ -10,10 +10,10 @@ import ( "sync" "time" + "github.com/armon/go-metrics" "github.com/hashicorp/go-bexpr" "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-memdb" - metrics "github.com/hashicorp/go-metrics/compat" "github.com/hashicorp/go-msgpack/v2/codec" "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/uuid" @@ -57,7 +57,6 @@ const ( NodePoolSnapshot SnapshotType = 28 JobSubmissionSnapshot SnapshotType = 29 RootKeySnapshot SnapshotType = 30 - HostVolumeSnapshot SnapshotType = 31 // TimeTableSnapshot // Deprecated: Nomad no longer supports TimeTable snapshots since 1.9.2 @@ -103,7 +102,6 @@ var snapshotTypeStrings = map[SnapshotType]string{ NodePoolSnapshot: "NodePool", JobSubmissionSnapshot: "JobSubmission", RootKeySnapshot: "WrappedRootKeys", - HostVolumeSnapshot: "HostVolumeSnapshot", NamespaceSnapshot: "Namespace", } @@ -383,12 +381,9 @@ func (n *nomadFSM) Apply(log *raft.Log) interface{} { return n.applyACLBindingRulesDelete(buf[1:], log.Index) case structs.WrappedRootKeysUpsertRequestType: return n.applyWrappedRootKeysUpsert(msgType, buf[1:], log.Index) + case structs.JobVersionTagRequestType: return n.applyJobVersionTag(buf[1:], log.Index) - case structs.HostVolumeRegisterRequestType: - return n.applyHostVolumeRegister(msgType, buf[1:], log.Index) - case structs.HostVolumeDeleteRequestType: - return n.applyHostVolumeDelete(msgType, buf[1:], log.Index) } // Check enterprise only message types. 
@@ -1941,17 +1936,6 @@ func (n *nomadFSM) restoreImpl(old io.ReadCloser, filter *FSMFilter) error { return err } - case HostVolumeSnapshot: - vol := new(structs.HostVolume) - if err := dec.Decode(vol); err != nil { - return err - } - if filter.Include(vol) { - if err := restore.HostVolumeRestore(vol); err != nil { - return err - } - } - default: // Check if this is an enterprise only object being restored restorer, ok := n.enterpriseRestorers[snapType] @@ -2420,36 +2404,6 @@ func (n *nomadFSM) applyWrappedRootKeysDelete(msgType structs.MessageType, buf [ return nil } -func (n *nomadFSM) applyHostVolumeRegister(msgType structs.MessageType, buf []byte, index uint64) interface{} { - defer metrics.MeasureSince([]string{"nomad", "fsm", "apply_host_volume_register"}, time.Now()) - - var req structs.HostVolumeRegisterRequest - if err := structs.Decode(buf, &req); err != nil { - panic(fmt.Errorf("failed to decode request: %v", err)) - } - - if err := n.state.UpsertHostVolume(index, req.Volume); err != nil { - n.logger.Error("UpsertHostVolumes failed", "error", err) - return err - } - return nil -} - -func (n *nomadFSM) applyHostVolumeDelete(msgType structs.MessageType, buf []byte, index uint64) interface{} { - defer metrics.MeasureSince([]string{"nomad", "fsm", "apply_host_volume_delete"}, time.Now()) - - var req structs.HostVolumeDeleteRequest - if err := structs.Decode(buf, &req); err != nil { - panic(fmt.Errorf("failed to decode request: %v", err)) - } - - if err := n.state.DeleteHostVolume(index, req.RequestNamespace(), req.VolumeID); err != nil { - n.logger.Error("DeleteHostVolumes failed", "error", err) - return err - } - return nil -} - func (s *nomadSnapshot) Persist(sink raft.SnapshotSink) error { defer metrics.MeasureSince([]string{"nomad", "fsm", "persist"}, time.Now()) // Register the nodes @@ -2583,10 +2537,6 @@ func (s *nomadSnapshot) Persist(sink raft.SnapshotSink) error { sink.Cancel() return err } - if err := s.persistHostVolumes(sink, encoder); err != nil { - sink.Cancel() - return err - } return nil } @@ -3324,22 +3274,6 @@ func (s *nomadSnapshot) persistJobSubmissions(sink raft.SnapshotSink, encoder *c return nil } -func (s *nomadSnapshot) persistHostVolumes(sink raft.SnapshotSink, encoder *codec.Encoder) error { - iter, err := s.snap.HostVolumes(nil, state.SortDefault) - if err != nil { - return err - } - for raw := iter.Next(); raw != nil; raw = iter.Next() { - vol := raw.(*structs.HostVolume) - - sink.Write([]byte{byte(HostVolumeSnapshot)}) - if err := encoder.Encode(vol); err != nil { - return err - } - } - return nil -} - // Release is a no-op, as we just need to GC the pointer // to the state store snapshot. There is nothing to explicitly // cleanup. diff --git a/nomad/heartbeat.go b/nomad/heartbeat.go index 720e7b00540..1d8596a83de 100644 --- a/nomad/heartbeat.go +++ b/nomad/heartbeat.go @@ -8,9 +8,9 @@ import ( "sync" "time" + metrics "github.com/armon/go-metrics" log "github.com/hashicorp/go-hclog" memdb "github.com/hashicorp/go-memdb" - metrics "github.com/hashicorp/go-metrics/compat" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/nomad/structs" diff --git a/nomad/host_volume_endpoint.go b/nomad/host_volume_endpoint.go deleted file mode 100644 index 4f987212a37..00000000000 --- a/nomad/host_volume_endpoint.go +++ /dev/null @@ -1,755 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -package nomad - -import ( - "context" - "errors" - "fmt" - "net/http" - "regexp" - "strings" - "sync" - "time" - - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-memdb" - metrics "github.com/hashicorp/go-metrics/compat" - "github.com/hashicorp/nomad/acl" - cstructs "github.com/hashicorp/nomad/client/structs" - "github.com/hashicorp/nomad/helper" - "github.com/hashicorp/nomad/nomad/state" - "github.com/hashicorp/nomad/nomad/state/paginator" - "github.com/hashicorp/nomad/nomad/structs" - "github.com/hashicorp/nomad/scheduler" -) - -// HostVolume is the server RPC endpoint for host volumes -type HostVolume struct { - srv *Server - ctx *RPCContext - logger hclog.Logger - - // volOps is used to serialize operations per volume ID - volOps sync.Map -} - -func NewHostVolumeEndpoint(srv *Server, ctx *RPCContext) *HostVolume { - return &HostVolume{srv: srv, ctx: ctx, logger: srv.logger.Named("host_volume")} -} - -func (v *HostVolume) Get(args *structs.HostVolumeGetRequest, reply *structs.HostVolumeGetResponse) error { - authErr := v.srv.Authenticate(v.ctx, args) - if done, err := v.srv.forward("HostVolume.Get", args, args, reply); done { - return err - } - v.srv.MeasureRPCRate("host_volume", structs.RateMetricRead, args) - if authErr != nil { - return structs.ErrPermissionDenied - } - defer metrics.MeasureSince([]string{"nomad", "host_volume", "get"}, time.Now()) - - allowVolume := acl.NamespaceValidator(acl.NamespaceCapabilityHostVolumeRead) - aclObj, err := v.srv.ResolveACL(args) - if err != nil { - return err - } - if !allowVolume(aclObj, args.RequestNamespace()) { - return structs.ErrPermissionDenied - } - - opts := blockingOptions{ - queryOpts: &args.QueryOptions, - queryMeta: &reply.QueryMeta, - run: func(ws memdb.WatchSet, store *state.StateStore) error { - - vol, err := store.HostVolumeByID(ws, args.Namespace, args.ID, true) - if err != nil { - return err - } - - reply.Volume = vol - if vol != nil { - reply.Index = vol.ModifyIndex - } else { - index, err := store.Index(state.TableHostVolumes) - if err != nil { - return err - } - - // Ensure we never set the index to zero, otherwise a blocking - // query cannot be used. We floor the index at one, since - // realistically the first write must have a higher index. 
- if index == 0 { - index = 1 - } - reply.Index = index - } - return nil - }} - return v.srv.blockingRPC(&opts) -} - -func (v *HostVolume) List(args *structs.HostVolumeListRequest, reply *structs.HostVolumeListResponse) error { - authErr := v.srv.Authenticate(v.ctx, args) - if done, err := v.srv.forward("HostVolume.List", args, args, reply); done { - return err - } - v.srv.MeasureRPCRate("host_volume", structs.RateMetricList, args) - if authErr != nil { - return structs.ErrPermissionDenied - } - defer metrics.MeasureSince([]string{"nomad", "host_volume", "list"}, time.Now()) - - aclObj, err := v.srv.ResolveACL(args) - if err != nil { - return err - } - - ns := args.RequestNamespace() - - sort := state.SortOption(args.Reverse) - opts := blockingOptions{ - queryOpts: &args.QueryOptions, - queryMeta: &reply.QueryMeta, - run: func(ws memdb.WatchSet, store *state.StateStore) error { - - var iter memdb.ResultIterator - var err error - - switch { - case args.NodeID != "": - iter, err = store.HostVolumesByNodeID(ws, args.NodeID, sort) - case args.NodePool != "": - iter, err = store.HostVolumesByNodePool(ws, args.NodePool, sort) - default: - iter, err = store.HostVolumes(ws, sort) - } - if err != nil { - return err - } - - // Generate the tokenizer to use for pagination using namespace and - // ID to ensure complete uniqueness. - tokenizer := paginator.NewStructsTokenizer(iter, - paginator.StructsTokenizerOptions{ - WithNamespace: true, - WithID: true, - }, - ) - - filters := []paginator.Filter{ - paginator.GenericFilter{ - Allow: func(raw any) (bool, error) { - vol := raw.(*structs.HostVolume) - // empty prefix doesn't filter - if !strings.HasPrefix(vol.Name, args.Prefix) && - !strings.HasPrefix(vol.ID, args.Prefix) { - return false, nil - } - if args.NodeID != "" && vol.NodeID != args.NodeID { - return false, nil - } - if args.NodePool != "" && vol.NodePool != args.NodePool { - return false, nil - } - - if ns != structs.AllNamespacesSentinel && - vol.Namespace != ns { - return false, nil - } - - allowVolume := acl.NamespaceValidator(acl.NamespaceCapabilityHostVolumeRead) - return allowVolume(aclObj, ns), nil - }, - }, - } - - // Set up our output after we have checked the error. - var vols []*structs.HostVolumeStub - - // Build the paginator. This includes the function that is - // responsible for appending a variable to the variables - // stubs slice. - paginatorImpl, err := paginator.NewPaginator(iter, tokenizer, filters, args.QueryOptions, - func(raw any) error { - vol := raw.(*structs.HostVolume) - vols = append(vols, vol.Stub()) - return nil - }) - if err != nil { - return structs.NewErrRPCCodedf( - http.StatusBadRequest, "failed to create result paginator: %v", err) - } - - // Calling page populates our output variable stub array as well as - // returns the next token. - nextToken, err := paginatorImpl.Page() - if err != nil { - return structs.NewErrRPCCodedf( - http.StatusBadRequest, "failed to read result page: %v", err) - } - - reply.Volumes = vols - reply.NextToken = nextToken - - // Use the index table to populate the query meta as we have no way - // of tracking the max index on deletes. 
- return v.srv.setReplyQueryMeta(store, state.TableHostVolumes, &reply.QueryMeta) - }, - } - - return v.srv.blockingRPC(&opts) -} - -func (v *HostVolume) Create(args *structs.HostVolumeCreateRequest, reply *structs.HostVolumeCreateResponse) error { - - authErr := v.srv.Authenticate(v.ctx, args) - if done, err := v.srv.forward("HostVolume.Create", args, args, reply); done { - return err - } - v.srv.MeasureRPCRate("host_volume", structs.RateMetricWrite, args) - if authErr != nil { - return structs.ErrPermissionDenied - } - defer metrics.MeasureSince([]string{"nomad", "host_volume", "create"}, time.Now()) - - allowVolume := acl.NamespaceValidator(acl.NamespaceCapabilityHostVolumeCreate) - aclObj, err := v.srv.ResolveACL(args) - if err != nil { - return err - } - - if args.Volume == nil { - return fmt.Errorf("missing volume definition") - } - - vol := args.Volume - if vol.Namespace == "" { - vol.Namespace = args.RequestNamespace() - } - if !allowVolume(aclObj, vol.Namespace) { - return structs.ErrPermissionDenied - } - - // ensure we only try to create a valid volume or make valid updates to a - // volume - snap, err := v.srv.State().Snapshot() - if err != nil { - return err - } - existing, err := v.validateVolumeUpdate(vol, snap) - if err != nil { - return err - } - - // set zero values as needed, possibly from existing - now := time.Now() - vol.CanonicalizeForCreate(existing, now) - - // make sure any nodes or pools actually exist - err = v.validateVolumeForState(vol, snap) - if err != nil { - return fmt.Errorf("validating volume %q against state failed: %v", vol.Name, err) - } - - _, err = v.placeHostVolume(snap, vol) - if err != nil { - return fmt.Errorf("could not place volume %q: %w", vol.Name, err) - } - - warn, err := v.enforceEnterprisePolicy( - snap, vol, args.GetIdentity().GetACLToken(), args.PolicyOverride) - if warn != nil { - reply.Warnings = warn.Error() - } - if err != nil { - return err - } - - // serialize client RPC and raft write per volume ID - index, err := v.serializeCall(vol.ID, "create", func() (uint64, error) { - // Attempt to create the volume on the client. - // - // NOTE: creating the volume on the client via the plugin can't be made - // atomic with the registration, and creating the volume provides values - // we want to write on the Volume in raft anyways. - if err = v.createVolume(vol); err != nil { - return 0, err - } - - // Write a newly created or modified volume to raft. We create a new - // request here because we've likely mutated the volume. 
- _, idx, err := v.srv.raftApply(structs.HostVolumeRegisterRequestType, - &structs.HostVolumeRegisterRequest{ - Volume: vol, - WriteRequest: args.WriteRequest, - }) - if err != nil { - v.logger.Error("raft apply failed", "error", err, "method", "register") - return 0, err - } - return idx, nil - }) - if err != nil { - return err - } - - reply.Volume = vol - reply.Index = index - return nil -} - -func (v *HostVolume) Register(args *structs.HostVolumeRegisterRequest, reply *structs.HostVolumeRegisterResponse) error { - - authErr := v.srv.Authenticate(v.ctx, args) - if done, err := v.srv.forward("HostVolume.Register", args, args, reply); done { - return err - } - v.srv.MeasureRPCRate("host_volume", structs.RateMetricWrite, args) - if authErr != nil { - return structs.ErrPermissionDenied - } - defer metrics.MeasureSince([]string{"nomad", "host_volume", "register"}, time.Now()) - - allowVolume := acl.NamespaceValidator(acl.NamespaceCapabilityHostVolumeRegister) - aclObj, err := v.srv.ResolveACL(args) - if err != nil { - return err - } - - if args.Volume == nil { - return fmt.Errorf("missing volume definition") - } - - vol := args.Volume - if vol.Namespace == "" { - vol.Namespace = args.RequestNamespace() - } - if !allowVolume(aclObj, vol.Namespace) { - return structs.ErrPermissionDenied - } - - snap, err := v.srv.State().Snapshot() - if err != nil { - return err - } - - if vol.NodeID == "" { - return errors.New("cannot register volume: node ID is required") - } - if vol.HostPath == "" { - return errors.New("cannot register volume: host path is required") - } - - existing, err := v.validateVolumeUpdate(vol, snap) - if err != nil { - return err - } - - // set zero values as needed, possibly from existing - now := time.Now() - vol.CanonicalizeForRegister(existing, now) - - // make sure any nodes or pools actually exist - err = v.validateVolumeForState(vol, snap) - if err != nil { - return fmt.Errorf("validating volume %q against state failed: %v", vol.ID, err) - } - - warn, err := v.enforceEnterprisePolicy( - snap, vol, args.GetIdentity().GetACLToken(), args.PolicyOverride) - if warn != nil { - reply.Warnings = warn.Error() - } - if err != nil { - return err - } - - // serialize client RPC and raft write per volume ID - index, err := v.serializeCall(vol.ID, "register", func() (uint64, error) { - // Attempt to register the volume on the client. - // - // NOTE: registering the volume on the client via the plugin can't be made - // atomic with the registration. - if err = v.registerVolume(vol); err != nil { - return 0, err - } - - // Write a newly created or modified volume to raft. We create a new - // request here because we've likely mutated the volume. 
- _, idx, err := v.srv.raftApply(structs.HostVolumeRegisterRequestType, - &structs.HostVolumeRegisterRequest{ - Volume: vol, - WriteRequest: args.WriteRequest, - }) - if err != nil { - v.logger.Error("raft apply failed", "error", err, "method", "register") - return 0, err - } - return idx, nil - }) - if err != nil { - return err - } - - reply.Volume = vol - reply.Index = index - return nil -} - -func (v *HostVolume) validateVolumeUpdate( - vol *structs.HostVolume, snap *state.StateSnapshot) (*structs.HostVolume, error) { - - // validate the volume spec - err := vol.Validate() - if err != nil { - return nil, fmt.Errorf("volume validation failed: %v", err) - } - - // validate any update we're making - var existing *structs.HostVolume - if vol.ID != "" { - existing, err = snap.HostVolumeByID(nil, vol.Namespace, vol.ID, true) - if err != nil { - return nil, err // should never hit, bail out - } - if existing == nil { - return nil, fmt.Errorf("cannot update volume %q: volume does not exist", vol.ID) - } - err = vol.ValidateUpdate(existing) - if err != nil { - return existing, fmt.Errorf("validating volume %q update failed: %v", vol.ID, err) - } - } - return existing, nil -} - -// validateVolumeForState ensures that any references to node IDs or node pools are valid -func (v *HostVolume) validateVolumeForState(vol *structs.HostVolume, snap *state.StateSnapshot) error { - var poolFromExistingNode string - if vol.NodeID != "" { - node, err := snap.NodeByID(nil, vol.NodeID) - if err != nil { - return err // should never hit, bail out - } - if node == nil { - return fmt.Errorf("node %q does not exist", vol.NodeID) - } - poolFromExistingNode = node.NodePool - } - - if vol.NodePool != "" { - pool, err := snap.NodePoolByName(nil, vol.NodePool) - if err != nil { - return err // should never hit, bail out - } - if pool == nil { - return fmt.Errorf("node pool %q does not exist", vol.NodePool) - } - if poolFromExistingNode != "" && poolFromExistingNode != pool.Name { - return fmt.Errorf("node ID %q is not in pool %q", vol.NodeID, vol.NodePool) - } - } - - return nil -} - -func (v *HostVolume) createVolume(vol *structs.HostVolume) error { - - method := "ClientHostVolume.Create" - cReq := &cstructs.ClientHostVolumeCreateRequest{ - ID: vol.ID, - Name: vol.Name, - PluginID: vol.PluginID, - Namespace: vol.Namespace, - NodeID: vol.NodeID, - RequestedCapacityMinBytes: vol.RequestedCapacityMinBytes, - RequestedCapacityMaxBytes: vol.RequestedCapacityMaxBytes, - Parameters: vol.Parameters, - } - cResp := &cstructs.ClientHostVolumeCreateResponse{} - err := v.srv.RPC(method, cReq, cResp) - if err != nil { - return err - } - - if vol.State == structs.HostVolumeStateUnknown { - vol.State = structs.HostVolumeStatePending - } - - vol.HostPath = cResp.HostPath - vol.CapacityBytes = cResp.CapacityBytes - - return nil -} - -func (v *HostVolume) registerVolume(vol *structs.HostVolume) error { - - method := "ClientHostVolume.Register" - cReq := &cstructs.ClientHostVolumeRegisterRequest{ - ID: vol.ID, - Name: vol.Name, - NodeID: vol.NodeID, - HostPath: vol.HostPath, - CapacityBytes: vol.CapacityBytes, - Parameters: vol.Parameters, - } - cResp := &cstructs.ClientHostVolumeRegisterResponse{} - err := v.srv.RPC(method, cReq, cResp) - if err != nil { - return err - } - - if vol.State == structs.HostVolumeStateUnknown { - vol.State = structs.HostVolumeStatePending - } - - return nil -} - -// placeHostVolume adds a node to volumes that don't already have one. 
The chosen node -// must match the volume's node pool and constraints and must not already have a volume -// by that name. It returns the node (for testing) and an error if placement fails. -func (v *HostVolume) placeHostVolume(snap *state.StateSnapshot, vol *structs.HostVolume) (*structs.Node, error) { - if vol.NodeID != "" { - node, err := snap.NodeByID(nil, vol.NodeID) - if err != nil { - return nil, err - } - if node == nil { - return nil, fmt.Errorf("no such node %s", vol.NodeID) - } - vol.NodePool = node.NodePool - return node, nil - } - - poolFilterFn, err := v.enterpriseNodePoolFilter(snap, vol) - if err != nil { - return nil, err - } - - var iter memdb.ResultIterator - if vol.NodePool != "" { - if !poolFilterFn(vol.NodePool) { - return nil, fmt.Errorf("namespace %q does not allow volumes to use node pool %q", - vol.Namespace, vol.NodePool) - } - iter, err = snap.NodesByNodePool(nil, vol.NodePool) - } else { - iter, err = snap.Nodes(nil) - } - if err != nil { - return nil, err - } - - var checker *scheduler.ConstraintChecker - ctx := &placementContext{ - regexpCache: make(map[string]*regexp.Regexp), - versionCache: make(map[string]scheduler.VerConstraints), - semverCache: make(map[string]scheduler.VerConstraints), - } - constraints := []*structs.Constraint{{ - LTarget: fmt.Sprintf("${attr.plugins.host_volume.%s.version}", vol.PluginID), - Operand: "is_set", - }} - constraints = append(constraints, vol.Constraints...) - checker = scheduler.NewConstraintChecker(ctx, constraints) - - var ( - filteredByExisting int - filteredByGovernance int - filteredByFeasibility int - ) - - for { - raw := iter.Next() - if raw == nil { - break - } - candidate := raw.(*structs.Node) - - // note: this is a race if multiple users create volumes of the same - // name concurrently, but we can't solve it on the server because we - // haven't yet written to state. The client will reject requests to - // create/register a volume with the same name with a different ID.
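// The loop that follows is effectively a per-node feasibility filter: the
// synthetic "is_set" constraint on the plugin's fingerprinted version
// attribute is checked alongside any user-supplied constraints, and nodes
// that already carry a volume of the same name are skipped. A minimal sketch
// of that per-node test, assuming the placementContext type defined later in
// this file, the surrounding file's imports (fmt, regexp, scheduler, structs),
// and a hypothetical helper name:
func nodeIsFeasibleForVolume(vol *structs.HostVolume, candidate *structs.Node) bool {
	if _, hasVol := candidate.HostVolumes[vol.Name]; hasVol {
		return false // a volume by this name already exists on the node
	}
	ctx := &placementContext{
		regexpCache:  make(map[string]*regexp.Regexp),
		versionCache: make(map[string]scheduler.VerConstraints),
		semverCache:  make(map[string]scheduler.VerConstraints),
	}
	// require the DHV plugin fingerprint, then apply the volume's constraints
	constraints := append([]*structs.Constraint{{
		LTarget: fmt.Sprintf("${attr.plugins.host_volume.%s.version}", vol.PluginID),
		Operand: "is_set",
	}}, vol.Constraints...)
	return scheduler.NewConstraintChecker(ctx, constraints).Feasible(candidate)
}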
- if _, hasVol := candidate.HostVolumes[vol.Name]; hasVol { - filteredByExisting++ - continue - } - - if !poolFilterFn(candidate.NodePool) { - filteredByGovernance++ - continue - } - - if checker != nil { - if ok := checker.Feasible(candidate); !ok { - filteredByFeasibility++ - continue - } - } - - vol.NodeID = candidate.ID - vol.NodePool = candidate.NodePool - return candidate, nil - - } - - return nil, fmt.Errorf( - "no node meets constraints: %d nodes had existing volume, %d nodes filtered by node pool governance, %d nodes were infeasible", - filteredByExisting, filteredByGovernance, filteredByFeasibility) -} - -// placementContext implements the scheduler.ConstraintContext interface, a -// minimal subset of the scheduler.Context interface that we need to create a -// feasibility checker for constraints -type placementContext struct { - regexpCache map[string]*regexp.Regexp - versionCache map[string]scheduler.VerConstraints - semverCache map[string]scheduler.VerConstraints -} - -func (ctx *placementContext) Metrics() *structs.AllocMetric { return &structs.AllocMetric{} } -func (ctx *placementContext) RegexpCache() map[string]*regexp.Regexp { return ctx.regexpCache } - -func (ctx *placementContext) VersionConstraintCache() map[string]scheduler.VerConstraints { - return ctx.versionCache -} - -func (ctx *placementContext) SemverConstraintCache() map[string]scheduler.VerConstraints { - return ctx.semverCache -} - -func (v *HostVolume) Delete(args *structs.HostVolumeDeleteRequest, reply *structs.HostVolumeDeleteResponse) error { - - authErr := v.srv.Authenticate(v.ctx, args) - if done, err := v.srv.forward("HostVolume.Delete", args, args, reply); done { - return err - } - v.srv.MeasureRPCRate("host_volume", structs.RateMetricWrite, args) - if authErr != nil { - return structs.ErrPermissionDenied - } - defer metrics.MeasureSince([]string{"nomad", "host_volume", "delete"}, time.Now()) - - // Note that all deleted volumes need to be in the same namespace - allowVolume := acl.NamespaceValidator(acl.NamespaceCapabilityHostVolumeDelete) - aclObj, err := v.srv.ResolveACL(args) - if err != nil { - return err - } - if !allowVolume(aclObj, args.RequestNamespace()) { - return structs.ErrPermissionDenied - } - - if args.VolumeID == "" { - return fmt.Errorf("missing volume ID to delete") - } - - snap, err := v.srv.State().Snapshot() - if err != nil { - return err - } - - ns := args.RequestNamespace() - id := args.VolumeID - - vol, err := snap.HostVolumeByID(nil, ns, id, true) - if err != nil { - return fmt.Errorf("could not query host volume: %w", err) - } - if vol == nil { - return fmt.Errorf("no such volume: %s", id) - } - if len(vol.Allocations) > 0 { - allocIDs := helper.ConvertSlice(vol.Allocations, - func(a *structs.AllocListStub) string { return a.ID }) - return fmt.Errorf("volume %s in use by allocations: %v", id, allocIDs) - } - - // serialize client RPC and raft write per volume ID - index, err := v.serializeCall(vol.ID, "delete", func() (uint64, error) { - if err := v.deleteVolume(vol); err != nil { - return 0, err - } - _, idx, err := v.srv.raftApply(structs.HostVolumeDeleteRequestType, args) - if err != nil { - v.logger.Error("raft apply failed", "error", err, "method", "delete") - return 0, err - } - return idx, nil - }) - if err != nil { - return err - } - - reply.Index = index - return nil -} - -func (v *HostVolume) deleteVolume(vol *structs.HostVolume) error { - - method := "ClientHostVolume.Delete" - cReq := &cstructs.ClientHostVolumeDeleteRequest{ - ID: vol.ID, - Name: vol.Name, - 
PluginID: vol.PluginID, - Namespace: vol.Namespace, - NodeID: vol.NodeID, - HostPath: vol.HostPath, - Parameters: vol.Parameters, - } - cResp := &cstructs.ClientHostVolumeDeleteResponse{} - err := v.srv.RPC(method, cReq, cResp) - if err != nil { - return err - } - - return nil -} - -// serializeCall serializes fn() per volume, so DHV plugins can assume that -// Nomad will not run concurrent operations for the same volume, and for us -// to avoid interleaving client RPCs with raft writes. -// Concurrent calls should all run eventually (or timeout, or server shutdown), -// but there is no guarantee that they will run in the order received. -// The passed fn is expected to return a raft index and error. -func (v *HostVolume) serializeCall(volumeID, op string, fn func() (uint64, error)) (uint64, error) { - timeout := 2 * time.Minute // 2x the client RPC timeout - for { - ctx, done := context.WithTimeout(v.srv.shutdownCtx, timeout) - - loaded, occupied := v.volOps.LoadOrStore(volumeID, ctx) - - if !occupied { - v.logger.Trace("HostVolume RPC running ", "operation", op) - // run the fn! - index, err := fn() - - // done() must come after Delete, so that other unblocked requests - // will Store a fresh context when they continue. - v.volOps.Delete(volumeID) - done() - - return index, err - } - - // another one is running; wait for it to finish. - v.logger.Trace("HostVolume RPC waiting", "operation", op) - - // cancel the tentative context; we'll use the one we pulled from - // volOps (set by another RPC call) instead. - done() - - otherCtx := loaded.(context.Context) - select { - case <-otherCtx.Done(): - continue - case <-v.srv.shutdownCh: - return 0, structs.ErrNoLeader - } - } -} diff --git a/nomad/host_volume_endpoint_ce.go b/nomad/host_volume_endpoint_ce.go deleted file mode 100644 index 982ffd1214f..00000000000 --- a/nomad/host_volume_endpoint_ce.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -//go:build !ent -// +build !ent - -package nomad - -import ( - "github.com/hashicorp/nomad/nomad/state" - "github.com/hashicorp/nomad/nomad/structs" -) - -// enforceEnterprisePolicy is the CE stub for Enterprise governance via -// Sentinel policy and quotas -func (v *HostVolume) enforceEnterprisePolicy( - _ *state.StateSnapshot, - _ *structs.HostVolume, - _ *structs.ACLToken, - _ bool, -) (error, error) { - return nil, nil -} - -// enterpriseNodePoolFilter is the CE stub for filtering nodes during placement -// via Enterprise node pool governance. -func (v *HostVolume) enterpriseNodePoolFilter(_ *state.StateSnapshot, _ *structs.HostVolume) (func(string) bool, error) { - return func(_ string) bool { return true }, nil -} diff --git a/nomad/host_volume_endpoint_test.go b/nomad/host_volume_endpoint_test.go deleted file mode 100644 index 8f420e05d51..00000000000 --- a/nomad/host_volume_endpoint_test.go +++ /dev/null @@ -1,1118 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -package nomad - -import ( - "context" - "errors" - "fmt" - "sync" - "testing" - "time" - - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/go-set/v3" - msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc/v2" - "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/client" - "github.com/hashicorp/nomad/client/config" - cstructs "github.com/hashicorp/nomad/client/structs" - "github.com/hashicorp/nomad/helper" - "github.com/hashicorp/nomad/helper/testlog" - "github.com/hashicorp/nomad/helper/uuid" - "github.com/hashicorp/nomad/nomad/mock" - "github.com/hashicorp/nomad/nomad/structs" - "github.com/hashicorp/nomad/testutil" - "github.com/hashicorp/nomad/version" - "github.com/shoenig/test" - "github.com/shoenig/test/must" - "github.com/shoenig/test/wait" -) - -func TestHostVolumeEndpoint_CreateRegisterGetDelete(t *testing.T) { - ci.Parallel(t) - - srv, _, cleanupSrv := TestACLServer(t, func(c *Config) { - c.NumSchedulers = 0 - }) - t.Cleanup(cleanupSrv) - testutil.WaitForLeader(t, srv.RPC) - store := srv.fsm.State() - - c1, node1 := newMockHostVolumeClient(t, srv, "prod") - c2, _ := newMockHostVolumeClient(t, srv, "default") - c2.setCreate(nil, errors.New("this node should never receive create RPC")) - c2.setDelete("this node should never receive delete RPC") - - index := uint64(1001) - - token := mock.CreatePolicyAndToken(t, store, index, "volume-manager", - `namespace "apps" { capabilities = ["host-volume-register"] } - node { policy = "read" }`).SecretID - - index++ - otherToken := mock.CreatePolicyAndToken(t, store, index, "other", - `namespace "foo" { capabilities = ["host-volume-register"] } - node { policy = "read" }`).SecretID - - index++ - powerToken := mock.CreatePolicyAndToken(t, store, index, "cluster-admin", - `namespace "*" { capabilities = ["host-volume-write"] } - node { policy = "read" }`).SecretID - - index++ - ns := "apps" - nspace := mock.Namespace() - nspace.Name = ns - must.NoError(t, store.UpsertNamespaces(index, []*structs.Namespace{nspace})) - - codec := rpcClient(t, srv) - - req := &structs.HostVolumeCreateRequest{ - WriteRequest: structs.WriteRequest{ - Region: srv.Region(), - AuthToken: token}, - } - - t.Run("invalid create", func(t *testing.T) { - - req.Namespace = ns - var resp structs.HostVolumeCreateResponse - err := msgpackrpc.CallWithCodec(codec, "HostVolume.Create", req, &resp) - must.EqError(t, err, "missing volume definition") - - req.Volume = &structs.HostVolume{RequestedCapabilities: []*structs.HostVolumeCapability{ - {AttachmentMode: "foo"}}} - - err = msgpackrpc.CallWithCodec(codec, "HostVolume.Create", req, &resp) - must.EqError(t, err, `volume validation failed: 2 errors occurred: - * missing name - * invalid attachment mode: "foo" - -`) - - req.Volume = &structs.HostVolume{ - Name: "example", - PluginID: "example_plugin", - Constraints: []*structs.Constraint{{ - RTarget: "r1", - Operand: "=", - }}, - RequestedCapacityMinBytes: 200000, - RequestedCapacityMaxBytes: 100000, - RequestedCapabilities: []*structs.HostVolumeCapability{ - { - AttachmentMode: structs.HostVolumeAttachmentModeFilesystem, - AccessMode: structs.HostVolumeAccessModeSingleNodeWriter, - }, - { - AttachmentMode: "bad", - AccessMode: "invalid", - }, - }, - } - - err = msgpackrpc.CallWithCodec(codec, "HostVolume.Create", req, &resp) - must.EqError(t, err, `volume validation failed: 3 errors occurred: - * capacity_max (100000) must be larger than capacity_min (200000) - * invalid attachment mode: "bad" - * invalid constraint: 1 
error occurred: - * No LTarget provided but is required by constraint - - - -`) - - invalidNode := &structs.Node{ID: uuid.Generate(), NodePool: "does-not-exist"} - volOnInvalidNode := mock.HostVolumeRequestForNode(ns, invalidNode) - req.Volume = volOnInvalidNode - err = msgpackrpc.CallWithCodec(codec, "HostVolume.Create", req, &resp) - must.EqError(t, err, fmt.Sprintf( - `validating volume "example" against state failed: node %q does not exist`, - invalidNode.ID)) - - req.Volume.NodeID = "" - err = msgpackrpc.CallWithCodec(codec, "HostVolume.Register", req, &resp) - must.EqError(t, err, "cannot register volume: node ID is required") - }) - - var expectIndex uint64 - - c1.setCreate(&cstructs.ClientHostVolumeCreateResponse{ - HostPath: "/var/nomad/alloc_mounts/foo", - CapacityBytes: 150000, - }, nil) - - vol1 := mock.HostVolumeRequest("apps") - vol1.Name = "example1" - vol1.NodePool = "prod" - vol2 := mock.HostVolumeRequest("apps") - vol2.Name = "example2" - vol2.NodePool = "prod" - - t.Run("invalid permissions", func(t *testing.T) { - var resp structs.HostVolumeCreateResponse - req.AuthToken = otherToken - - req.Volume = vol1 - err := msgpackrpc.CallWithCodec(codec, "HostVolume.Create", req, &resp) - must.EqError(t, err, "Permission denied") - }) - - t.Run("invalid node constraints", func(t *testing.T) { - vol1.Constraints[0].RTarget = "r2" - vol2.Constraints[0].RTarget = "r2" - - defer func() { - vol1.Constraints[0].RTarget = "r1" - vol2.Constraints[0].RTarget = "r1" - }() - - req.Volume = vol1.Copy() - var resp structs.HostVolumeCreateResponse - req.AuthToken = token - err := msgpackrpc.CallWithCodec(codec, "HostVolume.Create", req, &resp) - must.EqError(t, err, `could not place volume "example1": no node meets constraints: 0 nodes had existing volume, 0 nodes filtered by node pool governance, 1 nodes were infeasible`) - - req.Volume = vol2.Copy() - resp = structs.HostVolumeCreateResponse{} - err = msgpackrpc.CallWithCodec(codec, "HostVolume.Create", req, &resp) - must.EqError(t, err, `could not place volume "example2": no node meets constraints: 0 nodes had existing volume, 0 nodes filtered by node pool governance, 1 nodes were infeasible`) - }) - - t.Run("valid create", func(t *testing.T) { - var resp structs.HostVolumeCreateResponse - req.AuthToken = token - req.Volume = vol1.Copy() - err := msgpackrpc.CallWithCodec(codec, "HostVolume.Create", req, &resp) - must.NoError(t, err) - must.NotNil(t, resp.Volume) - vol1 = resp.Volume - - expectIndex = resp.Index - req.Volume = vol2.Copy() - resp = structs.HostVolumeCreateResponse{} - err = msgpackrpc.CallWithCodec(codec, "HostVolume.Create", req, &resp) - must.NoError(t, err) - must.NotNil(t, resp.Volume) - vol2 = resp.Volume - - getReq := &structs.HostVolumeGetRequest{ - ID: vol1.ID, - QueryOptions: structs.QueryOptions{ - Region: srv.Region(), - Namespace: ns, - AuthToken: otherToken, - }, - } - var getResp structs.HostVolumeGetResponse - err = msgpackrpc.CallWithCodec(codec, "HostVolume.Get", getReq, &getResp) - must.EqError(t, err, "Permission denied") - - getReq.AuthToken = token - err = msgpackrpc.CallWithCodec(codec, "HostVolume.Get", getReq, &getResp) - must.NoError(t, err) - must.NotNil(t, getResp.Volume) - }) - - t.Run("invalid updates", func(t *testing.T) { - - invalidVol1 := vol1.Copy() - invalidVol2 := &structs.HostVolume{ - NodeID: uuid.Generate(), - RequestedCapabilities: []*structs.HostVolumeCapability{ - { - AttachmentMode: structs.HostVolumeAttachmentModeFilesystem, - AccessMode: "foo", - }}, - } - - createReq := 
&structs.HostVolumeCreateRequest{ - Volume: invalidVol2, - WriteRequest: structs.WriteRequest{ - Region: srv.Region(), - Namespace: ns, - AuthToken: token}, - } - c1.setCreate(nil, errors.New("should not call this endpoint on invalid RPCs")) - var createResp structs.HostVolumeCreateResponse - err := msgpackrpc.CallWithCodec(codec, "HostVolume.Create", createReq, &createResp) - must.EqError(t, err, `volume validation failed: 2 errors occurred: - * missing name - * invalid access mode: "foo" - -`, must.Sprint("initial validation failures should exit early")) - - invalidVol1.NodeID = uuid.Generate() - invalidVol1.RequestedCapacityMinBytes = 100 - invalidVol1.RequestedCapacityMaxBytes = 200 - registerReq := &structs.HostVolumeRegisterRequest{ - Volume: invalidVol1, - WriteRequest: structs.WriteRequest{ - Region: srv.Region(), - Namespace: ns, - AuthToken: token}, - } - var registerResp structs.HostVolumeRegisterResponse - err = msgpackrpc.CallWithCodec(codec, "HostVolume.Register", registerReq, ®isterResp) - must.EqError(t, err, fmt.Sprintf(`validating volume %q update failed: 2 errors occurred: - * node ID cannot be updated - * capacity_max (200) cannot be less than existing provisioned capacity (150000) - -`, invalidVol1.ID), must.Sprint("update validation checks should have failed")) - - }) - - t.Run("blocking Get unblocks on write", func(t *testing.T) { - nextVol1 := vol1.Copy() - nextVol1.RequestedCapacityMaxBytes = 300000 - registerReq := &structs.HostVolumeRegisterRequest{ - Volume: nextVol1, - WriteRequest: structs.WriteRequest{ - Region: srv.Region(), - Namespace: ns, - AuthToken: token}, - } - - c1.setCreate(nil, errors.New("should not call this endpoint on register RPC")) - - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - t.Cleanup(cancel) - volCh := make(chan *structs.HostVolume) - errCh := make(chan error) - - getReq := &structs.HostVolumeGetRequest{ - ID: vol1.ID, - QueryOptions: structs.QueryOptions{ - Region: srv.Region(), - Namespace: ns, - AuthToken: token, - MinQueryIndex: expectIndex, - }, - } - - go func() { - codec := rpcClient(t, srv) - var getResp structs.HostVolumeGetResponse - err := msgpackrpc.CallWithCodec(codec, "HostVolume.Get", getReq, &getResp) - if err != nil { - errCh <- err - } - volCh <- getResp.Volume - }() - - // re-register the volume long enough later that we can be sure we won't - // win a race with the get RPC goroutine - time.AfterFunc(200*time.Millisecond, func() { - codec := rpcClient(t, srv) - var registerResp structs.HostVolumeRegisterResponse - err := msgpackrpc.CallWithCodec(codec, "HostVolume.Register", registerReq, ®isterResp) - must.NoError(t, err) - }) - - select { - case <-ctx.Done(): - t.Fatal("timeout or cancelled") - case vol := <-volCh: - must.Greater(t, expectIndex, vol.ModifyIndex) - case err := <-errCh: - t.Fatalf("unexpected error: %v", err) - } - }) - - t.Run("delete blocked by allocation claims", func(t *testing.T) { - - // claim one of the volumes with a pending allocation - alloc := mock.MinAlloc() - alloc.NodeID = node1.ID - alloc.Job.TaskGroups[0].Volumes = map[string]*structs.VolumeRequest{"example": { - Name: "example", - Type: structs.VolumeTypeHost, - Source: vol2.Name, - }} - index++ - must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, - index, []*structs.Allocation{alloc})) - - delReq := &structs.HostVolumeDeleteRequest{ - VolumeID: vol2.ID, - WriteRequest: structs.WriteRequest{ - Region: srv.Region(), - Namespace: ns, - AuthToken: token}, - } - var delResp 
structs.HostVolumeDeleteResponse - - err := msgpackrpc.CallWithCodec(codec, "HostVolume.Delete", delReq, &delResp) - must.EqError(t, err, "Permission denied") - - delReq.AuthToken = powerToken - err = msgpackrpc.CallWithCodec(codec, "HostVolume.Delete", delReq, &delResp) - must.EqError(t, err, fmt.Sprintf("volume %s in use by allocations: [%s]", vol2.ID, alloc.ID)) - - // update the allocations terminal so the delete works - alloc = alloc.Copy() - alloc.ClientStatus = structs.AllocClientStatusFailed - nArgs := &structs.AllocUpdateRequest{ - Alloc: []*structs.Allocation{alloc}, - WriteRequest: structs.WriteRequest{ - Region: srv.Region(), - AuthToken: node1.SecretID}, - } - err = msgpackrpc.CallWithCodec(codec, "Node.UpdateAlloc", nArgs, &structs.GenericResponse{}) - - err = msgpackrpc.CallWithCodec(codec, "HostVolume.Delete", delReq, &delResp) - must.NoError(t, err) - - getReq := &structs.HostVolumeGetRequest{ - ID: vol2.ID, - QueryOptions: structs.QueryOptions{ - Region: srv.Region(), - Namespace: ns, - AuthToken: token, - }, - } - var getResp structs.HostVolumeGetResponse - err = msgpackrpc.CallWithCodec(codec, "HostVolume.Get", getReq, &getResp) - must.NoError(t, err) - must.Nil(t, getResp.Volume) - }) - - // delete vol1 to finish cleaning up - var delResp structs.HostVolumeDeleteResponse - err := msgpackrpc.CallWithCodec(codec, "HostVolume.Delete", &structs.HostVolumeDeleteRequest{ - VolumeID: vol1.ID, - WriteRequest: structs.WriteRequest{ - Region: srv.Region(), - Namespace: vol1.Namespace, - AuthToken: powerToken, - }, - }, &delResp) - must.NoError(t, err) - - // should be no volumes left - var listResp structs.HostVolumeListResponse - err = msgpackrpc.CallWithCodec(codec, "HostVolume.List", &structs.HostVolumeListRequest{ - QueryOptions: structs.QueryOptions{ - Region: srv.Region(), - Namespace: "*", - AuthToken: token, - }, - }, &listResp) - must.NoError(t, err) - must.Len(t, 0, listResp.Volumes, must.Sprintf("expect no volumes to remain, got: %+v", listResp)) -} - -func TestHostVolumeEndpoint_List(t *testing.T) { - ci.Parallel(t) - - srv, rootToken, cleanupSrv := TestACLServer(t, func(c *Config) { - c.NumSchedulers = 0 - }) - t.Cleanup(cleanupSrv) - testutil.WaitForLeader(t, srv.RPC) - store := srv.fsm.State() - codec := rpcClient(t, srv) - - index := uint64(1001) - - token := mock.CreatePolicyAndToken(t, store, index, "volume-manager", - `namespace "apps" { capabilities = ["host-volume-register"] } - node { policy = "read" }`).SecretID - - index++ - otherToken := mock.CreatePolicyAndToken(t, store, index, "other", - `namespace "foo" { capabilities = ["host-volume-read"] } - node { policy = "read" }`).SecretID - - index++ - ns1 := "apps" - ns2 := "system" - nspace1, nspace2 := mock.Namespace(), mock.Namespace() - nspace1.Name = ns1 - nspace2.Name = ns2 - must.NoError(t, store.UpsertNamespaces(index, []*structs.Namespace{nspace1, nspace2})) - - _, node0 := newMockHostVolumeClient(t, srv, "default") - _, node1 := newMockHostVolumeClient(t, srv, "default") - _, node2 := newMockHostVolumeClient(t, srv, "prod") - - vol1 := mock.HostVolumeRequestForNode(ns1, node0) - vol1.Name = "foobar-example" - vol1.HostPath = "/tmp/vol1" - - vol2 := mock.HostVolumeRequestForNode(ns1, node1) - vol2.Name = "foobaz-example" - vol2.HostPath = "/tmp/vol2" - - vol3 := mock.HostVolumeRequestForNode(ns2, node2) - vol3.Name = "foobar-example" - vol3.HostPath = "/tmp/vol3" - - vol4 := mock.HostVolumeRequestForNode(ns2, node1) - vol4.Name = "foobaz-example" - vol4.HostPath = "/tmp/vol4" - - // we need to 
register these rather than upsert them so we have the correct - // indexes for unblocking later. - registerReq := &structs.HostVolumeRegisterRequest{ - WriteRequest: structs.WriteRequest{ - Region: srv.Region(), - AuthToken: rootToken.SecretID}, - } - - var registerResp structs.HostVolumeRegisterResponse - - // write the volumes in reverse order so our later test can get a blocking - // query index from a Get it has access to - - registerReq.Volume = vol4 - err := msgpackrpc.CallWithCodec(codec, "HostVolume.Register", registerReq, ®isterResp) - must.NoError(t, err) - vol4 = registerResp.Volume - - registerReq.Volume = vol3 - registerResp = structs.HostVolumeRegisterResponse{} - err = msgpackrpc.CallWithCodec(codec, "HostVolume.Register", registerReq, ®isterResp) - must.NoError(t, err) - vol3 = registerResp.Volume - - registerReq.Volume = vol2 - registerResp = structs.HostVolumeRegisterResponse{} - err = msgpackrpc.CallWithCodec(codec, "HostVolume.Register", registerReq, ®isterResp) - must.NoError(t, err) - vol2 = registerResp.Volume - - registerReq.Volume = vol1 - registerResp = structs.HostVolumeRegisterResponse{} - err = msgpackrpc.CallWithCodec(codec, "HostVolume.Register", registerReq, ®isterResp) - must.NoError(t, err) - vol1 = registerResp.Volume - - testCases := []struct { - name string - req *structs.HostVolumeListRequest - expectVolIDs []string - }{ - { - name: "wrong namespace for token", - req: &structs.HostVolumeListRequest{ - QueryOptions: structs.QueryOptions{ - Region: srv.Region(), - Namespace: ns1, - AuthToken: otherToken, - }, - }, - expectVolIDs: []string{}, - }, - { - name: "query by namespace", - req: &structs.HostVolumeListRequest{ - QueryOptions: structs.QueryOptions{ - Region: srv.Region(), - Namespace: ns1, - AuthToken: token, - }, - }, - expectVolIDs: []string{vol1.ID, vol2.ID}, - }, - { - name: "wildcard namespace", - req: &structs.HostVolumeListRequest{ - QueryOptions: structs.QueryOptions{ - Region: srv.Region(), - Namespace: structs.AllNamespacesSentinel, - AuthToken: token, - }, - }, - expectVolIDs: []string{vol1.ID, vol2.ID, vol3.ID, vol4.ID}, - }, - { - name: "query by prefix", - req: &structs.HostVolumeListRequest{ - QueryOptions: structs.QueryOptions{ - Region: srv.Region(), - Namespace: ns1, - AuthToken: token, - Prefix: "foobar", - }, - }, - expectVolIDs: []string{vol1.ID}, - }, - { - name: "query by node", - req: &structs.HostVolumeListRequest{ - NodeID: node1.ID, - QueryOptions: structs.QueryOptions{ - Region: srv.Region(), - Namespace: structs.AllNamespacesSentinel, - AuthToken: token, - }, - }, - expectVolIDs: []string{vol2.ID, vol4.ID}, - }, - { - name: "query by node pool", - req: &structs.HostVolumeListRequest{ - NodePool: "prod", - QueryOptions: structs.QueryOptions{ - Region: srv.Region(), - Namespace: structs.AllNamespacesSentinel, - AuthToken: token, - }, - }, - expectVolIDs: []string{vol3.ID}, - }, - { - name: "query by incompatible node ID and pool", - req: &structs.HostVolumeListRequest{ - NodeID: node1.ID, - NodePool: "prod", - QueryOptions: structs.QueryOptions{ - Region: srv.Region(), - Namespace: structs.AllNamespacesSentinel, - AuthToken: token, - }, - }, - expectVolIDs: []string{}, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - var resp structs.HostVolumeListResponse - err := msgpackrpc.CallWithCodec(codec, "HostVolume.List", tc.req, &resp) - must.NoError(t, err) - - gotIDs := helper.ConvertSlice(resp.Volumes, - func(v *structs.HostVolumeStub) string { return v.ID }) - must.SliceContainsAll(t, 
tc.expectVolIDs, gotIDs, - must.Sprintf("got: %v", gotIDs)) - }) - } - - t.Run("blocking query unblocks", func(t *testing.T) { - - // the Get response from the most-recently written volume will have the - // index we want to block on - getReq := &structs.HostVolumeGetRequest{ - ID: vol1.ID, - QueryOptions: structs.QueryOptions{ - Region: srv.Region(), - Namespace: ns1, - AuthToken: token, - }, - } - var getResp structs.HostVolumeGetResponse - err = msgpackrpc.CallWithCodec(codec, "HostVolume.Get", getReq, &getResp) - must.NoError(t, err) - must.NotNil(t, getResp.Volume) - - nextVol := getResp.Volume.Copy() - nextVol.RequestedCapacityMaxBytes = 300000 - registerReq.Volume = nextVol - registerReq.Namespace = nextVol.Namespace - - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - t.Cleanup(cancel) - respCh := make(chan *structs.HostVolumeListResponse) - errCh := make(chan error) - - // prepare the blocking List query - - req := &structs.HostVolumeListRequest{ - QueryOptions: structs.QueryOptions{ - Region: srv.Region(), - Namespace: ns1, - AuthToken: token, - MinQueryIndex: getResp.Index, - }, - } - - go func() { - codec := rpcClient(t, srv) - var listResp structs.HostVolumeListResponse - err := msgpackrpc.CallWithCodec(codec, "HostVolume.List", req, &listResp) - if err != nil { - errCh <- err - } - respCh <- &listResp - }() - - // re-register the volume long enough later that we can be sure we won't - // win a race with the get RPC goroutine - time.AfterFunc(200*time.Millisecond, func() { - codec := rpcClient(t, srv) - var registerResp structs.HostVolumeRegisterResponse - err = msgpackrpc.CallWithCodec(codec, "HostVolume.Register", registerReq, ®isterResp) - must.NoError(t, err) - }) - - select { - case <-ctx.Done(): - t.Fatal("timeout or cancelled") - case listResp := <-respCh: - must.Greater(t, req.MinQueryIndex, listResp.Index) - case err := <-errCh: - t.Fatalf("unexpected error: %v", err) - } - }) -} - -func TestHostVolumeEndpoint_placeVolume(t *testing.T) { - srv, _, cleanupSrv := TestACLServer(t, func(c *Config) { - c.NumSchedulers = 0 - }) - t.Cleanup(cleanupSrv) - testutil.WaitForLeader(t, srv.RPC) - store := srv.fsm.State() - - endpoint := &HostVolume{ - srv: srv, - logger: testlog.HCLogger(t), - } - - node0, node1, node2, node3 := mock.Node(), mock.Node(), mock.Node(), mock.Node() - node0.NodePool = structs.NodePoolDefault - node0.Attributes["plugins.host_volume.mkdir.version"] = "0.0.1" - - node1.NodePool = "dev" - node1.Meta["rack"] = "r2" - node1.Attributes["plugins.host_volume.mkdir.version"] = "0.0.1" - - node2.NodePool = "prod" - node2.Attributes["plugins.host_volume.mkdir.version"] = "0.0.1" - - node3.NodePool = "prod" - node3.Meta["rack"] = "r3" - node3.HostVolumes = map[string]*structs.ClientHostVolumeConfig{"example": { - Name: "example", - Path: "/srv", - }} - node3.Attributes["plugins.host_volume.mkdir.version"] = "0.0.1" - - must.NoError(t, store.UpsertNode(structs.MsgTypeTestSetup, 1000, node0)) - must.NoError(t, store.UpsertNode(structs.MsgTypeTestSetup, 1000, node1)) - must.NoError(t, store.UpsertNode(structs.MsgTypeTestSetup, 1000, node2)) - must.NoError(t, store.UpsertNode(structs.MsgTypeTestSetup, 1000, node3)) - - testCases := []struct { - name string - vol *structs.HostVolume - expect *structs.Node - expectErr string - }{ - { - name: "only one in node pool", - vol: &structs.HostVolume{NodePool: "default", PluginID: "mkdir"}, - expect: node0, - }, - { - name: "only one that matches constraints", - vol: &structs.HostVolume{ - PluginID: 
"mkdir", - Constraints: []*structs.Constraint{ - { - LTarget: "${meta.rack}", - RTarget: "r2", - Operand: "=", - }, - }}, - expect: node1, - }, - { - name: "only one available in pool", - vol: &structs.HostVolume{NodePool: "prod", Name: "example", PluginID: "mkdir"}, - expect: node2, - }, - { - name: "no matching constraint", - vol: &structs.HostVolume{ - PluginID: "mkdir", - Constraints: []*structs.Constraint{ - { - LTarget: "${meta.rack}", - RTarget: "r6", - Operand: "=", - }, - }}, - expectErr: "no node meets constraints: 0 nodes had existing volume, 0 nodes filtered by node pool governance, 4 nodes were infeasible", - }, - { - name: "no matching plugin", - vol: &structs.HostVolume{PluginID: "not-mkdir"}, - expectErr: "no node meets constraints: 0 nodes had existing volume, 0 nodes filtered by node pool governance, 4 nodes were infeasible", - }, - { - name: "match already has a volume with the same name", - vol: &structs.HostVolume{ - Name: "example", - PluginID: "mkdir", - Constraints: []*structs.Constraint{ - { - LTarget: "${meta.rack}", - RTarget: "r3", - Operand: "=", - }, - }}, - expectErr: "no node meets constraints: 1 nodes had existing volume, 0 nodes filtered by node pool governance, 3 nodes were infeasible", - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - snap, _ := store.Snapshot() - node, err := endpoint.placeHostVolume(snap, tc.vol) - if tc.expectErr == "" { - must.NoError(t, err) - must.Eq(t, tc.expect, node) - } else { - must.EqError(t, err, tc.expectErr) - must.Nil(t, node) - } - }) - } -} - -// TestHostVolumeEndpoint_concurrency checks that create/register/delete RPC -// calls can not run concurrently for a single volume. -func TestHostVolumeEndpoint_concurrency(t *testing.T) { - ci.Parallel(t) - - srv, cleanup := TestServer(t, func(c *Config) { c.NumSchedulers = 0 }) - t.Cleanup(cleanup) - testutil.WaitForLeader(t, srv.RPC) - - c, node := newMockHostVolumeClient(t, srv, "default") - - vol := &structs.HostVolume{ - Name: "test-vol", - Namespace: "default", - NodeID: node.ID, - PluginID: "mkdir", - HostPath: "/pretend/path", - RequestedCapabilities: []*structs.HostVolumeCapability{ - { - AttachmentMode: structs.HostVolumeAttachmentModeFilesystem, - AccessMode: structs.HostVolumeAccessModeSingleNodeWriter, - }, - }, - } - wr := structs.WriteRequest{Region: srv.Region()} - - // tell the mock client how it should respond to create calls - c.setCreate(&cstructs.ClientHostVolumeCreateResponse{ - VolumeName: "test-vol", - HostPath: "/pretend/path", - }, nil) - - // create the volume for us to attempt concurrent operations on - cVol := vol.Copy() // copy because HostPath gets mutated - cVol.Parameters = map[string]string{"created": "initial"} - createReq := &structs.HostVolumeCreateRequest{ - Volume: cVol, - WriteRequest: wr, - } - var createResp structs.HostVolumeCreateResponse - must.NoError(t, srv.RPC("HostVolume.Create", createReq, &createResp)) - got, err := srv.State().HostVolumeByID(nil, vol.Namespace, createResp.Volume.ID, false) - must.NoError(t, err) - must.Eq(t, map[string]string{"created": "initial"}, got.Parameters) - - // warning: below here be (concurrency) dragons. if this test fails, - // it is rather difficult to troubleshoot. sorry! - - // this is critical -- everything needs to use the same volume ID, - // because that's what the serialization is based on. 
- vol.ID = createResp.Volume.ID - - // "create" volume #2 (same vol except for parameters) - cVol2 := vol.Copy() - cVol2.Parameters = map[string]string{"created": "again"} - // "register" volume - rVol := vol.Copy() - rVol.Parameters = map[string]string{"registered": "yup"} - - // prepare the mock client to block its calls, and get a CancelFunc - // to make sure we don't get any deadlocked client RPCs. - cancelClientRPCBlocks, err := c.setBlockChan() - must.NoError(t, err) - - // each operation goroutine will put its name in here when it completes, - // so we can wait until the whole RPC completes before checking state. - rpcDoneCh := make(chan string) - rpcDone := func(op string) { - t.Helper() - select { - case rpcDoneCh <- op: - case <-time.After(time.Second): - t.Errorf("timed out writing %q to rpcDoneCh", op) - } - } - - // start all the RPCs concurrently - var funcs multierror.Group - // create - funcs.Go(func() error { - createReq = &structs.HostVolumeCreateRequest{ - Volume: cVol2, - WriteRequest: wr, - } - createResp = structs.HostVolumeCreateResponse{} - err := srv.RPC("HostVolume.Create", createReq, &createResp) - rpcDone("create") - return err - }) - // register - funcs.Go(func() error { - registerReq := &structs.HostVolumeRegisterRequest{ - Volume: rVol, - WriteRequest: wr, - } - var registerResp structs.HostVolumeRegisterResponse - err := srv.RPC("HostVolume.Register", registerReq, ®isterResp) - rpcDone("register") - return err - }) - // delete - funcs.Go(func() error { - deleteReq := &structs.HostVolumeDeleteRequest{ - VolumeID: vol.ID, - WriteRequest: wr, - } - var deleteResp structs.HostVolumeDeleteResponse - err := srv.RPC("HostVolume.Delete", deleteReq, &deleteResp) - rpcDone("delete") - return err - }) - - // NOTE: below here, we avoid `must` methods, because a t.Fatal causes all - // the above goroutines to halt with confusing errors. - - // keep track of which operations have completed - opSet := set.From([]string{"create", "register", "delete"}) - -LOOP: - for { - if opSet.Empty() { - break // all done! - } - - // unblock a client RPC; it will tell us which one it let through. - op, err := c.unblockCurrent() - if err != nil { - t.Errorf("error unblocking client RPC: %v", err) - break - } - - if !opSet.Remove(op) { - t.Errorf("mystery unblocked RPC operation: %q", op) - break - } - - // make sure the server RPC has totally completed (and written state), - // and that the server RPC matches the unblocked client RPC. - select { - case serverOp := <-rpcDoneCh: - if serverOp != op { - t.Errorf("client RPC says %q; server RPC says %q", op, serverOp) - continue - } - case <-time.After(time.Second): - t.Error("timeout waiting for an RPC to finish") - break LOOP - } - - // get the volume to check - got, err := srv.State().HostVolumeByID(nil, vol.Namespace, vol.ID, false) - if err != nil { - t.Errorf("error reading state: %v", err) - break - } - - switch op { - - case "create": - if got == nil { - t.Error("volume should not be nil after create RPC") - continue - } - test.Eq(t, cVol2.Parameters, got.Parameters) - - case "register": - if got == nil { - t.Error("volume should not be nil after register RPC") - continue - } - test.Eq(t, rVol.Parameters, got.Parameters) - - case "delete": - test.Nil(t, got, test.Sprint("")) - } - } - - // everything should be done by now, but just in case. - cancelClientRPCBlocks() - - mErr := funcs.Wait() - test.NoError(t, helper.FlattenMultierror(mErr)) - - // all of 'em should have happened! 
- test.Eq(t, []string{}, opSet.Slice(), test.Sprint("remaining opSet should be empty")) -} - -// mockHostVolumeClient models client RPCs that have side-effects on the -// client host -type mockHostVolumeClient struct { - lock sync.Mutex - nextCreateResponse *cstructs.ClientHostVolumeCreateResponse - nextCreateErr error - nextRegisterErr error - nextDeleteErr error - // blockChan is used to test server->client RPC serialization. - // do not block on this channel while the main lock is held. - blockChan chan string - // shutdownCtx is an escape hatch to release any/all blocked RPCs - shutdownCtx context.Context -} - -// newMockHostVolumeClient configures a RPC-only Nomad test agent and returns a -// mockHostVolumeClient so we can send it client RPCs -func newMockHostVolumeClient(t *testing.T, srv *Server, pool string) (*mockHostVolumeClient, *structs.Node) { - t.Helper() - - mockClientEndpoint := &mockHostVolumeClient{} - - c1, cleanup := client.TestRPCOnlyClient(t, func(c *config.Config) { - c.Node.NodePool = pool - c.Node.Attributes["nomad.version"] = version.Version - c.Node.Attributes["plugins.host_volume.mkdir.version"] = "0.0.1" - c.Node.Meta["rack"] = "r1" - }, srv.config.RPCAddr, map[string]any{"HostVolume": mockClientEndpoint}) - t.Cleanup(cleanup) - - must.Wait(t, wait.InitialSuccess(wait.BoolFunc(func() bool { - node, err := srv.fsm.State().NodeByID(nil, c1.NodeID()) - if err != nil { - return false - } - if node != nil && node.Status == structs.NodeStatusReady { - return true - } - return false - }), - wait.Timeout(time.Second*5), - wait.Gap(time.Millisecond), - ), must.Sprint("client did not fingerprint before timeout")) - - return mockClientEndpoint, c1.Node() -} - -func (v *mockHostVolumeClient) setCreate( - resp *cstructs.ClientHostVolumeCreateResponse, err error) { - v.lock.Lock() - defer v.lock.Unlock() - v.nextCreateResponse = resp - v.nextCreateErr = err -} - -func (v *mockHostVolumeClient) setDelete(errMsg string) { - v.lock.Lock() - defer v.lock.Unlock() - v.nextDeleteErr = errors.New(errMsg) -} - -func (v *mockHostVolumeClient) Create( - req *cstructs.ClientHostVolumeCreateRequest, - resp *cstructs.ClientHostVolumeCreateResponse) error { - - if err := v.block("create"); err != nil { - return err - } - - v.lock.Lock() - defer v.lock.Unlock() - if v.nextCreateResponse == nil { - return nil // prevents panics from incorrect tests - } - *resp = *v.nextCreateResponse - return v.nextCreateErr -} - -func (v *mockHostVolumeClient) Register( - req *cstructs.ClientHostVolumeRegisterRequest, - resp *cstructs.ClientHostVolumeRegisterResponse) error { - - if err := v.block("register"); err != nil { - return err - } - - v.lock.Lock() - defer v.lock.Unlock() - *resp = cstructs.ClientHostVolumeRegisterResponse{} - return v.nextRegisterErr -} - -func (v *mockHostVolumeClient) Delete( - req *cstructs.ClientHostVolumeDeleteRequest, - resp *cstructs.ClientHostVolumeDeleteResponse) error { - - if err := v.block("delete"); err != nil { - return err - } - - v.lock.Lock() - defer v.lock.Unlock() - return v.nextDeleteErr -} - -func (v *mockHostVolumeClient) setBlockChan() (context.CancelFunc, error) { - v.lock.Lock() - defer v.lock.Unlock() - if v.blockChan != nil { - return nil, errors.New("blockChan already set") - } - v.blockChan = make(chan string) // no buffer to ensure blockage - // timeout context to ensure blockage is not endless - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - v.shutdownCtx = ctx - return cancel, nil -} - -func (v *mockHostVolumeClient) 
getBlockChan() chan string { - v.lock.Lock() - defer v.lock.Unlock() - return v.blockChan -} - -// block stalls the RPC until something (a test) runs unblockCurrent, -// if something (a test) had previously run setBlockChan to set it up. -func (v *mockHostVolumeClient) block(op string) error { - bc := v.getBlockChan() - if bc == nil { - return nil - } - select { - case bc <- op: - return nil - case <-v.shutdownCtx.Done(): - // if this happens, it'll be because unblockCurrent was not run enough - return fmt.Errorf("shutdownCtx done before blockChan unblocked: %w", v.shutdownCtx.Err()) - } -} - -// unblockCurrent reads from blockChan to unblock a running RPC. -// it must be run once per RPC that is started. -func (v *mockHostVolumeClient) unblockCurrent() (string, error) { - bc := v.getBlockChan() - if bc == nil { - return "", errors.New("no blockChan") - } - select { - case current := <-bc: - return current, nil - case <-time.After(time.Second): - return "", errors.New("unblockCurrent timeout") - } -} diff --git a/nomad/job_endpoint.go b/nomad/job_endpoint.go index cdb73632e91..60f2d67c101 100644 --- a/nomad/job_endpoint.go +++ b/nomad/job_endpoint.go @@ -12,10 +12,10 @@ import ( "strings" "time" + "github.com/armon/go-metrics" "github.com/golang/snappy" "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-memdb" - metrics "github.com/hashicorp/go-metrics/compat" "github.com/hashicorp/go-multierror" "github.com/hashicorp/go-set/v3" "github.com/hashicorp/nomad/acl" diff --git a/nomad/job_endpoint_statuses.go b/nomad/job_endpoint_statuses.go index ff8120fb9ae..74ff445620d 100644 --- a/nomad/job_endpoint_statuses.go +++ b/nomad/job_endpoint_statuses.go @@ -8,8 +8,8 @@ import ( "net/http" "time" + "github.com/armon/go-metrics" "github.com/hashicorp/go-memdb" - metrics "github.com/hashicorp/go-metrics/compat" "github.com/hashicorp/go-set/v3" "github.com/hashicorp/nomad/acl" "github.com/hashicorp/nomad/nomad/state" diff --git a/nomad/keyring_endpoint.go b/nomad/keyring_endpoint.go index 2500cfed82e..018f7f63816 100644 --- a/nomad/keyring_endpoint.go +++ b/nomad/keyring_endpoint.go @@ -8,9 +8,9 @@ import ( "fmt" "time" + "github.com/armon/go-metrics" "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-memdb" - metrics "github.com/hashicorp/go-metrics/compat" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/state" diff --git a/nomad/leader.go b/nomad/leader.go index 0ef08b74cb1..271635b01ad 100644 --- a/nomad/leader.go +++ b/nomad/leader.go @@ -13,9 +13,9 @@ import ( "sync" "time" + "github.com/armon/go-metrics" "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-memdb" - metrics "github.com/hashicorp/go-metrics/compat" "github.com/hashicorp/go-version" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/uuid" diff --git a/nomad/lock/delay.go b/nomad/lock/delay.go index e407f891eeb..4e1cf89e99d 100644 --- a/nomad/lock/delay.go +++ b/nomad/lock/delay.go @@ -7,7 +7,7 @@ import ( "sync" "time" - metrics "github.com/hashicorp/go-metrics/compat" + "github.com/armon/go-metrics" "github.com/hashicorp/nomad/helper" ) diff --git a/nomad/lock/ttl.go b/nomad/lock/ttl.go index 5989a33fa3b..f0befe3b41f 100644 --- a/nomad/lock/ttl.go +++ b/nomad/lock/ttl.go @@ -7,7 +7,7 @@ import ( "sync" "time" - metrics "github.com/hashicorp/go-metrics/compat" + "github.com/armon/go-metrics" "github.com/hashicorp/nomad/helper" ) diff --git a/nomad/mock/host_volumes.go b/nomad/mock/host_volumes.go deleted file mode 100644 index d0c5b14ac02..00000000000 --- 
a/nomad/mock/host_volumes.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package mock - -import ( - "github.com/hashicorp/nomad/helper/uuid" - "github.com/hashicorp/nomad/nomad/structs" -) - -func HostVolumeRequest(ns string) *structs.HostVolume { - vol := &structs.HostVolume{ - Namespace: ns, - Name: "example", - PluginID: "mkdir", - NodePool: structs.NodePoolDefault, - Constraints: []*structs.Constraint{ - { - LTarget: "${meta.rack}", - RTarget: "r1", - Operand: "=", - }, - }, - RequestedCapacityMinBytes: 100000, - RequestedCapacityMaxBytes: 200000, - Parameters: map[string]string{"foo": "bar"}, - State: structs.HostVolumeStatePending, - } - return vol - -} - -func HostVolumeRequestForNode(ns string, node *structs.Node) *structs.HostVolume { - vol := HostVolumeRequest(ns) - vol.NodeID = node.ID - vol.NodePool = node.NodePool - return vol -} - -func HostVolume() *structs.HostVolume { - volID := uuid.Generate() - vol := HostVolumeRequest(structs.DefaultNamespace) - vol.ID = volID - vol.NodeID = uuid.Generate() - vol.CapacityBytes = 150000 - vol.HostPath = "/var/data/nomad/alloc_mounts/" + volID - return vol -} - -// TaskGroupHostVolumeClaim creates a claim for a given job, alloc and host -// volume request -func TaskGroupHostVolumeClaim(job *structs.Job, alloc *structs.Allocation, dhv *structs.HostVolume) *structs.TaskGroupHostVolumeClaim { - return &structs.TaskGroupHostVolumeClaim{ - ID: uuid.Generate(), - Namespace: structs.DefaultNamespace, - JobID: job.ID, - TaskGroupName: job.TaskGroups[0].Name, - AllocID: alloc.ID, - VolumeID: dhv.ID, - VolumeName: dhv.Name, - CreateIndex: 1000, - ModifyIndex: 1000, - } -} diff --git a/nomad/namespace_endpoint.go b/nomad/namespace_endpoint.go index b9812dac528..61452345b11 100644 --- a/nomad/namespace_endpoint.go +++ b/nomad/namespace_endpoint.go @@ -7,8 +7,8 @@ import ( "fmt" "time" + "github.com/armon/go-metrics" "github.com/hashicorp/go-memdb" - metrics "github.com/hashicorp/go-metrics/compat" "github.com/hashicorp/go-multierror" "github.com/hashicorp/nomad/nomad/state" diff --git a/nomad/node_endpoint.go b/nomad/node_endpoint.go index ed9ca3b914e..3f626464cef 100644 --- a/nomad/node_endpoint.go +++ b/nomad/node_endpoint.go @@ -13,9 +13,9 @@ import ( "sync" "time" + "github.com/armon/go-metrics" "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-memdb" - metrics "github.com/hashicorp/go-metrics/compat" "github.com/hashicorp/go-multierror" vapi "github.com/hashicorp/vault/api" "golang.org/x/sync/errgroup" diff --git a/nomad/node_endpoint_test.go b/nomad/node_endpoint_test.go index 8de03e22be7..3a3d3acc250 100644 --- a/nomad/node_endpoint_test.go +++ b/nomad/node_endpoint_test.go @@ -4198,15 +4198,13 @@ func TestClientEndpoint_DeriveVaultToken_Bad(t *testing.T) { t.Fatalf("err: %v", err) } - badSecret := uuid.Generate() req := &structs.DeriveVaultTokenRequest{ NodeID: node.ID, - SecretID: badSecret, + SecretID: uuid.Generate(), AllocID: alloc.ID, Tasks: tasks, QueryOptions: structs.QueryOptions{ - Region: "global", - AuthToken: badSecret, + Region: "global", }, } @@ -4313,8 +4311,7 @@ func TestClientEndpoint_DeriveVaultToken(t *testing.T) { AllocID: alloc.ID, Tasks: tasks, QueryOptions: structs.QueryOptions{ - Region: "global", - AuthToken: node.SecretID, + Region: "global", }, } @@ -4398,8 +4395,7 @@ func TestClientEndpoint_DeriveVaultToken_VaultError(t *testing.T) { AllocID: alloc.ID, Tasks: tasks, QueryOptions: structs.QueryOptions{ - Region: "global", - AuthToken: 
node.SecretID, + Region: "global", }, } @@ -4522,14 +4518,11 @@ func TestClientEndpoint_DeriveSIToken(t *testing.T) { r.NoError(err) request := &structs.DeriveSITokenRequest{ - NodeID: node.ID, - SecretID: node.SecretID, - AllocID: alloc.ID, - Tasks: []string{sidecarTask.Name}, - QueryOptions: structs.QueryOptions{ - Region: "global", - AuthToken: node.SecretID, - }, + NodeID: node.ID, + SecretID: node.SecretID, + AllocID: alloc.ID, + Tasks: []string{sidecarTask.Name}, + QueryOptions: structs.QueryOptions{Region: "global"}, } var response structs.DeriveSITokenResponse @@ -4583,14 +4576,11 @@ func TestClientEndpoint_DeriveSIToken_ConsulError(t *testing.T) { r.NoError(err) request := &structs.DeriveSITokenRequest{ - NodeID: node.ID, - SecretID: node.SecretID, - AllocID: alloc.ID, - Tasks: []string{sidecarTask.Name}, - QueryOptions: structs.QueryOptions{ - Region: "global", - AuthToken: node.SecretID, - }, + NodeID: node.ID, + SecretID: node.SecretID, + AllocID: alloc.ID, + Tasks: []string{sidecarTask.Name}, + QueryOptions: structs.QueryOptions{Region: "global"}, } var response structs.DeriveSITokenResponse diff --git a/nomad/node_pool_endpoint.go b/nomad/node_pool_endpoint.go index 25a9292dfee..efc88f39e53 100644 --- a/nomad/node_pool_endpoint.go +++ b/nomad/node_pool_endpoint.go @@ -9,8 +9,8 @@ import ( "net/http" "time" + metrics "github.com/armon/go-metrics" "github.com/hashicorp/go-memdb" - metrics "github.com/hashicorp/go-metrics/compat" multierror "github.com/hashicorp/go-multierror" "github.com/hashicorp/nomad/acl" "github.com/hashicorp/nomad/nomad/state" diff --git a/nomad/periodic_endpoint.go b/nomad/periodic_endpoint.go index bb70ae908b9..1baf9ddfa9b 100644 --- a/nomad/periodic_endpoint.go +++ b/nomad/periodic_endpoint.go @@ -7,9 +7,9 @@ import ( "fmt" "time" + "github.com/armon/go-metrics" "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-memdb" - metrics "github.com/hashicorp/go-metrics/compat" "github.com/hashicorp/nomad/acl" "github.com/hashicorp/nomad/nomad/structs" diff --git a/nomad/periodic_test.go b/nomad/periodic_test.go index 62a9dd68291..1718c434ca9 100644 --- a/nomad/periodic_test.go +++ b/nomad/periodic_test.go @@ -19,7 +19,6 @@ import ( "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/testutil" - "github.com/shoenig/test/must" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -89,13 +88,6 @@ func (m *MockJobEvalDispatcher) dispatchedJobs(parent *structs.Job) []*structs.J return jobs } -func (m *MockJobEvalDispatcher) hasJob(id structs.NamespacedID) bool { - m.lock.Lock() - defer m.lock.Unlock() - _, ok := m.Jobs[id] - return ok -} - type times []time.Time func (t times) Len() int { return len(t) } @@ -270,32 +262,39 @@ func TestPeriodicDispatch_Add_TriggersUpdate(t *testing.T) { job := testPeriodicJob(time.Now().Add(10 * time.Second)) // Add it. - must.NoError(t, p.Add(job)) + if err := p.Add(job); err != nil { + t.Fatalf("Add failed %v", err) + } // Update it to be sooner and re-add. expected := time.Now().Round(1 * time.Second).Add(1 * time.Second) - job = job.Copy() job.Periodic.Spec = fmt.Sprintf("%d", expected.Unix()) - must.NoError(t, p.Add(job)) + if err := p.Add(job); err != nil { + t.Fatalf("Add failed %v", err) + } // Check that nothing is created. 
tuple := structs.NamespacedID{ ID: job.ID, Namespace: job.Namespace, } - must.False(t, m.hasJob(tuple), - must.Sprint("periodic dispatcher created eval too early")) + if _, ok := m.Jobs[tuple]; ok { + t.Fatalf("periodic dispatcher created eval at the wrong time") + } time.Sleep(2 * time.Second) // Check that job was launched correctly. times, err := m.LaunchTimes(p, job.Namespace, job.ID) - must.NoError(t, err, - must.Sprint("failed to get launch times for job")) - must.Len(t, 1, times, - must.Sprint("incorrect number of launch times for job")) - must.Eq(t, expected, times[0], - must.Sprint("periodic dispatcher created eval for wrong time")) + if err != nil { + t.Fatalf("failed to get launch times for job %q", job.ID) + } + if len(times) != 1 { + t.Fatalf("incorrect number of launch times for job %q", job.ID) + } + if times[0] != expected { + t.Fatalf("periodic dispatcher created eval for time %v; want %v", times[0], expected) + } } func TestPeriodicDispatch_Remove_Untracked(t *testing.T) { diff --git a/nomad/plan_apply.go b/nomad/plan_apply.go index 5fff21edcf9..e2e76894a76 100644 --- a/nomad/plan_apply.go +++ b/nomad/plan_apply.go @@ -9,9 +9,9 @@ import ( "runtime" "time" + metrics "github.com/armon/go-metrics" log "github.com/hashicorp/go-hclog" memdb "github.com/hashicorp/go-memdb" - metrics "github.com/hashicorp/go-metrics/compat" multierror "github.com/hashicorp/go-multierror" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/state" diff --git a/nomad/plan_apply_node_tracker.go b/nomad/plan_apply_node_tracker.go index c79d02e1033..9f080109c1a 100644 --- a/nomad/plan_apply_node_tracker.go +++ b/nomad/plan_apply_node_tracker.go @@ -7,8 +7,8 @@ import ( "fmt" "time" + "github.com/armon/go-metrics" "github.com/hashicorp/go-hclog" - metrics "github.com/hashicorp/go-metrics/compat" lru "github.com/hashicorp/golang-lru/v2" "github.com/hashicorp/nomad/helper" "golang.org/x/time/rate" diff --git a/nomad/plan_endpoint.go b/nomad/plan_endpoint.go index 79b5026d5df..465481d1ff9 100644 --- a/nomad/plan_endpoint.go +++ b/nomad/plan_endpoint.go @@ -7,8 +7,8 @@ import ( "fmt" "time" + "github.com/armon/go-metrics" "github.com/hashicorp/go-hclog" - metrics "github.com/hashicorp/go-metrics/compat" "github.com/hashicorp/nomad/nomad/structs" ) diff --git a/nomad/plan_queue.go b/nomad/plan_queue.go index ef132f85490..bd9f23e6387 100644 --- a/nomad/plan_queue.go +++ b/nomad/plan_queue.go @@ -9,7 +9,7 @@ import ( "sync" "time" - metrics "github.com/hashicorp/go-metrics/compat" + metrics "github.com/armon/go-metrics" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/nomad/structs" ) diff --git a/nomad/rpc.go b/nomad/rpc.go index 455c7162e58..b32cbfaab28 100644 --- a/nomad/rpc.go +++ b/nomad/rpc.go @@ -17,10 +17,10 @@ import ( "strings" "time" + metrics "github.com/armon/go-metrics" "github.com/hashicorp/go-connlimit" log "github.com/hashicorp/go-hclog" memdb "github.com/hashicorp/go-memdb" - metrics "github.com/hashicorp/go-metrics/compat" "github.com/hashicorp/go-msgpack/v2/codec" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/pool" diff --git a/nomad/rpc_rate_metrics.go b/nomad/rpc_rate_metrics.go index 6b60e28316f..8caf7a8b9bb 100644 --- a/nomad/rpc_rate_metrics.go +++ b/nomad/rpc_rate_metrics.go @@ -4,7 +4,7 @@ package nomad import ( - metrics "github.com/hashicorp/go-metrics/compat" + "github.com/armon/go-metrics" "github.com/hashicorp/nomad/nomad/structs" ) diff --git a/nomad/scaling_endpoint.go b/nomad/scaling_endpoint.go index 
8ebafd099f6..d7b3766fd00 100644 --- a/nomad/scaling_endpoint.go +++ b/nomad/scaling_endpoint.go @@ -7,9 +7,9 @@ import ( "strings" "time" + "github.com/armon/go-metrics" "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-memdb" - metrics "github.com/hashicorp/go-metrics/compat" "github.com/hashicorp/nomad/acl" "github.com/hashicorp/nomad/nomad/state" "github.com/hashicorp/nomad/nomad/structs" diff --git a/nomad/search_endpoint.go b/nomad/search_endpoint.go index f430b9c213c..4a66e939238 100644 --- a/nomad/search_endpoint.go +++ b/nomad/search_endpoint.go @@ -9,9 +9,9 @@ import ( "strings" "time" + "github.com/armon/go-metrics" "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-memdb" - metrics "github.com/hashicorp/go-metrics/compat" "github.com/hashicorp/nomad/acl" "github.com/hashicorp/nomad/nomad/state" @@ -41,7 +41,6 @@ var ( structs.ScalingPolicies, structs.Variables, structs.Namespaces, - structs.HostVolumes, } ) @@ -85,8 +84,6 @@ func (s *Search) getPrefixMatches(iter memdb.ResultIterator, prefix string) ([]s id = t.ID case *structs.CSIVolume: id = t.ID - case *structs.HostVolume: - id = t.ID case *structs.ScalingPolicy: id = t.ID case *structs.Namespace: @@ -408,8 +405,6 @@ func getResourceIter(context structs.Context, aclObj *acl.ACL, namespace, prefix return store.ScalingPoliciesByIDPrefix(ws, namespace, prefix) case structs.Volumes: return store.CSIVolumesByIDPrefix(ws, namespace, prefix) - case structs.HostVolumes: - return store.HostVolumesByIDPrefix(ws, namespace, prefix, state.SortDefault) case structs.Namespaces: iter, err := store.NamespacesByNamePrefix(ws, prefix) if err != nil { @@ -689,8 +684,6 @@ func sufficientSearchPerms(aclObj *acl.ACL, namespace string, context structs.Co acl.NamespaceCapabilityCSIReadVolume, acl.NamespaceCapabilityListJobs, acl.NamespaceCapabilityReadJob)(aclObj, namespace) - case structs.HostVolumes: - return acl.NamespaceValidator(acl.NamespaceCapabilityHostVolumeRead)(aclObj, namespace) case structs.Variables: return aclObj.AllowVariableSearch(namespace) case structs.Plugins: @@ -781,8 +774,7 @@ func (s *Search) FuzzySearch(args *structs.FuzzySearchRequest, reply *structs.Fu for _, ctx := range prefixContexts { switch ctx { // only apply on the types that use UUID prefix searching - case structs.Evals, structs.Deployments, structs.ScalingPolicies, - structs.Volumes, structs.HostVolumes, structs.Quotas, structs.Recommendations: + case structs.Evals, structs.Deployments, structs.ScalingPolicies, structs.Volumes, structs.Quotas, structs.Recommendations: iter, err := getResourceIter(ctx, aclObj, namespace, roundUUIDDownIfOdd(args.Prefix, args.Context), ws, state) if err != nil { if !s.silenceError(err) { @@ -798,9 +790,7 @@ func (s *Search) FuzzySearch(args *structs.FuzzySearchRequest, reply *structs.Fu for _, ctx := range fuzzyContexts { switch ctx { // skip the types that use UUID prefix searching - case structs.Evals, structs.Deployments, structs.ScalingPolicies, - structs.Volumes, structs.HostVolumes, structs.Quotas, - structs.Recommendations: + case structs.Evals, structs.Deployments, structs.ScalingPolicies, structs.Volumes, structs.Quotas, structs.Recommendations: continue default: iter, err := getFuzzyResourceIterator(ctx, aclObj, namespace, ws, state) @@ -937,11 +927,6 @@ func filteredSearchContexts(aclObj *acl.ACL, namespace string, context structs.C if volRead { available = append(available, c) } - case structs.HostVolumes: - if acl.NamespaceValidator( - acl.NamespaceCapabilityHostVolumeRead)(aclObj, namespace) { - available 
= append(available, c) - } case structs.Plugins: if aclObj.AllowPluginList() { available = append(available, c) diff --git a/nomad/search_endpoint_test.go b/nomad/search_endpoint_test.go index 5f9695f3d20..e06688ac927 100644 --- a/nomad/search_endpoint_test.go +++ b/nomad/search_endpoint_test.go @@ -1039,53 +1039,6 @@ func TestSearch_PrefixSearch_CSIVolume(t *testing.T) { require.False(t, resp.Truncations[structs.Volumes]) } -func TestSearch_PrefixSearch_HostVolume(t *testing.T) { - ci.Parallel(t) - - srv, cleanup := TestServer(t, func(c *Config) { - c.NumSchedulers = 0 - }) - defer cleanup() - codec := rpcClient(t, srv) - testutil.WaitForLeader(t, srv.RPC) - - store := srv.fsm.State() - index, _ := store.LatestIndex() - - node := mock.Node() - index++ - must.NoError(t, store.UpsertNode(structs.MsgTypeTestSetup, index, node)) - - id := uuid.Generate() - index++ - err := store.UpsertHostVolume(index, &structs.HostVolume{ - ID: id, - Name: "example", - Namespace: structs.DefaultNamespace, - PluginID: "glade", - NodeID: node.ID, - NodePool: node.NodePool, - }) - must.NoError(t, err) - - req := &structs.SearchRequest{ - Prefix: id[:6], - Context: structs.HostVolumes, - QueryOptions: structs.QueryOptions{ - Region: "global", - Namespace: structs.DefaultNamespace, - }, - } - - var resp structs.SearchResponse - must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)) - - must.Len(t, 1, resp.Matches[structs.HostVolumes]) - must.Len(t, 0, resp.Matches[structs.Volumes]) - must.Eq(t, id, resp.Matches[structs.HostVolumes][0]) - must.False(t, resp.Truncations[structs.HostVolumes]) -} - func TestSearch_PrefixSearch_Namespace(t *testing.T) { ci.Parallel(t) @@ -1979,52 +1932,6 @@ func TestSearch_FuzzySearch_CSIVolume(t *testing.T) { require.False(t, resp.Truncations[structs.Volumes]) } -func TestSearch_FuzzySearch_HostVolume(t *testing.T) { - ci.Parallel(t) - - srv, cleanup := TestServer(t, func(c *Config) { - c.NumSchedulers = 0 - }) - defer cleanup() - codec := rpcClient(t, srv) - testutil.WaitForLeader(t, srv.RPC) - - store := srv.fsm.State() - index, _ := store.LatestIndex() - - node := mock.Node() - index++ - must.NoError(t, store.UpsertNode(structs.MsgTypeTestSetup, index, node)) - - id := uuid.Generate() - index++ - err := store.UpsertHostVolume(index, &structs.HostVolume{ - ID: id, - Name: "example", - Namespace: structs.DefaultNamespace, - PluginID: "glade", - NodeID: node.ID, - NodePool: node.NodePool, - }) - must.NoError(t, err) - - req := &structs.FuzzySearchRequest{ - Text: id[0:3], // volumes are prefix searched - Context: structs.HostVolumes, - QueryOptions: structs.QueryOptions{ - Region: "global", - Namespace: structs.DefaultNamespace, - }, - } - - var resp structs.FuzzySearchResponse - must.NoError(t, msgpackrpc.CallWithCodec(codec, "Search.FuzzySearch", req, &resp)) - - must.Len(t, 1, resp.Matches[structs.HostVolumes]) - must.Eq(t, id, resp.Matches[structs.HostVolumes][0].ID) - must.False(t, resp.Truncations[structs.HostVolumes]) -} - func TestSearch_FuzzySearch_Namespace(t *testing.T) { ci.Parallel(t) diff --git a/nomad/server.go b/nomad/server.go index f77243d8867..32638b2d304 100644 --- a/nomad/server.go +++ b/nomad/server.go @@ -21,9 +21,9 @@ import ( "sync/atomic" "time" + "github.com/armon/go-metrics" consulapi "github.com/hashicorp/consul/api" log "github.com/hashicorp/go-hclog" - metrics "github.com/hashicorp/go-metrics/compat" multierror "github.com/hashicorp/go-multierror" "github.com/hashicorp/raft" autopilot "github.com/hashicorp/raft-autopilot" @@ 
-1356,8 +1356,6 @@ func (s *Server) setupRpcServer(server *rpc.Server, ctx *RPCContext) { _ = server.Register(NewStatusEndpoint(s, ctx)) _ = server.Register(NewSystemEndpoint(s, ctx)) _ = server.Register(NewVariablesEndpoint(s, ctx, s.encrypter)) - _ = server.Register(NewHostVolumeEndpoint(s, ctx)) - _ = server.Register(NewClientHostVolumeEndpoint(s, ctx)) // Register non-streaming diff --git a/nomad/service_registration_endpoint.go b/nomad/service_registration_endpoint.go index 1ed805f6734..eede684ce8c 100644 --- a/nomad/service_registration_endpoint.go +++ b/nomad/service_registration_endpoint.go @@ -11,8 +11,8 @@ import ( "strings" "time" + "github.com/armon/go-metrics" "github.com/hashicorp/go-memdb" - metrics "github.com/hashicorp/go-metrics/compat" "github.com/hashicorp/go-multierror" "github.com/hashicorp/go-set/v3" diff --git a/nomad/state/deployment_events_test.go b/nomad/state/deployment_events_test.go index 2946679fb25..156ed73df49 100644 --- a/nomad/state/deployment_events_test.go +++ b/nomad/state/deployment_events_test.go @@ -83,7 +83,7 @@ func WaitForEvents(t *testing.T, s *StateStore, index uint64, minEvents int, tim } maxAttempts-- if maxAttempts == 0 { - require.Failf(t, "reached max attempts waiting for desired event count", "count %d got: %+v", len(got), got) + require.Failf(t, "reached max attempts waiting for desired event count", "count %d", len(got)) } time.Sleep(10 * time.Millisecond) } diff --git a/nomad/state/events.go b/nomad/state/events.go index 29abd6d9fab..827b5ff931b 100644 --- a/nomad/state/events.go +++ b/nomad/state/events.go @@ -41,11 +41,6 @@ var MsgTypeEvents = map[structs.MessageType]string{ structs.ServiceRegistrationUpsertRequestType: structs.TypeServiceRegistration, structs.ServiceRegistrationDeleteByIDRequestType: structs.TypeServiceDeregistration, structs.ServiceRegistrationDeleteByNodeIDRequestType: structs.TypeServiceDeregistration, - structs.HostVolumeRegisterRequestType: structs.TypeHostVolumeRegistered, - structs.HostVolumeDeleteRequestType: structs.TypeHostVolumeDeleted, - structs.CSIVolumeRegisterRequestType: structs.TypeCSIVolumeRegistered, - structs.CSIVolumeDeregisterRequestType: structs.TypeCSIVolumeDeregistered, - structs.CSIVolumeClaimRequestType: structs.TypeCSIVolumeClaim, } func eventsFromChanges(tx ReadTxn, changes Changes) *structs.Events { @@ -186,57 +181,6 @@ func eventFromChange(change memdb.Change) (structs.Event, bool) { Service: before, }, }, true - case TableHostVolumes: - before, ok := change.Before.(*structs.HostVolume) - if !ok { - return structs.Event{}, false - } - return structs.Event{ - Topic: structs.TopicHostVolume, - FilterKeys: []string{ - before.ID, - before.Name, - before.PluginID, - }, - Namespace: before.Namespace, - Payload: &structs.HostVolumeEvent{ - Volume: before, - }, - }, true - case TableCSIVolumes: - before, ok := change.Before.(*structs.CSIVolume) - if !ok { - return structs.Event{}, false - } - return structs.Event{ - Topic: structs.TopicCSIVolume, - Key: before.ID, - FilterKeys: []string{ - before.ID, - before.Name, - before.PluginID, - }, - Namespace: before.Namespace, - Payload: &structs.CSIVolumeEvent{ - Volume: before, - }, - }, true - case TableCSIPlugins: - // note: there is no CSIPlugin event type, because CSI plugins don't - // have their own write RPCs; they are always created/removed via - // node updates - before, ok := change.Before.(*structs.CSIPlugin) - if !ok { - return structs.Event{}, false - } - return structs.Event{ - Topic: structs.TopicCSIPlugin, - Key: before.ID, - 
FilterKeys: []string{before.ID}, - Payload: &structs.CSIPluginEvent{ - Plugin: before, - }, - }, true } return structs.Event{}, false } @@ -414,58 +358,6 @@ func eventFromChange(change memdb.Change) (structs.Event, bool) { Service: after, }, }, true - case TableHostVolumes: - after, ok := change.After.(*structs.HostVolume) - if !ok { - return structs.Event{}, false - } - return structs.Event{ - Topic: structs.TopicHostVolume, - Key: after.ID, - FilterKeys: []string{ - after.ID, - after.Name, - after.PluginID, - }, - Namespace: after.Namespace, - Payload: &structs.HostVolumeEvent{ - Volume: after, - }, - }, true - case TableCSIVolumes: - after, ok := change.After.(*structs.CSIVolume) - if !ok { - return structs.Event{}, false - } - return structs.Event{ - Topic: structs.TopicCSIVolume, - Key: after.ID, - FilterKeys: []string{ - after.ID, - after.Name, - after.PluginID, - }, - Namespace: after.Namespace, - Payload: &structs.CSIVolumeEvent{ - Volume: after, - }, - }, true - case TableCSIPlugins: - // note: there is no CSIPlugin event type, because CSI plugins don't - // have their own write RPCs; they are always created/removed via - // node updates - after, ok := change.After.(*structs.CSIPlugin) - if !ok { - return structs.Event{}, false - } - return structs.Event{ - Topic: structs.TopicCSIPlugin, - Key: after.ID, - FilterKeys: []string{after.ID}, - Payload: &structs.CSIPluginEvent{ - Plugin: after, - }, - }, true } return structs.Event{}, false diff --git a/nomad/state/events_test.go b/nomad/state/events_test.go index 47fc17f75d6..8e15e27fb9d 100644 --- a/nomad/state/events_test.go +++ b/nomad/state/events_test.go @@ -1215,126 +1215,6 @@ func Test_eventsFromChanges_ACLBindingRule(t *testing.T) { must.Eq(t, bindingRule, receivedDeleteChange.Events[0].Payload.(*structs.ACLBindingRuleEvent).ACLBindingRule) } -func TestEvents_HostVolumes(t *testing.T) { - ci.Parallel(t) - store := TestStateStoreCfg(t, TestStateStorePublisher(t)) - defer store.StopEventBroker() - - index, err := store.LatestIndex() - must.NoError(t, err) - - node := mock.Node() - index++ - must.NoError(t, store.UpsertNode(structs.NodeRegisterRequestType, index, node, NodeUpsertWithNodePool)) - - vol := mock.HostVolume() - vol.NodeID = node.ID - index++ - must.NoError(t, store.UpsertHostVolume(index, vol)) - - node = node.Copy() - node.HostVolumes = map[string]*structs.ClientHostVolumeConfig{vol.Name: { - Name: vol.Name, - Path: "/var/nomad/alloc_mounts" + uuid.Generate(), - }} - index++ - must.NoError(t, store.UpsertNode(structs.NodeRegisterRequestType, index, node, NodeUpsertWithNodePool)) - - index++ - must.NoError(t, store.DeleteHostVolume(index, vol.Namespace, vol.ID)) - - events := WaitForEvents(t, store, 0, 5, 1*time.Second) - must.Len(t, 5, events) - must.Eq(t, "Node", events[0].Topic) - must.Eq(t, "NodeRegistration", events[0].Type) - must.Eq(t, "HostVolume", events[1].Topic) - must.Eq(t, "HostVolumeRegistered", events[1].Type) - must.Eq(t, "Node", events[2].Topic) - must.Eq(t, "NodeRegistration", events[2].Type) - must.Eq(t, "HostVolume", events[3].Topic) - must.Eq(t, "NodeRegistration", events[3].Type) - must.Eq(t, "HostVolume", events[4].Topic) - must.Eq(t, "HostVolumeDeleted", events[4].Type) -} - -func TestEvents_CSIVolumes(t *testing.T) { - ci.Parallel(t) - store := TestStateStoreCfg(t, TestStateStorePublisher(t)) - defer store.StopEventBroker() - - index, err := store.LatestIndex() - must.NoError(t, err) - - plugin := mock.CSIPlugin() - vol := mock.CSIVolume(plugin) - - index++ - must.NoError(t, 
store.UpsertCSIVolume(index, []*structs.CSIVolume{vol})) - - alloc := mock.Alloc() - index++ - store.UpsertAllocs(structs.MsgTypeTestSetup, index, []*structs.Allocation{alloc}) - - claim := &structs.CSIVolumeClaim{ - AllocationID: alloc.ID, - NodeID: uuid.Generate(), - Mode: structs.CSIVolumeClaimGC, - AccessMode: structs.CSIVolumeAccessModeSingleNodeWriter, - AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, - State: structs.CSIVolumeClaimStateReadyToFree, - } - index++ - must.NoError(t, store.CSIVolumeClaim(index, time.Now().UnixNano(), vol.Namespace, vol.ID, claim)) - - index++ - must.NoError(t, store.CSIVolumeDeregister(index, vol.Namespace, []string{vol.ID}, false)) - - events := WaitForEvents(t, store, 0, 3, 1*time.Second) - must.Len(t, 3, events) - must.Eq(t, "CSIVolume", events[0].Topic) - must.Eq(t, "CSIVolumeRegistered", events[0].Type) - must.Eq(t, "CSIVolume", events[1].Topic) - must.Eq(t, "CSIVolumeClaim", events[1].Type) - must.Eq(t, "CSIVolume", events[2].Topic) - must.Eq(t, "CSIVolumeDeregistered", events[2].Type) - -} - -func TestEvents_CSIPlugins(t *testing.T) { - ci.Parallel(t) - store := TestStateStoreCfg(t, TestStateStorePublisher(t)) - defer store.StopEventBroker() - - index, err := store.LatestIndex() - must.NoError(t, err) - - node := mock.Node() - plugin := mock.CSIPlugin() - - index++ - must.NoError(t, store.UpsertNode(structs.NodeRegisterRequestType, index, node)) - - node = node.Copy() - node.CSINodePlugins = map[string]*structs.CSIInfo{ - plugin.ID: { - PluginID: plugin.ID, - Healthy: true, - UpdateTime: time.Now(), - }, - } - index++ - must.NoError(t, store.UpsertNode(structs.NodeRegisterRequestType, index, node)) - - events := WaitForEvents(t, store, 0, 3, 1*time.Second) - must.Len(t, 3, events) - must.Eq(t, "Node", events[0].Topic) - must.Eq(t, "NodeRegistration", events[0].Type) - must.Eq(t, "Node", events[1].Topic) - must.Eq(t, "NodeRegistration", events[1].Type) - must.Eq(t, "CSIPlugin", events[2].Topic) - must.Eq(t, "NodeRegistration", events[2].Type) -} - func requireNodeRegistrationEventEqual(t *testing.T, want, got structs.Event) { t.Helper() diff --git a/nomad/state/schema.go b/nomad/state/schema.go index 88fb1598b03..2c798b06fbe 100644 --- a/nomad/state/schema.go +++ b/nomad/state/schema.go @@ -15,21 +15,17 @@ import ( const ( tableIndex = "index" - TableNamespaces = "namespaces" - TableNodePools = "node_pools" - TableServiceRegistrations = "service_registrations" - TableVariables = "variables" - TableVariablesQuotas = "variables_quota" - TableRootKeys = "root_keys" - TableACLRoles = "acl_roles" - TableACLAuthMethods = "acl_auth_methods" - TableACLBindingRules = "acl_binding_rules" - TableAllocs = "allocs" - TableJobSubmission = "job_submission" - TableHostVolumes = "host_volumes" - TableCSIVolumes = "csi_volumes" - TableCSIPlugins = "csi_plugins" - TableTaskGroupHostVolumeClaim = "task_volume" + TableNamespaces = "namespaces" + TableNodePools = "node_pools" + TableServiceRegistrations = "service_registrations" + TableVariables = "variables" + TableVariablesQuotas = "variables_quota" + TableRootKeys = "root_keys" + TableACLRoles = "acl_roles" + TableACLAuthMethods = "acl_auth_methods" + TableACLBindingRules = "acl_binding_rules" + TableAllocs = "allocs" + TableJobSubmission = "job_submission" ) const ( @@ -45,7 +41,6 @@ const ( indexName = "name" indexSigningKey = "signing_key" indexAuthMethod = "auth_method" - indexNodePool = "node_pool" ) var ( @@ -102,8 +97,6 @@ func init() { aclRolesTableSchema, aclAuthMethodsTableSchema, 
bindingRulesTableSchema, - hostVolumeTableSchema, - taskGroupHostVolumeClaimSchema, }...) } @@ -168,8 +161,8 @@ func nodeTableSchema() *memdb.TableSchema { Field: "SecretID", }, }, - indexNodePool: { - Name: indexNodePool, + "node_pool": { + Name: "node_pool", AllowMissing: false, Unique: false, Indexer: &memdb.StringFieldIndex{ @@ -851,8 +844,8 @@ func vaultAccessorTableSchema() *memdb.TableSchema { }, }, - indexNodeID: { - Name: indexNodeID, + "node_id": { + Name: "node_id", AllowMissing: false, Unique: false, Indexer: &memdb.StringFieldIndex{ @@ -889,8 +882,8 @@ func siTokenAccessorTableSchema() *memdb.TableSchema { }, }, - indexNodeID: { - Name: indexNodeID, + "node_id": { + Name: "node_id", AllowMissing: false, Unique: false, Indexer: &memdb.StringFieldIndex{ @@ -1154,7 +1147,7 @@ func clusterMetaTableSchema() *memdb.TableSchema { // CSIVolumes are identified by id globally, and searchable by driver func csiVolumeTableSchema() *memdb.TableSchema { return &memdb.TableSchema{ - Name: TableCSIVolumes, + Name: "csi_volumes", Indexes: map[string]*memdb.IndexSchema{ "id": { Name: "id", @@ -1186,7 +1179,7 @@ func csiVolumeTableSchema() *memdb.TableSchema { // CSIPlugins are identified by id globally, and searchable by driver func csiPluginTableSchema() *memdb.TableSchema { return &memdb.TableSchema{ - Name: TableCSIPlugins, + Name: "csi_plugins", Indexes: map[string]*memdb.IndexSchema{ "id": { Name: "id", @@ -1650,96 +1643,3 @@ func bindingRulesTableSchema() *memdb.TableSchema { }, } } - -// HostVolumes are identified by id globally, and searchable by namespace+name, -// node, or node_pool -func hostVolumeTableSchema() *memdb.TableSchema { - return &memdb.TableSchema{ - Name: TableHostVolumes, - Indexes: map[string]*memdb.IndexSchema{ - indexID: { - Name: indexID, - AllowMissing: false, - Unique: true, - Indexer: &memdb.CompoundIndex{ - Indexes: []memdb.Indexer{ - &memdb.StringFieldIndex{ - Field: "Namespace", - }, - &memdb.StringFieldIndex{ - Field: "ID", - Lowercase: true, - }, - }, - }, - }, - indexName: { - Name: indexName, - AllowMissing: false, - Unique: false, - Indexer: &memdb.CompoundIndex{ - Indexes: []memdb.Indexer{ - &memdb.StringFieldIndex{ - Field: "Namespace", - }, - &memdb.StringFieldIndex{ - Field: "Name", - }, - }, - }, - }, - indexNodeID: { - Name: indexNodeID, - AllowMissing: false, - Unique: false, - Indexer: &memdb.StringFieldIndex{ - Field: "NodeID", - Lowercase: true, - }, - }, - indexNodePool: { - Name: indexNodePool, - AllowMissing: false, - Unique: false, - Indexer: &memdb.StringFieldIndex{ - Field: "NodePool", - }, - }, - }, - } -} - -func taskGroupHostVolumeClaimSchema() *memdb.TableSchema { - return &memdb.TableSchema{ - Name: TableTaskGroupHostVolumeClaim, - Indexes: map[string]*memdb.IndexSchema{ - indexID: { - Name: indexID, - AllowMissing: false, - Unique: true, - - // Use a compound index so the combination of (Namespace, JobID, TaskGroupName, - // VolumeID) is uniquely identifying - Indexer: &memdb.CompoundIndex{ - Indexes: []memdb.Indexer{ - &memdb.StringFieldIndex{ - Field: "Namespace", - }, - - &memdb.StringFieldIndex{ - Field: "JobID", - }, - - &memdb.StringFieldIndex{ - Field: "TaskGroupName", - }, - - &memdb.StringFieldIndex{ - Field: "VolumeID", - }, - }, - }, - }, - }, - } -} diff --git a/nomad/state/state_store.go b/nomad/state/state_store.go index 7d52cf7c299..9313e68fe87 100644 --- a/nomad/state/state_store.go +++ b/nomad/state/state_store.go @@ -20,7 +20,6 @@ import ( "github.com/hashicorp/go-set/v3" "github.com/hashicorp/nomad/helper" 
"github.com/hashicorp/nomad/helper/pointer" - "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/lib/lang" "github.com/hashicorp/nomad/nomad/stream" "github.com/hashicorp/nomad/nomad/structs" @@ -1040,9 +1039,6 @@ func upsertNodeTxn(txn *txn, index uint64, node *structs.Node) error { if err := upsertCSIPluginsForNode(txn, node, index); err != nil { return fmt.Errorf("csi plugin update failed: %v", err) } - if err := upsertHostVolumeForNode(txn, node, index); err != nil { - return fmt.Errorf("dynamic host volumes update failed: %v", err) - } return nil } @@ -1390,7 +1386,7 @@ func appendNodeEvents(index uint64, node *structs.Node, events []*structs.NodeEv func upsertCSIPluginsForNode(txn *txn, node *structs.Node, index uint64) error { upsertFn := func(info *structs.CSIInfo) error { - raw, err := txn.First(TableCSIPlugins, "id", info.PluginID) + raw, err := txn.First("csi_plugins", "id", info.PluginID) if err != nil { return fmt.Errorf("csi_plugin lookup error: %s %v", info.PluginID, err) } @@ -1421,7 +1417,7 @@ func upsertCSIPluginsForNode(txn *txn, node *structs.Node, index uint64) error { plug.ModifyIndex = index - err = txn.Insert(TableCSIPlugins, plug) + err = txn.Insert("csi_plugins", plug) if err != nil { return fmt.Errorf("csi_plugins insert error: %v", err) } @@ -1450,7 +1446,7 @@ func upsertCSIPluginsForNode(txn *txn, node *structs.Node, index uint64) error { // remove the client node from any plugin that's not // running on it. - iter, err := txn.Get(TableCSIPlugins, "id") + iter, err := txn.Get("csi_plugins", "id") if err != nil { return fmt.Errorf("csi_plugins lookup failed: %v", err) } @@ -1495,7 +1491,7 @@ func upsertCSIPluginsForNode(txn *txn, node *structs.Node, index uint64) error { } } - if err := txn.Insert("index", &IndexEntry{TableCSIPlugins, index}); err != nil { + if err := txn.Insert("index", &IndexEntry{"csi_plugins", index}); err != nil { return fmt.Errorf("index update failed: %v", err) } @@ -1517,7 +1513,7 @@ func deleteNodeCSIPlugins(txn *txn, node *structs.Node, index uint64) error { } for id := range names { - raw, err := txn.First(TableCSIPlugins, "id", id) + raw, err := txn.First("csi_plugins", "id", id) if err != nil { return fmt.Errorf("csi_plugins lookup error %s: %v", id, err) } @@ -1538,7 +1534,7 @@ func deleteNodeCSIPlugins(txn *txn, node *structs.Node, index uint64) error { } } - if err := txn.Insert("index", &IndexEntry{TableCSIPlugins, index}); err != nil { + if err := txn.Insert("index", &IndexEntry{"csi_plugins", index}); err != nil { return fmt.Errorf("index update failed: %v", err) } @@ -1548,13 +1544,13 @@ func deleteNodeCSIPlugins(txn *txn, node *structs.Node, index uint64) error { // updateOrGCPlugin updates a plugin but will delete it if the plugin is empty func updateOrGCPlugin(index uint64, txn Txn, plug *structs.CSIPlugin) error { if plug.IsEmpty() { - err := txn.Delete(TableCSIPlugins, plug) + err := txn.Delete("csi_plugins", plug) if err != nil { return fmt.Errorf("csi_plugins delete error: %v", err) } } else { plug.ModifyIndex = index - err := txn.Insert(TableCSIPlugins, plug) + err := txn.Insert("csi_plugins", plug) if err != nil { return fmt.Errorf("csi_plugins update error %s: %v", plug.ID, err) } @@ -1653,7 +1649,7 @@ func (s *StateStore) deleteJobFromPlugins(index uint64, txn Txn, job *structs.Jo } if len(plugins) > 0 { - if err = txn.Insert("index", &IndexEntry{TableCSIPlugins, index}); err != nil { + if err = txn.Insert("index", &IndexEntry{"csi_plugins", index}); err != nil { return fmt.Errorf("index update 
failed: %v", err) } } @@ -2026,12 +2022,6 @@ func (s *StateStore) DeleteJobTxn(index uint64, namespace, jobID string, txn Txn if _, err = txn.DeleteAll("scaling_event", "id", namespace, jobID); err != nil { return fmt.Errorf("deleting job scaling events failed: %v", err) } - - // Delete task group volume claims - if err = s.deleteTaskGroupHostVolumeClaim(index, txn, namespace, jobID); err != nil { - return fmt.Errorf("deleting job volume claims failed: %v", err) - } - if err := txn.Insert("index", &IndexEntry{"scaling_event", index}); err != nil { return fmt.Errorf("index update failed: %v", err) } @@ -2559,7 +2549,7 @@ func (s *StateStore) JobSummaryByPrefix(ws memdb.WatchSet, namespace, id string) // UpsertCSIVolume inserts a volume in the state store. func (s *StateStore) UpsertCSIVolume(index uint64, volumes []*structs.CSIVolume) error { - txn := s.db.WriteTxnMsgT(structs.CSIVolumeRegisterRequestType, index) + txn := s.db.WriteTxn(index) defer txn.Abort() for _, v := range volumes { @@ -2569,7 +2559,7 @@ func (s *StateStore) UpsertCSIVolume(index uint64, volumes []*structs.CSIVolume) return fmt.Errorf("volume %s is in nonexistent namespace %s", v.ID, v.Namespace) } - obj, err := txn.First(TableCSIVolumes, "id", v.Namespace, v.ID) + obj, err := txn.First("csi_volumes", "id", v.Namespace, v.ID) if err != nil { return fmt.Errorf("volume existence check error: %v", err) } @@ -2598,13 +2588,13 @@ func (s *StateStore) UpsertCSIVolume(index uint64, volumes []*structs.CSIVolume) v.WriteAllocs[allocID] = nil } - err = txn.Insert(TableCSIVolumes, v) + err = txn.Insert("csi_volumes", v) if err != nil { return fmt.Errorf("volume insert: %v", err) } } - if err := txn.Insert("index", &IndexEntry{TableCSIVolumes, index}); err != nil { + if err := txn.Insert("index", &IndexEntry{"csi_volumes", index}); err != nil { return fmt.Errorf("index update failed: %v", err) } @@ -2617,7 +2607,7 @@ func (s *StateStore) CSIVolumes(ws memdb.WatchSet) (memdb.ResultIterator, error) txn := s.db.ReadTxn() defer txn.Abort() - iter, err := txn.Get(TableCSIVolumes, "id") + iter, err := txn.Get("csi_volumes", "id") if err != nil { return nil, fmt.Errorf("csi_volumes lookup failed: %v", err) } @@ -2633,7 +2623,7 @@ func (s *StateStore) CSIVolumes(ws memdb.WatchSet) (memdb.ResultIterator, error) func (s *StateStore) CSIVolumeByID(ws memdb.WatchSet, namespace, id string) (*structs.CSIVolume, error) { txn := s.db.ReadTxn() - watchCh, obj, err := txn.FirstWatch(TableCSIVolumes, "id", namespace, id) + watchCh, obj, err := txn.FirstWatch("csi_volumes", "id", namespace, id) if err != nil { return nil, fmt.Errorf("volume lookup failed for %s: %v", id, err) } @@ -2654,7 +2644,7 @@ func (s *StateStore) CSIVolumeByID(ws memdb.WatchSet, namespace, id string) (*st func (s *StateStore) CSIVolumesByPluginID(ws memdb.WatchSet, namespace, prefix, pluginID string) (memdb.ResultIterator, error) { txn := s.db.ReadTxn() - iter, err := txn.Get(TableCSIVolumes, "plugin_id", pluginID) + iter, err := txn.Get("csi_volumes", "plugin_id", pluginID) if err != nil { return nil, fmt.Errorf("volume lookup failed: %v", err) } @@ -2682,7 +2672,7 @@ func (s *StateStore) CSIVolumesByIDPrefix(ws memdb.WatchSet, namespace, volumeID txn := s.db.ReadTxn() - iter, err := txn.Get(TableCSIVolumes, "id_prefix", namespace, volumeID) + iter, err := txn.Get("csi_volumes", "id_prefix", namespace, volumeID) if err != nil { return nil, err } @@ -2696,7 +2686,7 @@ func (s *StateStore) csiVolumeByIDPrefixAllNamespaces(ws memdb.WatchSet, prefix txn := s.db.ReadTxn() // Walk 
the entire csi_volumes table - iter, err := txn.Get(TableCSIVolumes, "id") + iter, err := txn.Get("csi_volumes", "id") if err != nil { return nil, err @@ -2748,7 +2738,7 @@ func (s *StateStore) CSIVolumesByNodeID(ws memdb.WatchSet, prefix, nodeID string txn := s.db.ReadTxn() for id, namespace := range ids { if strings.HasPrefix(id, prefix) { - watchCh, raw, err := txn.FirstWatch(TableCSIVolumes, "id", namespace, id) + watchCh, raw, err := txn.FirstWatch("csi_volumes", "id", namespace, id) if err != nil { return nil, fmt.Errorf("volume lookup failed: %s %v", id, err) } @@ -2769,7 +2759,7 @@ func (s *StateStore) CSIVolumesByNamespace(ws memdb.WatchSet, namespace, prefix func (s *StateStore) csiVolumesByNamespaceImpl(txn *txn, ws memdb.WatchSet, namespace, prefix string) (memdb.ResultIterator, error) { - iter, err := txn.Get(TableCSIVolumes, "id_prefix", namespace, prefix) + iter, err := txn.Get("csi_volumes", "id_prefix", namespace, prefix) if err != nil { return nil, fmt.Errorf("volume lookup failed: %v", err) } @@ -2781,10 +2771,10 @@ func (s *StateStore) csiVolumesByNamespaceImpl(txn *txn, ws memdb.WatchSet, name // CSIVolumeClaim updates the volume's claim count and allocation list func (s *StateStore) CSIVolumeClaim(index uint64, now int64, namespace, id string, claim *structs.CSIVolumeClaim) error { - txn := s.db.WriteTxnMsgT(structs.CSIVolumeClaimRequestType, index) + txn := s.db.WriteTxn(index) defer txn.Abort() - row, err := txn.First(TableCSIVolumes, "id", namespace, id) + row, err := txn.First("csi_volumes", "id", namespace, id) if err != nil { return fmt.Errorf("volume lookup failed: %s: %v", id, err) } @@ -2842,11 +2832,11 @@ func (s *StateStore) CSIVolumeClaim(index uint64, now int64, namespace, id strin volume.WriteAllocs[allocID] = nil } - if err = txn.Insert(TableCSIVolumes, volume); err != nil { + if err = txn.Insert("csi_volumes", volume); err != nil { return fmt.Errorf("volume update failed: %s: %v", id, err) } - if err = txn.Insert("index", &IndexEntry{TableCSIVolumes, index}); err != nil { + if err = txn.Insert("index", &IndexEntry{"csi_volumes", index}); err != nil { return fmt.Errorf("index update failed: %v", err) } @@ -2855,11 +2845,11 @@ func (s *StateStore) CSIVolumeClaim(index uint64, now int64, namespace, id strin // CSIVolumeDeregister removes the volume from the server func (s *StateStore) CSIVolumeDeregister(index uint64, namespace string, ids []string, force bool) error { - txn := s.db.WriteTxnMsgT(structs.CSIVolumeDeregisterRequestType, index) + txn := s.db.WriteTxn(index) defer txn.Abort() for _, id := range ids { - existing, err := txn.First(TableCSIVolumes, "id", namespace, id) + existing, err := txn.First("csi_volumes", "id", namespace, id) if err != nil { return fmt.Errorf("volume lookup failed: %s: %v", id, err) } @@ -2883,12 +2873,12 @@ func (s *StateStore) CSIVolumeDeregister(index uint64, namespace string, ids []s } } - if err = txn.Delete(TableCSIVolumes, existing); err != nil { + if err = txn.Delete("csi_volumes", existing); err != nil { return fmt.Errorf("volume delete failed: %s: %v", id, err) } } - if err := txn.Insert("index", &IndexEntry{TableCSIVolumes, index}); err != nil { + if err := txn.Insert("index", &IndexEntry{"csi_volumes", index}); err != nil { return fmt.Errorf("index update failed: %v", err) } @@ -3070,7 +3060,7 @@ func (s *StateStore) CSIPlugins(ws memdb.WatchSet) (memdb.ResultIterator, error) txn := s.db.ReadTxn() defer txn.Abort() - iter, err := txn.Get(TableCSIPlugins, "id") + iter, err := txn.Get("csi_plugins", "id") if err 
!= nil { return nil, fmt.Errorf("csi_plugins lookup failed: %v", err) } @@ -3084,7 +3074,7 @@ func (s *StateStore) CSIPlugins(ws memdb.WatchSet) (memdb.ResultIterator, error) func (s *StateStore) CSIPluginsByIDPrefix(ws memdb.WatchSet, pluginID string) (memdb.ResultIterator, error) { txn := s.db.ReadTxn() - iter, err := txn.Get(TableCSIPlugins, "id_prefix", pluginID) + iter, err := txn.Get("csi_plugins", "id_prefix", pluginID) if err != nil { return nil, err } @@ -3108,7 +3098,7 @@ func (s *StateStore) CSIPluginByID(ws memdb.WatchSet, id string) (*structs.CSIPl // CSIPluginByIDTxn returns a named CSIPlugin func (s *StateStore) CSIPluginByIDTxn(txn Txn, ws memdb.WatchSet, id string) (*structs.CSIPlugin, error) { - watchCh, obj, err := txn.FirstWatch(TableCSIPlugins, "id", id) + watchCh, obj, err := txn.FirstWatch("csi_plugins", "id", id) if err != nil { return nil, fmt.Errorf("csi_plugin lookup failed: %s %v", id, err) } @@ -3165,7 +3155,7 @@ func (s *StateStore) UpsertCSIPlugin(index uint64, plug *structs.CSIPlugin) erro txn := s.db.WriteTxn(index) defer txn.Abort() - existing, err := txn.First(TableCSIPlugins, "id", plug.ID) + existing, err := txn.First("csi_plugins", "id", plug.ID) if err != nil { return fmt.Errorf("csi_plugin lookup error: %s %v", plug.ID, err) } @@ -3176,11 +3166,11 @@ func (s *StateStore) UpsertCSIPlugin(index uint64, plug *structs.CSIPlugin) erro plug.CreateTime = existing.(*structs.CSIPlugin).CreateTime } - err = txn.Insert(TableCSIPlugins, plug) + err = txn.Insert("csi_plugins", plug) if err != nil { return fmt.Errorf("csi_plugins insert error: %v", err) } - if err := txn.Insert("index", &IndexEntry{TableCSIPlugins, index}); err != nil { + if err := txn.Insert("index", &IndexEntry{"csi_plugins", index}); err != nil { return fmt.Errorf("index update failed: %v", err) } return txn.Commit() @@ -3240,7 +3230,7 @@ func (s *StateStore) DeleteCSIPlugin(index uint64, id string) error { return structs.ErrCSIPluginInUse } - err = txn.Delete(TableCSIPlugins, plug) + err = txn.Delete("csi_plugins", plug) if err != nil { return fmt.Errorf("csi_plugins delete error: %v", err) } @@ -4117,11 +4107,11 @@ func (s *StateStore) upsertAllocsImpl(index uint64, allocs []*structs.Allocation } // Issue https://github.com/hashicorp/nomad/issues/2583 uncovered - // a race between a forced garbage collection and the scheduler + // the a race between a forced garbage collection and the scheduler // marking an allocation as terminal. The issue is that the // allocation from the scheduler has its job normalized and the FSM - // will only denormalize if the allocation is not terminal. However - // if the allocation is garbage collected, that will result in an + // will only denormalize if the allocation is not terminal. However + // if the allocation is garbage collected, that will result in a // allocation being upserted for the first time without a job // attached. By returning an error here, it will cause the FSM to // error, causing the plan_apply to error and thus causing the @@ -4130,55 +4120,6 @@ func (s *StateStore) upsertAllocsImpl(index uint64, allocs []*structs.Allocation if alloc.Job == nil { return fmt.Errorf("attempting to upsert allocation %q without a job", alloc.ID) } - - // Check if the alloc requires sticky volumes. 
If yes, find a node - // that has the right volume and update the task group volume - // claims table - for _, tg := range alloc.Job.TaskGroups { - for _, v := range tg.Volumes { - if !v.Sticky { - continue - } - sv := &structs.TaskGroupHostVolumeClaim{ - ID: uuid.Generate(), - Namespace: alloc.Namespace, - JobID: alloc.JobID, - TaskGroupName: tg.Name, - AllocID: alloc.ID, - VolumeName: v.Source, - } - - allocNode, err := s.NodeByID(nil, alloc.NodeID) - if err != nil { - return err - } - - // since there's no existing claim, find a volume and register a claim - for _, v := range allocNode.HostVolumes { - if v.Name != sv.VolumeName { - continue - } - - sv.VolumeID = v.ID - - // has this volume been claimed already? - existingClaim, err := s.GetTaskGroupHostVolumeClaim(nil, sv.Namespace, sv.JobID, sv.TaskGroupName, v.ID) - if err != nil { - return err - } - - // if the volume has already been claimed, we don't have to do anything. The - // feasibility checker in the scheduler will verify alloc placement. - if existingClaim != nil { - continue - } - - if err := s.upsertTaskGroupHostVolumeClaimImpl(index, sv, txn); err != nil { - return err - } - } - } - } } else { alloc.CreateIndex = exist.CreateIndex alloc.ModifyIndex = index @@ -5948,13 +5889,13 @@ func (s *StateStore) updateJobCSIPlugins(index uint64, job, prev *structs.Job, t } for _, plugIn := range plugIns { - err = txn.Insert(TableCSIPlugins, plugIn) + err = txn.Insert("csi_plugins", plugIn) if err != nil { return fmt.Errorf("csi_plugins insert error: %v", err) } } - if err := txn.Insert("index", &IndexEntry{TableCSIPlugins, index}); err != nil { + if err := txn.Insert("index", &IndexEntry{"csi_plugins", index}); err != nil { return fmt.Errorf("index update failed: %v", err) } diff --git a/nomad/state/state_store_host_volumes.go b/nomad/state/state_store_host_volumes.go deleted file mode 100644 index 06a5e64c59c..00000000000 --- a/nomad/state/state_store_host_volumes.go +++ /dev/null @@ -1,292 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -package state - -import ( - "fmt" - "strings" - - memdb "github.com/hashicorp/go-memdb" - "github.com/hashicorp/nomad/nomad/structs" -) - -// HostVolumeByID retrieve a specific host volume -func (s *StateStore) HostVolumeByID(ws memdb.WatchSet, ns, id string, withAllocs bool) (*structs.HostVolume, error) { - txn := s.db.ReadTxn() - watchCh, obj, err := txn.FirstWatch(TableHostVolumes, indexID, ns, id) - if err != nil { - return nil, err - } - ws.Add(watchCh) - - if obj == nil { - return nil, nil - } - vol := obj.(*structs.HostVolume) - if !withAllocs { - return vol, nil - } - - vol = vol.Copy() - vol.Allocations = []*structs.AllocListStub{} - - // we can't use AllocsByNodeTerminal because we only want to filter out - // allocs that are client-terminal, not server-terminal - allocs, err := s.AllocsByNode(nil, vol.NodeID) - if err != nil { - return nil, fmt.Errorf("could not query allocs to check for host volume claims: %w", err) - } - for _, alloc := range allocs { - if alloc.ClientTerminalStatus() { - continue - } - for _, volClaim := range alloc.Job.LookupTaskGroup(alloc.TaskGroup).Volumes { - if volClaim.Type == structs.VolumeTypeHost && volClaim.Source == vol.Name { - vol.Allocations = append(vol.Allocations, alloc.Stub(nil)) - } - } - } - - return vol, nil -} - -// UpsertHostVolume upserts a host volume -func (s *StateStore) UpsertHostVolume(index uint64, vol *structs.HostVolume) error { - txn := s.db.WriteTxnMsgT(structs.HostVolumeRegisterRequestType, index) - defer txn.Abort() - - if exists, err := s.namespaceExists(txn, vol.Namespace); err != nil { - return err - } else if !exists { - return fmt.Errorf("host volume %s is in nonexistent namespace %s", vol.ID, vol.Namespace) - } - - obj, err := txn.First(TableHostVolumes, indexID, vol.Namespace, vol.ID) - if err != nil { - return err - } - var old *structs.HostVolume - if obj != nil { - old = obj.(*structs.HostVolume) - vol.CreateIndex = old.CreateIndex - vol.CreateTime = old.CreateTime - } else { - vol.CreateIndex = index - } - - err = s.enforceHostVolumeQuotaTxn(txn, index, vol, old, true) - if err != nil { - return err - } - - // If the fingerprint is written from the node before the create RPC handler - // completes, we'll never update from the initial pending, so reconcile that - // here - node, err := s.NodeByID(nil, vol.NodeID) - if err != nil { - return err - } - if node == nil { - return fmt.Errorf("host volume %s has nonexistent node ID %s", vol.ID, vol.NodeID) - } - - // prevent a race between node fingerprint and create RPC that could - // switch a ready volume back to pending - if _, ok := node.HostVolumes[vol.Name]; ok { - vol.State = structs.HostVolumeStateReady - } - - // Register RPCs for new volumes may not have the node pool set - vol.NodePool = node.NodePool - - // Allocations are denormalized on read, so we don't want these to be - // written to the state store. 
- vol.Allocations = nil - vol.ModifyIndex = index - - err = txn.Insert(TableHostVolumes, vol) - if err != nil { - return fmt.Errorf("host volume insert: %w", err) - } - - if err := txn.Insert(tableIndex, &IndexEntry{TableHostVolumes, index}); err != nil { - return fmt.Errorf("index update failed: %w", err) - } - - return txn.Commit() -} - -// DeleteHostVolume deletes a host volume -func (s *StateStore) DeleteHostVolume(index uint64, ns string, id string) error { - txn := s.db.WriteTxnMsgT(structs.HostVolumeDeleteRequestType, index) - defer txn.Abort() - - obj, err := txn.First(TableHostVolumes, indexID, ns, id) - if err != nil { - return err - } - if obj != nil { - vol := obj.(*structs.HostVolume) - - allocs, err := s.AllocsByNodeTerminal(nil, vol.NodeID, false) - if err != nil { - return fmt.Errorf("could not query allocs to check for host volume claims: %w", err) - } - for _, alloc := range allocs { - for _, volClaim := range alloc.Job.LookupTaskGroup(alloc.TaskGroup).Volumes { - if volClaim.Type == structs.VolumeTypeHost && volClaim.Name == vol.Name { - return fmt.Errorf("could not delete volume %s in use by alloc %s", - vol.ID, alloc.ID) - } - } - } - - err = s.subtractVolumeFromQuotaUsageTxn(txn, index, vol) - if err != nil { - return err - } - - err = txn.Delete(TableHostVolumes, vol) - if err != nil { - return fmt.Errorf("host volume delete: %w", err) - } - } - - if err := txn.Insert(tableIndex, &IndexEntry{TableHostVolumes, index}); err != nil { - return fmt.Errorf("index update failed: %w", err) - } - - return txn.Commit() - -} - -// HostVolumes queries all the host volumes and is mostly used for -// snapshot/restore -func (s *StateStore) HostVolumes(ws memdb.WatchSet, sort SortOption) (memdb.ResultIterator, error) { - return s.hostVolumesIter(ws, indexID, sort) -} - -// HostVolumesByIDPrefix retrieves all host volumes by ID prefix. Because the ID -// index is namespaced, we need to handle the wildcard namespace here as well. 
-func (s *StateStore) HostVolumesByIDPrefix(ws memdb.WatchSet, ns, prefix string, sort SortOption) (memdb.ResultIterator, error) { - - if ns != structs.AllNamespacesSentinel { - return s.hostVolumesIter(ws, "id_prefix", sort, ns, prefix) - } - - // for wildcard namespace, wrap the iterator in a filter function that - // filters all volumes by prefix - iter, err := s.hostVolumesIter(ws, indexID, sort) - if err != nil { - return nil, err - } - wrappedIter := memdb.NewFilterIterator(iter, func(raw any) bool { - vol, ok := raw.(*structs.HostVolume) - if !ok { - return true - } - return !strings.HasPrefix(vol.ID, prefix) - }) - return wrappedIter, nil -} - -// HostVolumesByName retrieves all host volumes of the same name -func (s *StateStore) HostVolumesByName(ws memdb.WatchSet, ns, name string, sort SortOption) (memdb.ResultIterator, error) { - return s.hostVolumesIter(ws, "name_prefix", sort, ns, name) -} - -// HostVolumesByNodeID retrieves all host volumes on the same node -func (s *StateStore) HostVolumesByNodeID(ws memdb.WatchSet, nodeID string, sort SortOption) (memdb.ResultIterator, error) { - return s.hostVolumesIter(ws, indexNodeID, sort, nodeID) -} - -// HostVolumesByNodePool retrieves all host volumes in the same node pool -func (s *StateStore) HostVolumesByNodePool(ws memdb.WatchSet, nodePool string, sort SortOption) (memdb.ResultIterator, error) { - return s.hostVolumesIter(ws, indexNodePool, sort, nodePool) -} - -func (s *StateStore) hostVolumesIter(ws memdb.WatchSet, index string, sort SortOption, args ...any) (memdb.ResultIterator, error) { - txn := s.db.ReadTxn() - - var iter memdb.ResultIterator - var err error - - switch sort { - case SortReverse: - iter, err = txn.GetReverse(TableHostVolumes, index, args...) - default: - iter, err = txn.Get(TableHostVolumes, index, args...) 
- } - if err != nil { - return nil, err - } - - ws.Add(iter.WatchCh()) - return iter, nil -} - -// upsertHostVolumeForNode sets newly fingerprinted host volumes to ready state -func upsertHostVolumeForNode(txn *txn, node *structs.Node, index uint64) error { - if len(node.HostVolumes) == 0 { - return nil - } - iter, err := txn.Get(TableHostVolumes, indexNodeID, node.ID) - if err != nil { - return err - } - - var dirty bool // signals we need to update table index - - for { - raw := iter.Next() - if raw == nil { - break - } - vol := raw.(*structs.HostVolume) - volState := vol.State - _, ok := node.HostVolumes[vol.Name] - - switch { - case ok && node.Status == structs.NodeStatusReady && - vol.State != structs.HostVolumeStateReady: - // the fingerprint has been updated on a healthy client - volState = structs.HostVolumeStateReady - - case !ok && vol.State == structs.HostVolumeStateReady: - // the volume was previously fingerprinted but is no longer showing - // up in the fingerprint; this will usually be because of a failed - // restore on the client - volState = structs.HostVolumeStateUnavailable - - case ok && node.Status == structs.NodeStatusDown: - // volumes on down nodes will never pass feasibility checks - volState = structs.HostVolumeStateUnavailable - - case ok && vol.NodePool != node.NodePool: - // the client's node pool has been changed - - default: - // nothing has changed, skip updating this volume - continue - } - - vol = vol.Copy() - vol.State = volState - vol.NodePool = node.NodePool - vol.ModifyIndex = index - err = txn.Insert(TableHostVolumes, vol) - if err != nil { - return fmt.Errorf("host volume insert: %w", err) - } - dirty = true - } - - if dirty { - if err := txn.Insert("index", &IndexEntry{TableHostVolumes, index}); err != nil { - return fmt.Errorf("index update failed: %v", err) - } - } - - return nil -} diff --git a/nomad/state/state_store_host_volumes_ce.go b/nomad/state/state_store_host_volumes_ce.go deleted file mode 100644 index 8c25d2ad47b..00000000000 --- a/nomad/state/state_store_host_volumes_ce.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -//go:build !ent - -package state - -import "github.com/hashicorp/nomad/nomad/structs" - -func (s *StateStore) EnforceHostVolumeQuota(_ *structs.HostVolume, _ *structs.HostVolume) error { - return nil -} - -func (s *StateStore) enforceHostVolumeQuotaTxn(_ Txn, _ uint64, _ *structs.HostVolume, _ *structs.HostVolume, _ bool) error { - return nil -} - -func (s *StateStore) subtractVolumeFromQuotaUsageTxn(_ Txn, _ uint64, _ *structs.HostVolume) error { - return nil -} diff --git a/nomad/state/state_store_host_volumes_test.go b/nomad/state/state_store_host_volumes_test.go deleted file mode 100644 index b390eaa600f..00000000000 --- a/nomad/state/state_store_host_volumes_test.go +++ /dev/null @@ -1,355 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -package state - -import ( - "fmt" - "testing" - - memdb "github.com/hashicorp/go-memdb" - "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper/uuid" - "github.com/hashicorp/nomad/nomad/mock" - "github.com/hashicorp/nomad/nomad/structs" - "github.com/shoenig/test/must" -) - -func TestStateStore_HostVolumes_CRUD(t *testing.T) { - ci.Parallel(t) - store := testStateStore(t) - index, err := store.LatestIndex() - must.NoError(t, err) - - nodes := []*structs.Node{ - mock.Node(), - mock.Node(), - mock.Node(), - } - nodes[2].NodePool = "prod" - index++ - must.NoError(t, store.UpsertNode(structs.MsgTypeTestSetup, - index, nodes[0], NodeUpsertWithNodePool)) - must.NoError(t, store.UpsertNode(structs.MsgTypeTestSetup, - index, nodes[1], NodeUpsertWithNodePool)) - must.NoError(t, store.UpsertNode(structs.MsgTypeTestSetup, - index, nodes[2], NodeUpsertWithNodePool)) - - ns := mock.Namespace() - must.NoError(t, store.UpsertNamespaces(index, []*structs.Namespace{ns})) - - vols := []*structs.HostVolume{ - mock.HostVolume(), - mock.HostVolume(), - mock.HostVolume(), - mock.HostVolume(), - } - vols[0].NodeID = nodes[0].ID - vols[1].NodeID = nodes[1].ID - vols[1].Name = "another-example" - vols[2].NodeID = nodes[2].ID - vols[2].NodePool = nodes[2].NodePool - vols[3].Namespace = ns.Name - vols[3].NodeID = nodes[2].ID - vols[3].NodePool = nodes[2].NodePool - - index++ - must.NoError(t, store.UpsertHostVolume(index, vols[0])) - must.NoError(t, store.UpsertHostVolume(index, vols[1])) - must.NoError(t, store.UpsertHostVolume(index, vols[2])) - must.NoError(t, store.UpsertHostVolume(index, vols[3])) - - vol, err := store.HostVolumeByID(nil, vols[0].Namespace, vols[0].ID, true) - must.NoError(t, err) - must.NotNil(t, vol) - must.Eq(t, vols[0].ID, vol.ID) - must.NotNil(t, vol.Allocations) - must.Len(t, 0, vol.Allocations) - - vol, err = store.HostVolumeByID(nil, vols[0].Namespace, vols[0].ID, false) - must.NoError(t, err) - must.NotNil(t, vol) - must.Nil(t, vol.Allocations) - - consumeIter := func(iter memdb.ResultIterator) map[string]*structs.HostVolume { - got := map[string]*structs.HostVolume{} - for raw := iter.Next(); raw != nil; raw = iter.Next() { - vol := raw.(*structs.HostVolume) - got[vol.ID] = vol - } - return got - } - - iter, err := store.HostVolumesByName(nil, structs.DefaultNamespace, "example", SortDefault) - must.NoError(t, err) - got := consumeIter(iter) - must.NotNil(t, got[vols[0].ID], must.Sprint("expected vol0")) - must.NotNil(t, got[vols[2].ID], must.Sprint("expected vol2")) - must.MapLen(t, 2, got, must.Sprint(`expected 2 volumes named "example" in default namespace`)) - - iter, err = store.HostVolumesByNodePool(nil, nodes[2].NodePool, SortDefault) - must.NoError(t, err) - got = consumeIter(iter) - must.NotNil(t, got[vols[2].ID], must.Sprint("expected vol2")) - must.NotNil(t, got[vols[3].ID], must.Sprint("expected vol3")) - must.MapLen(t, 2, got, must.Sprint(`expected 2 volumes in prod node pool`)) - - iter, err = store.HostVolumesByNodeID(nil, nodes[2].ID, SortDefault) - must.NoError(t, err) - got = consumeIter(iter) - must.NotNil(t, got[vols[2].ID], must.Sprint("expected vol2")) - must.NotNil(t, got[vols[3].ID], must.Sprint("expected vol3")) - must.MapLen(t, 2, got, must.Sprint(`expected 2 volumes on node 2`)) - - // simulate a node registering one of the volumes - nodes[2] = nodes[2].Copy() - nodes[2].HostVolumes = map[string]*structs.ClientHostVolumeConfig{"example": { - Name: vols[2].Name, - Path: vols[2].HostPath, - }} - index++ - 
must.NoError(t, store.UpsertNode(structs.MsgTypeTestSetup, index, nodes[2])) - - // update all the volumes, which should update the state of vol2 as well - index++ - for i, vol := range vols { - vol = vol.Copy() - vol.RequestedCapacityMaxBytes = 300000 - vols[i] = vol - must.NoError(t, store.UpsertHostVolume(index, vol)) - } - - iter, err = store.HostVolumesByName(nil, structs.DefaultNamespace, "example", SortDefault) - must.NoError(t, err) - got = consumeIter(iter) - must.MapLen(t, 2, got, must.Sprint(`expected 2 volumes named "example" in default namespace`)) - - vol0 := got[vols[0].ID] - must.NotNil(t, vol0) - must.Eq(t, index, vol0.ModifyIndex) - vol2 := got[vols[2].ID] - must.NotNil(t, vol2) - must.Eq(t, index, vol2.ModifyIndex) - must.Eq(t, structs.HostVolumeStateReady, vol2.State, must.Sprint( - "expected volume state to be updated because its been fingerprinted by a node")) - - alloc := mock.AllocForNode(nodes[2]) - alloc.Job.TaskGroups[0].Volumes = map[string]*structs.VolumeRequest{"example": { - Name: "example", - Type: structs.VolumeTypeHost, - Source: vols[2].Name, - }} - index++ - must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, - index, []*structs.Allocation{alloc})) - - index++ - err = store.DeleteHostVolume(index, vol2.Namespace, vols[2].ID) - must.EqError(t, err, fmt.Sprintf( - "could not delete volume %s in use by alloc %s", vols[2].ID, alloc.ID)) - - err = store.DeleteHostVolume(index, vol2.Namespace, vols[1].ID) - must.NoError(t, err) - vol, err = store.HostVolumeByID(nil, vols[1].Namespace, vols[1].ID, true) - must.NoError(t, err) - must.Nil(t, vol) - - vol, err = store.HostVolumeByID(nil, vols[2].Namespace, vols[2].ID, true) - must.NoError(t, err) - must.NotNil(t, vol) - must.Len(t, 1, vol.Allocations) - - iter, err = store.HostVolumes(nil, SortReverse) - must.NoError(t, err) - got = consumeIter(iter) - must.MapLen(t, 3, got, must.Sprint(`expected 3 volumes remain`)) - - prefix := vol.ID[:30] // sufficiently long prefix to avoid flakes - iter, err = store.HostVolumesByIDPrefix(nil, "*", prefix, SortDefault) - must.NoError(t, err) - got = consumeIter(iter) - must.MapLen(t, 1, got, must.Sprint(`expected only one volume to match prefix`)) - - iter, err = store.HostVolumesByIDPrefix(nil, vol.Namespace, prefix, SortDefault) - must.NoError(t, err) - got = consumeIter(iter) - must.MapLen(t, 1, got, must.Sprint(`expected only one volume to match prefix`)) - - alloc = alloc.Copy() - alloc.ClientStatus = structs.AllocClientStatusComplete - index++ - must.NoError(t, store.UpdateAllocsFromClient(structs.MsgTypeTestSetup, - index, []*structs.Allocation{alloc})) - for _, v := range vols { - index++ - must.NoError(t, store.DeleteHostVolume(index, v.Namespace, v.ID)) - } - iter, err = store.HostVolumes(nil, SortDefault) - got = consumeIter(iter) - must.MapLen(t, 0, got, must.Sprint(`expected no volumes to remain`)) -} - -func TestStateStore_UpdateHostVolumesFromFingerprint(t *testing.T) { - ci.Parallel(t) - store := testStateStore(t) - index, err := store.LatestIndex() - must.NoError(t, err) - - node := mock.Node() - node.HostVolumes = map[string]*structs.ClientHostVolumeConfig{ - "static-vol": {Name: "static-vol", Path: "/srv/static"}, - "dhv-zero": {Name: "dhv-zero", Path: "/var/nomad/alloc_mounts" + uuid.Generate()}, - } - index++ - must.NoError(t, store.UpsertNode(structs.MsgTypeTestSetup, - index, node, NodeUpsertWithNodePool)) - - otherNode := mock.Node() - - must.NoError(t, store.UpsertNode(structs.MsgTypeTestSetup, - index, otherNode, NodeUpsertWithNodePool)) - - 
ns := structs.DefaultNamespace - - vols := []*structs.HostVolume{ - mock.HostVolume(), - mock.HostVolume(), - mock.HostVolume(), - mock.HostVolume(), - } - - // a volume that's been fingerprinted before we can write it to state - vols[0].Name = "dhv-zero" - vols[0].NodeID = node.ID - - // a volume that will match the new fingerprint - vols[1].Name = "dhv-one" - vols[1].NodeID = node.ID - - // a volume that matches the new fingerprint but on the wrong node - vols[2].Name = "dhv-one" - vols[2].NodeID = otherNode.ID - - // a volume that won't be fingerprinted - vols[3].Name = "dhv-two" - vols[3].NodeID = node.ID - - index++ - oldIndex := index - must.NoError(t, store.UpsertHostVolume(index, vols[0])) - must.NoError(t, store.UpsertHostVolume(index, vols[1])) - must.NoError(t, store.UpsertHostVolume(index, vols[2])) - must.NoError(t, store.UpsertHostVolume(index, vols[3])) - - vol0, err := store.HostVolumeByID(nil, ns, vols[0].ID, false) - must.NoError(t, err) - must.Eq(t, structs.HostVolumeStateReady, vol0.State, - must.Sprint("previously-fingerprinted volume should be in ready state")) - - // update the fingerprint - - node = node.Copy() - node.HostVolumes["dhv-one"] = &structs.ClientHostVolumeConfig{ - Name: "dhv-one", - Path: "/var/nomad/alloc_mounts" + uuid.Generate(), - } - - index++ - must.NoError(t, store.UpsertNode(structs.MsgTypeTestSetup, index, node)) - - vol0, err = store.HostVolumeByID(nil, ns, vols[0].ID, false) - must.NoError(t, err) - must.Eq(t, oldIndex, vol0.ModifyIndex, must.Sprint("expected no further update")) - must.Eq(t, structs.HostVolumeStateReady, vol0.State) - - vol1, err := store.HostVolumeByID(nil, ns, vols[1].ID, false) - must.NoError(t, err) - must.Eq(t, index, vol1.ModifyIndex, - must.Sprint("fingerprint should update pending volume")) - must.Eq(t, structs.HostVolumeStateReady, vol1.State) - - vol2, err := store.HostVolumeByID(nil, ns, vols[2].ID, false) - must.NoError(t, err) - must.Eq(t, oldIndex, vol2.ModifyIndex, - must.Sprint("volume on other node should not change")) - must.Eq(t, structs.HostVolumeStatePending, vol2.State) - - vol3, err := store.HostVolumeByID(nil, ns, vols[3].ID, false) - must.NoError(t, err) - must.Eq(t, oldIndex, vol3.ModifyIndex, - must.Sprint("volume not fingerprinted should not change")) - must.Eq(t, structs.HostVolumeStatePending, vol3.State) - - // update the node pool and fingerprint - otherNode = otherNode.Copy() - otherNode.NodePool = "new-node-pool" - otherNode.HostVolumes = map[string]*structs.ClientHostVolumeConfig{ - "dhv-one": {Name: "dhv-one", Path: "/var/nomad/alloc_mounts" + uuid.Generate()}, - } - index++ - must.NoError(t, store.UpsertNode(structs.MsgTypeTestSetup, index, otherNode)) - - vol2, err = store.HostVolumeByID(nil, ns, vols[2].ID, false) - must.NoError(t, err) - must.Eq(t, index, vol2.ModifyIndex, - must.Sprint("node pool change should update pending volume")) - must.Eq(t, "new-node-pool", vol2.NodePool) - must.Eq(t, structs.HostVolumeStateReady, vol2.State) - - // node restarts and fails to restore - node = node.Copy() - node.HostVolumes = map[string]*structs.ClientHostVolumeConfig{ - "static-vol": {Name: "static-vol", Path: "/srv/static"}, - } - index++ - must.NoError(t, store.UpsertNode(structs.MsgTypeTestSetup, index, node)) - - vol0, err = store.HostVolumeByID(nil, ns, vols[0].ID, false) - must.NoError(t, err) - must.Eq(t, index, vol0.ModifyIndex, - must.Sprint("failed restore should update ready volume")) - must.Eq(t, structs.HostVolumeStateUnavailable, vol0.State) - - vol1, err = 
store.HostVolumeByID(nil, ns, vols[1].ID, false) - must.NoError(t, err) - must.Eq(t, index, vol1.ModifyIndex, - must.Sprint("failed restore should update ready volume")) - must.Eq(t, structs.HostVolumeStateUnavailable, vol1.State) - - // make sure we can go from unavailable to available - - node.HostVolumes = map[string]*structs.ClientHostVolumeConfig{ - "static-vol": {Name: "static-vol", Path: "/srv/static"}, - "dhv-zero": {Name: "dhv-zero", Path: "/var/nomad/alloc_mounts" + uuid.Generate()}, - } - index++ - must.NoError(t, store.UpsertNode(structs.MsgTypeTestSetup, index, node)) - - vol0, err = store.HostVolumeByID(nil, ns, vols[0].ID, false) - must.NoError(t, err) - must.Eq(t, index, vol0.ModifyIndex, - must.Sprint("recovered node should update unavailable volume")) - must.Eq(t, structs.HostVolumeStateReady, vol0.State) - - // down a node - node = node.Copy() - node.Status = structs.NodeStatusDown - index++ - must.NoError(t, store.UpsertNode(structs.MsgTypeTestSetup, index, node)) - vol0, err = store.HostVolumeByID(nil, ns, vols[0].ID, false) - must.NoError(t, err) - must.Eq(t, index, vol0.ModifyIndex, - must.Sprint("downed node should mark volume unavailable")) - must.Eq(t, structs.HostVolumeStateUnavailable, vol0.State) - - // bring the node back up - node = node.Copy() - node.Status = structs.NodeStatusReady - index++ - must.NoError(t, store.UpsertNode(structs.MsgTypeTestSetup, index, node)) - vol0, err = store.HostVolumeByID(nil, ns, vols[0].ID, false) - must.NoError(t, err) - must.Eq(t, index, vol0.ModifyIndex, - must.Sprint("ready node should update unavailable volume")) - must.Eq(t, structs.HostVolumeStateReady, vol0.State) -} diff --git a/nomad/state/state_store_restore.go b/nomad/state/state_store_restore.go index 41a571b71d9..2072ca727d7 100644 --- a/nomad/state/state_store_restore.go +++ b/nomad/state/state_store_restore.go @@ -181,7 +181,7 @@ func (r *StateRestore) ScalingPolicyRestore(scalingPolicy *structs.ScalingPolicy // CSIPluginRestore is used to restore a CSI plugin func (r *StateRestore) CSIPluginRestore(plugin *structs.CSIPlugin) error { - if err := r.txn.Insert(TableCSIPlugins, plugin); err != nil { + if err := r.txn.Insert("csi_plugins", plugin); err != nil { return fmt.Errorf("csi plugin insert failed: %v", err) } return nil @@ -189,7 +189,7 @@ func (r *StateRestore) CSIPluginRestore(plugin *structs.CSIPlugin) error { // CSIVolumeRestore is used to restore a CSI volume func (r *StateRestore) CSIVolumeRestore(volume *structs.CSIVolume) error { - if err := r.txn.Insert(TableCSIVolumes, volume); err != nil { + if err := r.txn.Insert("csi_volumes", volume); err != nil { return fmt.Errorf("csi volume insert failed: %v", err) } return nil @@ -291,11 +291,3 @@ func (r *StateRestore) JobSubmissionRestore(jobSubmission *structs.JobSubmission } return nil } - -// HostVolumeRestore restores a single host volume into the host_volumes table -func (r *StateRestore) HostVolumeRestore(vol *structs.HostVolume) error { - if err := r.txn.Insert(TableHostVolumes, vol); err != nil { - return fmt.Errorf("host volume insert failed: %w", err) - } - return nil -} diff --git a/nomad/state/state_store_task_group_volume_claims.go b/nomad/state/state_store_task_group_volume_claims.go deleted file mode 100644 index 6694a22834b..00000000000 --- a/nomad/state/state_store_task_group_volume_claims.go +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -package state - -import ( - "fmt" - - "github.com/hashicorp/go-memdb" - "github.com/hashicorp/nomad/nomad/structs" -) - -// UpsertTaskGroupHostVolumeClaim is used to upsert claims into the state store. -// This method is only used in unit tests. -func (s *StateStore) UpsertTaskGroupHostVolumeClaim(msgType structs.MessageType, index uint64, claim *structs.TaskGroupHostVolumeClaim) error { - // Grab a write transaction. - txn := s.db.WriteTxnMsgT(msgType, index) - defer txn.Abort() - if err := s.upsertTaskGroupHostVolumeClaimImpl(index, claim, txn); err != nil { - return err - } - - return txn.Commit() -} - -// upsertTaskGroupHostVolumeClaimImpl is used to insert a task group volume claim into -// the state store. -func (s *StateStore) upsertTaskGroupHostVolumeClaimImpl( - index uint64, claim *structs.TaskGroupHostVolumeClaim, txn *txn) error { - - existingRaw, err := txn.First(TableTaskGroupHostVolumeClaim, indexID, claim.Namespace, claim.JobID, claim.TaskGroupName, claim.VolumeID) - if err != nil { - return fmt.Errorf("Task group volume association lookup failed: %v", err) - } - - var existing *structs.TaskGroupHostVolumeClaim - if existingRaw != nil { - existing = existingRaw.(*structs.TaskGroupHostVolumeClaim) - } - - if existing != nil { - // do allocation ID and volume ID match? - if existing.ClaimedByAlloc(claim) { - return nil - } - - claim.CreateIndex = existing.CreateIndex - claim.ModifyIndex = index - } else { - claim.CreateIndex = index - claim.ModifyIndex = index - } - - // Insert the claim into the table. - if err := txn.Insert(TableTaskGroupHostVolumeClaim, claim); err != nil { - return fmt.Errorf("Task group volume claim insert failed: %v", err) - } - - // Perform the index table update to mark the new insert. 
- if err := txn.Insert(tableIndex, &IndexEntry{TableTaskGroupHostVolumeClaim, index}); err != nil { - return fmt.Errorf("index update failed: %v", err) - } - - return nil -} - -// GetTaskGroupHostVolumeClaim returns a volume claim that matches the namespace, -// job id and task group name (there can be only one) -func (s *StateStore) GetTaskGroupHostVolumeClaim(ws memdb.WatchSet, namespace, jobID, taskGroupName, volumeID string) (*structs.TaskGroupHostVolumeClaim, error) { - txn := s.db.ReadTxn() - - watchCh, existing, err := txn.FirstWatch(TableTaskGroupHostVolumeClaim, indexID, namespace, jobID, taskGroupName, volumeID) - if err != nil { - return nil, fmt.Errorf("Task group volume claim lookup failed: %v", err) - } - ws.Add(watchCh) - - if existing != nil { - return existing.(*structs.TaskGroupHostVolumeClaim), nil - } - - return nil, nil -} - -// GetTaskGroupHostVolumeClaims returns all volume claims -func (s *StateStore) GetTaskGroupHostVolumeClaims(ws memdb.WatchSet) (memdb.ResultIterator, error) { - txn := s.db.ReadTxn() - - iter, err := txn.Get(TableTaskGroupHostVolumeClaim, indexID) - if err != nil { - return nil, fmt.Errorf("Task group volume claim lookup failed: %v", err) - } - ws.Add(iter.WatchCh()) - - return iter, nil -} - -// GetTaskGroupHostVolumeClaimsForTaskGroup returns all volume claims for a given -// task group -func (s *StateStore) GetTaskGroupHostVolumeClaimsForTaskGroup(ws memdb.WatchSet, ns, jobID, tg string) (memdb.ResultIterator, error) { - txn := s.db.ReadTxn() - - iter, err := txn.Get(TableTaskGroupHostVolumeClaim, indexID) - if err != nil { - return nil, fmt.Errorf("Task group volume claim lookup failed: %v", err) - } - ws.Add(iter.WatchCh()) - - // Filter out by ns, jobID and tg - filter := memdb.NewFilterIterator(iter, func(raw interface{}) bool { - claim, ok := raw.(*structs.TaskGroupHostVolumeClaim) - if !ok { - return true - } - return claim.Namespace != ns || claim.JobID != jobID || claim.TaskGroupName != tg - }) - - return filter, nil -} - -// deleteTaskGroupHostVolumeClaim deletes all claims for a given namespace and job ID -func (s *StateStore) deleteTaskGroupHostVolumeClaim(index uint64, txn *txn, namespace, jobID string) error { - iter, err := txn.Get(TableTaskGroupHostVolumeClaim, indexID) - if err != nil { - return fmt.Errorf("Task group volume claim lookup failed: %v", err) - } - - for raw := iter.Next(); raw != nil; raw = iter.Next() { - claim := raw.(*structs.TaskGroupHostVolumeClaim) - if claim.JobID == jobID && claim.Namespace == namespace { - if err := txn.Delete(TableTaskGroupHostVolumeClaim, claim); err != nil { - return fmt.Errorf("Task group volume claim deletion failed: %v", err) - } - } - } - - return nil -} diff --git a/nomad/state/state_store_task_group_volume_claims_test.go b/nomad/state/state_store_task_group_volume_claims_test.go deleted file mode 100644 index a0fe75d300a..00000000000 --- a/nomad/state/state_store_task_group_volume_claims_test.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -package state - -import ( - "testing" - - "github.com/hashicorp/go-memdb" - "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper/uuid" - "github.com/hashicorp/nomad/nomad/mock" - "github.com/hashicorp/nomad/nomad/structs" - "github.com/shoenig/test/must" -) - -func TestStateStore_UpsertTaskGroupHostVolumeClaim(t *testing.T) { - ci.Parallel(t) - testState := testStateStore(t) - - // Mock some objects - stickyJob := mock.Job() - hostVolCapsReadWrite := []*structs.HostVolumeCapability{ - { - AttachmentMode: structs.HostVolumeAttachmentModeFilesystem, - AccessMode: structs.HostVolumeAccessModeSingleNodeReader, - }, - { - AttachmentMode: structs.HostVolumeAttachmentModeFilesystem, - AccessMode: structs.HostVolumeAccessModeSingleNodeWriter, - }, - } - node := mock.Node() - dhv := &structs.HostVolume{ - Namespace: structs.DefaultNamespace, - ID: uuid.Generate(), - Name: "foo", - NodeID: node.ID, - RequestedCapabilities: hostVolCapsReadWrite, - State: structs.HostVolumeStateReady, - } - alloc := mock.Alloc() - alloc.NodeID = node.ID - claim := mock.TaskGroupHostVolumeClaim(stickyJob, alloc, dhv) - - must.NoError(t, testState.UpsertTaskGroupHostVolumeClaim(structs.MsgTypeTestSetup, 10, claim)) - - // Check that the index for the table was modified as expected. - initialIndex, err := testState.Index(TableTaskGroupHostVolumeClaim) - must.NoError(t, err) - must.Eq(t, 10, initialIndex) - - // List all the claims in the table and check the count - ws := memdb.NewWatchSet() - iter, err := testState.GetTaskGroupHostVolumeClaims(ws) - must.NoError(t, err) - - var count int - for raw := iter.Next(); raw != nil; raw = iter.Next() { - count++ - - // Ensure the create and modify indexes are populated correctly. - claim := raw.(*structs.TaskGroupHostVolumeClaim) - must.Eq(t, 10, claim.CreateIndex) - must.Eq(t, 10, claim.ModifyIndex) - } - must.Eq(t, 1, count) - - // Try writing another claim for the same alloc - anotherClaim := mock.TaskGroupHostVolumeClaim(stickyJob, alloc, dhv) - must.NoError(t, testState.UpsertTaskGroupHostVolumeClaim(structs.MsgTypeTestSetup, 10, anotherClaim)) - - // List all the claims in the table and check the count - iter, err = testState.GetTaskGroupHostVolumeClaims(ws) - must.NoError(t, err) - - count = 0 - for raw := iter.Next(); raw != nil; raw = iter.Next() { - count++ - - // Ensure the create and modify indexes are populated correctly. 
- claim := raw.(*structs.TaskGroupHostVolumeClaim) - must.Eq(t, 10, claim.CreateIndex) - must.Eq(t, 10, claim.ModifyIndex) - } - must.Eq(t, 1, count) -} diff --git a/nomad/state/state_store_test.go b/nomad/state/state_store_test.go index 8cfad206bf6..63498303d29 100644 --- a/nomad/state/state_store_test.go +++ b/nomad/state/state_store_test.go @@ -6437,108 +6437,6 @@ func TestStateStore_UpsertAlloc_No_Job(t *testing.T) { } } -func TestStateStore_UpsertAlloc_StickyVolumes(t *testing.T) { - ci.Parallel(t) - - store := testStateStore(t) - - nodes := []*structs.Node{ - mock.Node(), - mock.Node(), - } - - hostVolCapsReadWrite := []*structs.HostVolumeCapability{ - { - AttachmentMode: structs.HostVolumeAttachmentModeFilesystem, - AccessMode: structs.HostVolumeAccessModeSingleNodeReader, - }, - { - AttachmentMode: structs.HostVolumeAttachmentModeFilesystem, - AccessMode: structs.HostVolumeAccessModeSingleNodeWriter, - }, - } - dhv := &structs.HostVolume{ - Namespace: structs.DefaultNamespace, - ID: uuid.Generate(), - Name: "foo", - NodeID: nodes[1].ID, - RequestedCapabilities: hostVolCapsReadWrite, - State: structs.HostVolumeStateReady, - } - - nodes[0].HostVolumes = map[string]*structs.ClientHostVolumeConfig{} - nodes[1].HostVolumes = map[string]*structs.ClientHostVolumeConfig{"foo": {ID: dhv.ID, Name: dhv.Name}} - - stickyRequest := map[string]*structs.VolumeRequest{ - "foo": { - Type: "host", - Source: "foo", - Sticky: true, - AccessMode: structs.CSIVolumeAccessModeSingleNodeWriter, - AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, - }, - } - - for _, node := range nodes { - must.NoError(t, store.UpsertNode(structs.MsgTypeTestSetup, 1000, node)) - } - - stickyJob := mock.Job() - stickyJob.TaskGroups[0].Volumes = stickyRequest - - existingClaim := &structs.TaskGroupHostVolumeClaim{ - Namespace: structs.DefaultNamespace, - JobID: stickyJob.ID, - TaskGroupName: stickyJob.TaskGroups[0].Name, - VolumeID: dhv.ID, - VolumeName: dhv.Name, - } - must.NoError(t, store.UpsertTaskGroupHostVolumeClaim(structs.MsgTypeTestSetup, 1000, existingClaim)) - - allocWithClaimedVol := mock.AllocForNode(nodes[1]) - allocWithClaimedVol.Namespace = structs.DefaultNamespace - allocWithClaimedVol.JobID = stickyJob.ID - allocWithClaimedVol.Job = stickyJob - allocWithClaimedVol.NodeID = nodes[1].ID - - must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{allocWithClaimedVol})) - - // there must be exactly one claim in the state - claims := []*structs.TaskGroupHostVolumeClaim{} - iter, err := store.GetTaskGroupHostVolumeClaimsForTaskGroup(nil, structs.DefaultNamespace, stickyJob.ID, stickyJob.TaskGroups[0].Name) - must.Nil(t, err) - for raw := iter.Next(); raw != nil; raw = iter.Next() { - claim := raw.(*structs.TaskGroupHostVolumeClaim) - claims = append(claims, claim) - } - must.Len(t, 1, claims) - - // clean up the state - txn := store.db.WriteTxn(1000) - _, err = txn.DeletePrefix(TableTaskGroupHostVolumeClaim, "id_prefix", stickyJob.ID) - must.Nil(t, err) - must.NoError(t, store.deleteAllocsForJobTxn(txn, 1000, structs.DefaultNamespace, stickyJob.ID)) - must.NoError(t, txn.Commit()) - - // try to upsert an alloc for which there is no existing claim - stickyJob2 := mock.Job() - stickyJob2.TaskGroups[0].Volumes = stickyRequest - allocWithNoClaimedVol := mock.AllocForNode(nodes[1]) - allocWithNoClaimedVol.Namespace = structs.DefaultNamespace - allocWithNoClaimedVol.JobID = stickyJob2.ID - allocWithNoClaimedVol.Job = stickyJob2 - allocWithNoClaimedVol.NodeID = nodes[1].ID - - 
must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{allocWithNoClaimedVol})) - - // make sure we recorded a claim - claim, err := store.GetTaskGroupHostVolumeClaim(nil, structs.DefaultNamespace, stickyJob2.ID, stickyJob2.TaskGroups[0].Name, dhv.ID) - must.NoError(t, err) - must.Eq(t, claim.Namespace, structs.DefaultNamespace) - must.Eq(t, claim.JobID, stickyJob2.ID) - must.Eq(t, claim.TaskGroupName, stickyJob2.TaskGroups[0].Name) -} - func TestStateStore_UpsertAlloc_ChildJob(t *testing.T) { ci.Parallel(t) diff --git a/nomad/stream/event_broker.go b/nomad/stream/event_broker.go index 733b559bfae..bf9e22c03b0 100644 --- a/nomad/stream/event_broker.go +++ b/nomad/stream/event_broker.go @@ -9,10 +9,10 @@ import ( "sync" "sync/atomic" + "github.com/armon/go-metrics" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/go-hclog" - metrics "github.com/hashicorp/go-metrics/compat" ) const ( diff --git a/nomad/structs/acl.go b/nomad/structs/acl.go index afdea9b6001..f610b97236d 100644 --- a/nomad/structs/acl.go +++ b/nomad/structs/acl.go @@ -474,16 +474,6 @@ func (a *ACLToken) UnmarshalJSON(data []byte) (err error) { return nil } -func (a *ACLToken) Sanitize() *ACLToken { - if a == nil { - return nil - } - - out := a.Copy() - out.SecretID = "" - return out -} - // ACLRole is an abstraction for the ACL system which allows the grouping of // ACL policies into a single object. ACL tokens can be created and linked to // a role; the token then inherits all the permissions granted by the policies. diff --git a/nomad/structs/cni_config_test.go b/nomad/structs/cni_config_test.go index 04bf65ac95b..e7cf78b8819 100644 --- a/nomad/structs/cni_config_test.go +++ b/nomad/structs/cni_config_test.go @@ -4,10 +4,9 @@ package structs import ( - "testing" - "github.com/hashicorp/nomad/ci" "github.com/shoenig/test/must" + "testing" ) func TestCNIConfig_Equal(t *testing.T) { diff --git a/nomad/structs/config/workload_id.go b/nomad/structs/config/workload_id.go index 5a3f92dbee2..872acd81ef9 100644 --- a/nomad/structs/config/workload_id.go +++ b/nomad/structs/config/workload_id.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/nomad/helper/pointer" ) -// WorkloadIdentityConfig is the agent configuration block used to define +// WorkloadIdentityConfig is the agent configuraion block used to define // default workload identities. // // This based on the WorkloadIdentity struct from nomad/structs/workload_id.go diff --git a/nomad/structs/csi.go b/nomad/structs/csi.go index 70efacfe998..4296b67c8c3 100644 --- a/nomad/structs/csi.go +++ b/nomad/structs/csi.go @@ -117,25 +117,33 @@ func (t *TaskCSIPluginConfig) Copy() *TaskCSIPluginConfig { // CSIVolumeCapability is the requested attachment and access mode for a // volume type CSIVolumeCapability struct { - AttachmentMode VolumeAttachmentMode - AccessMode VolumeAccessMode + AttachmentMode CSIVolumeAttachmentMode + AccessMode CSIVolumeAccessMode } +// CSIVolumeAttachmentMode chooses the type of storage api that will be used to +// interact with the device. 
+type CSIVolumeAttachmentMode string + const ( - CSIVolumeAttachmentModeUnknown VolumeAttachmentMode = "" - CSIVolumeAttachmentModeBlockDevice VolumeAttachmentMode = "block-device" - CSIVolumeAttachmentModeFilesystem VolumeAttachmentMode = "file-system" + CSIVolumeAttachmentModeUnknown CSIVolumeAttachmentMode = "" + CSIVolumeAttachmentModeBlockDevice CSIVolumeAttachmentMode = "block-device" + CSIVolumeAttachmentModeFilesystem CSIVolumeAttachmentMode = "file-system" ) +// CSIVolumeAccessMode indicates how a volume should be used in a storage topology +// e.g whether the provider should make the volume available concurrently. +type CSIVolumeAccessMode string + const ( - CSIVolumeAccessModeUnknown VolumeAccessMode = "" + CSIVolumeAccessModeUnknown CSIVolumeAccessMode = "" - CSIVolumeAccessModeSingleNodeReader VolumeAccessMode = "single-node-reader-only" - CSIVolumeAccessModeSingleNodeWriter VolumeAccessMode = "single-node-writer" + CSIVolumeAccessModeSingleNodeReader CSIVolumeAccessMode = "single-node-reader-only" + CSIVolumeAccessModeSingleNodeWriter CSIVolumeAccessMode = "single-node-writer" - CSIVolumeAccessModeMultiNodeReader VolumeAccessMode = "multi-node-reader-only" - CSIVolumeAccessModeMultiNodeSingleWriter VolumeAccessMode = "multi-node-single-writer" - CSIVolumeAccessModeMultiNodeMultiWriter VolumeAccessMode = "multi-node-multi-writer" + CSIVolumeAccessModeMultiNodeReader CSIVolumeAccessMode = "multi-node-reader-only" + CSIVolumeAccessModeMultiNodeSingleWriter CSIVolumeAccessMode = "multi-node-single-writer" + CSIVolumeAccessModeMultiNodeMultiWriter CSIVolumeAccessMode = "multi-node-multi-writer" ) // CSIMountOptions contain optional additional configuration that can be used @@ -230,8 +238,8 @@ type CSIVolumeClaim struct { NodeID string ExternalNodeID string Mode CSIVolumeClaimMode - AccessMode VolumeAccessMode - AttachmentMode VolumeAttachmentMode + AccessMode CSIVolumeAccessMode + AttachmentMode CSIVolumeAttachmentMode State CSIVolumeClaimState } @@ -265,8 +273,8 @@ type CSIVolume struct { // could support. This value cannot be set by the user. 
Topologies []*CSITopology - AccessMode VolumeAccessMode // *current* access mode - AttachmentMode VolumeAttachmentMode // *current* attachment mode + AccessMode CSIVolumeAccessMode // *current* access mode + AttachmentMode CSIVolumeAttachmentMode // *current* attachment mode MountOptions *CSIMountOptions Secrets CSISecrets @@ -344,8 +352,8 @@ type CSIVolListStub struct { Name string ExternalID string Topologies []*CSITopology - AccessMode VolumeAccessMode - AttachmentMode VolumeAttachmentMode + AccessMode CSIVolumeAccessMode + AttachmentMode CSIVolumeAttachmentMode CurrentReaders int CurrentWriters int Schedulable bool @@ -925,8 +933,8 @@ type CSIVolumeClaimRequest struct { NodeID string ExternalNodeID string Claim CSIVolumeClaimMode - AccessMode VolumeAccessMode - AttachmentMode VolumeAttachmentMode + AccessMode CSIVolumeAccessMode + AttachmentMode CSIVolumeAttachmentMode State CSIVolumeClaimState Timestamp int64 // UnixNano WriteRequest diff --git a/nomad/structs/diff_test.go b/nomad/structs/diff_test.go index e394545ad1d..0f87387ff6c 100644 --- a/nomad/structs/diff_test.go +++ b/nomad/structs/diff_test.go @@ -10,7 +10,7 @@ import ( "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/pointer" - "github.com/shoenig/test/must" + "github.com/stretchr/testify/require" ) func TestJobDiff(t *testing.T) { @@ -4864,12 +4864,6 @@ func TestTaskGroupDiff(t *testing.T) { Old: "", New: "foo-src", }, - { - Type: DiffTypeAdded, - Name: "Sticky", - Old: "", - New: "false", - }, { Type: DiffTypeAdded, Name: "Type", @@ -5481,17 +5475,17 @@ func TestTaskGroupDiff(t *testing.T) { } for i, c := range cases { - must.NotEq(t, c.TestCase, "", must.Sprintf("case #%d needs a name", i+1)) + require.NotEmpty(t, c.TestCase, "case #%d needs a name", i+1) t.Run(c.TestCase, func(t *testing.T) { result, err := c.Old.Diff(c.New, c.Contextual) switch c.ExpErr { case true: - must.Error(t, err, must.Sprintf("case %q expected error", c.TestCase)) + require.Error(t, err, "case %q expected error", c.TestCase) case false: - must.NoError(t, err, must.Sprintf("case %q expected no error", c.TestCase)) - must.Eq(t, c.Expected, result) + require.NoError(t, err, "case %q expected no error", c.TestCase) + require.Equal(t, c.Expected, result) } }) } @@ -9876,10 +9870,10 @@ func TestTaskDiff(t *testing.T) { t.Run(c.Name, func(t *testing.T) { actual, err := c.Old.Diff(c.New, c.Contextual) if c.Error { - must.Error(t, err) + require.Error(t, err) } else { - must.NoError(t, err) - must.Eq(t, c.Expected, actual) + require.NoError(t, err) + require.Equal(t, c.Expected, actual) } }) } @@ -10854,7 +10848,7 @@ func TestServicesDiff(t *testing.T) { for _, c := range cases { t.Run(c.Name, func(t *testing.T) { actual := serviceDiffs(c.Old, c.New, c.Contextual) - must.Eq(t, c.Expected, actual) + require.Equal(t, c.Expected, actual) }) } } diff --git a/nomad/structs/event.go b/nomad/structs/event.go index 7a2e98fd056..1eca62046f9 100644 --- a/nomad/structs/event.go +++ b/nomad/structs/event.go @@ -31,9 +31,6 @@ const ( TopicACLAuthMethod Topic = "ACLAuthMethod" TopicACLBindingRule Topic = "ACLBindingRule" TopicService Topic = "Service" - TopicHostVolume Topic = "HostVolume" - TopicCSIVolume Topic = "CSIVolume" - TopicCSIPlugin Topic = "CSIPlugin" TopicAll Topic = "*" TypeNodeRegistration = "NodeRegistration" @@ -66,11 +63,6 @@ const ( TypeACLBindingRuleDeleted = "ACLBindingRuleDeleted" TypeServiceRegistration = "ServiceRegistration" TypeServiceDeregistration = "ServiceDeregistration" - TypeHostVolumeRegistered = "HostVolumeRegistered" 
- TypeHostVolumeDeleted = "HostVolumeDeleted" - TypeCSIVolumeRegistered = "CSIVolumeRegistered" - TypeCSIVolumeDeregistered = "CSIVolumeDeregistered" - TypeCSIVolumeClaim = "CSIVolumeClaim" ) // Event represents a change in Nomads state. @@ -163,7 +155,8 @@ type ServiceRegistrationStreamEvent struct { // NewACLTokenEvent takes a token and creates a new ACLTokenEvent. It creates // a copy of the passed in ACLToken and empties out the copied tokens SecretID func NewACLTokenEvent(token *ACLToken) *ACLTokenEvent { - c := token.Sanitize() + c := token.Copy() + c.SecretID = "" return &ACLTokenEvent{ ACLToken: c, @@ -196,21 +189,3 @@ type ACLAuthMethodEvent struct { type ACLBindingRuleEvent struct { ACLBindingRule *ACLBindingRule } - -// HostVolumeEvent holds a newly updated or deleted dynamic host volume to be -// used as an event in the event stream -type HostVolumeEvent struct { - Volume *HostVolume -} - -// CSIVolumeEvent holds a newly updated or deleted CSI volume to be -// used as an event in the event stream -type CSIVolumeEvent struct { - Volume *CSIVolume -} - -// CSIPluginEvent holds a newly updated or deleted CSI plugin to be -// used as an event in the event stream -type CSIPluginEvent struct { - Plugin *CSIPlugin -} diff --git a/nomad/structs/funcs.go b/nomad/structs/funcs.go index 0b0278ecf69..93d48aaec71 100644 --- a/nomad/structs/funcs.go +++ b/nomad/structs/funcs.go @@ -145,9 +145,6 @@ func AllocsFit(node *Node, allocs []*Allocation, netIdx *NetworkIndex, checkDevi reservedCores := map[uint16]struct{}{} var coreOverlap bool - hostVolumeClaims := map[string]int{} - exclusiveHostVolumeClaims := []string{} - // For each alloc, add the resources for _, alloc := range allocs { // Do not consider the resource impact of terminal allocations @@ -166,18 +163,6 @@ func AllocsFit(node *Node, allocs []*Allocation, netIdx *NetworkIndex, checkDevi reservedCores[core] = struct{}{} } } - - // Job will be nil in the scheduler, where we're not performing this check anyways - if checkDevices && alloc.Job != nil { - group := alloc.Job.LookupTaskGroup(alloc.TaskGroup) - for _, volReq := range group.Volumes { - hostVolumeClaims[volReq.Source]++ - if volReq.AccessMode == - HostVolumeAccessModeSingleNodeSingleWriter { - exclusiveHostVolumeClaims = append(exclusiveHostVolumeClaims, volReq.Source) - } - } - } } if coreOverlap { @@ -213,18 +198,12 @@ func AllocsFit(node *Node, allocs []*Allocation, netIdx *NetworkIndex, checkDevi return false, "bandwidth exceeded", used, nil } - // Check devices and host volumes + // Check devices if checkDevices { accounter := NewDeviceAccounter(node) if accounter.AddAllocs(allocs) { return false, "device oversubscribed", used, nil } - - for _, exclusiveClaim := range exclusiveHostVolumeClaims { - if hostVolumeClaims[exclusiveClaim] > 1 { - return false, "conflicting claims for host volume with single-writer", used, nil - } - } } // Allocations fit! 
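For readers following the AllocsFit change just above: the conflict check this backport removes boils down to counting group volume claims per source across the allocations on a node and failing placement when a source requested with the single-node-single-writer access mode has more than one claimant. The following is a minimal, self-contained Go sketch of that logic only; the volumeClaim type and findExclusiveConflict helper are illustrative stand-ins, not types from the Nomad structs package.

package main

import "fmt"

// volumeClaim is a simplified stand-in for a task group's host volume request.
type volumeClaim struct {
	Source    string
	Exclusive bool // corresponds to the single-node-single-writer access mode
}

// findExclusiveConflict reports whether any volume source that is claimed with
// exclusive (single-writer) access has more than one claimant, returning the
// offending source if so.
func findExclusiveConflict(allocClaims [][]volumeClaim) (string, bool) {
	counts := map[string]int{}
	exclusive := map[string]bool{}
	for _, claims := range allocClaims {
		for _, c := range claims {
			counts[c.Source]++
			if c.Exclusive {
				exclusive[c.Source] = true
			}
		}
	}
	for source := range exclusive {
		if counts[source] > 1 {
			return source, true
		}
	}
	return "", false
}

func main() {
	// One allocation claims "example" exclusively, a second claims it as well,
	// so placement of both on the same node should be rejected.
	claims := [][]volumeClaim{
		{{Source: "example", Exclusive: true}},
		{{Source: "example", Exclusive: false}},
	}
	if source, conflict := findExclusiveConflict(claims); conflict {
		fmt.Printf("conflicting claims for host volume %q with single-writer\n", source)
	}
}

The funcs_test.go hunk that follows removes the test exercising exactly this behavior.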
diff --git a/nomad/structs/funcs_test.go b/nomad/structs/funcs_test.go index b27262db0c6..2b9476c8769 100644 --- a/nomad/structs/funcs_test.go +++ b/nomad/structs/funcs_test.go @@ -538,54 +538,6 @@ func TestAllocsFit_Devices(t *testing.T) { require.True(fit) } -// Tests that AllocsFit detects volume collisions for volumes that have -// exclusive access -func TestAllocsFit_ExclusiveVolumes(t *testing.T) { - ci.Parallel(t) - - n := node2k() - a1 := &Allocation{ - TaskGroup: "group", - Job: &Job{TaskGroups: []*TaskGroup{{Name: "group", Volumes: map[string]*VolumeRequest{ - "foo": { - Source: "example", - AccessMode: HostVolumeAccessModeSingleNodeSingleWriter, - }, - }}}}, - AllocatedResources: &AllocatedResources{ - Tasks: map[string]*AllocatedTaskResources{ - "web": { - Cpu: AllocatedCpuResources{CpuShares: 500}, - Memory: AllocatedMemoryResources{MemoryMB: 500}, - }, - }, - }, - } - a2 := a1.Copy() - a2.AllocatedResources.Tasks["web"] = &AllocatedTaskResources{ - Cpu: AllocatedCpuResources{CpuShares: 500}, - Memory: AllocatedMemoryResources{MemoryMB: 500}, - } - a2.Job.TaskGroups[0].Volumes["foo"].AccessMode = HostVolumeAccessModeSingleNodeMultiWriter - - // Should fit one allocation - fit, _, _, err := AllocsFit(n, []*Allocation{a1}, nil, true) - must.NoError(t, err) - must.True(t, fit) - - // Should not fit second allocation - fit, msg, _, err := AllocsFit(n, []*Allocation{a1, a2}, nil, true) - must.NoError(t, err) - must.False(t, fit) - must.Eq(t, "conflicting claims for host volume with single-writer", msg) - - // Should not fit second allocation but won't detect since we disabled - // checking host volumes - fit, _, _, err = AllocsFit(n, []*Allocation{a1, a2}, nil, false) - must.NoError(t, err) - must.True(t, fit) -} - // TestAllocsFit_MemoryOversubscription asserts that only reserved memory is // used for capacity func TestAllocsFit_MemoryOversubscription(t *testing.T) { diff --git a/nomad/structs/host_volumes.go b/nomad/structs/host_volumes.go deleted file mode 100644 index b475a33bce6..00000000000 --- a/nomad/structs/host_volumes.go +++ /dev/null @@ -1,439 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package structs - -import ( - "errors" - "fmt" - "maps" - "strings" - "time" - - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/nomad/helper" - "github.com/hashicorp/nomad/helper/uuid" -) - -type HostVolume struct { - // Namespace is the Nomad namespace for the host volume, which constrains - // which jobs can mount it. - Namespace string - - // ID is a UUID-like string generated by the server. - ID string - - // Name is the name that group.volume will use to identify the volume - // source. Not expected to be unique. - Name string - - // PluginID is the name of the host volume plugin on the client that will be - // used for creating the volume. If omitted, the client will use its default - // built-in plugin. - PluginID string - - // NodePool is the node pool of the node where the volume is placed. If the - // user doesn't provide a node ID, a node will be selected using the - // NodePool and Constraints. If the user provides both NodePool and NodeID, - // NodePool will be used to validate the request. If omitted, the server - // will populate this value in before writing the volume to Raft. - NodePool string - - // NodeID is the node where the volume is placed. If the user doesn't - // provide a NodeID, one will be selected using the NodePool and - // Constraints. 
If omitted, this field will then be populated by the server - // before writing the volume to Raft. - NodeID string - - // Constraints are optional. If the NodeID is not provided, the NodePool and - // Constraints are used to select a node. If the NodeID is provided, - // Constraints are used to validate that the node meets those constraints at - // the time of volume creation. - Constraints []*Constraint `json:",omitempty"` - - // Because storage may allow only specific intervals of size, we accept a - // min and max and return the actual capacity when the volume is created or - // updated on the client - RequestedCapacityMinBytes int64 - RequestedCapacityMaxBytes int64 - CapacityBytes int64 - - // RequestedCapabilities defines the options available to group.volume - // blocks. The scheduler checks against the listed capability blocks and - // selects a node for placement if *any* capability block works. - RequestedCapabilities []*HostVolumeCapability - - // Parameters are an opaque map of parameters for the host volume plugin. - Parameters map[string]string `json:",omitempty"` - - // HostPath is the path on disk where the volume's mount point was - // created. We record this to make debugging easier. - HostPath string - - // State represents the overall state of the volume. One of pending, ready, - // deleted. - State HostVolumeState - - CreateIndex uint64 - CreateTime int64 // Unix timestamp in nanoseconds since epoch - - ModifyIndex uint64 - ModifyTime int64 // Unix timestamp in nanoseconds since epoch - - // Allocations is the list of non-client-terminal allocations with claims on - // this host volume. They are denormalized on read and this field will be - // never written to Raft - Allocations []*AllocListStub `json:",omitempty"` -} - -type HostVolumeState string - -const ( - HostVolumeStateUnknown HostVolumeState = "" // never write this to Raft - HostVolumeStatePending HostVolumeState = "pending" - HostVolumeStateReady HostVolumeState = "ready" - HostVolumeStateUnavailable HostVolumeState = "unavailable" -) - -func (hv *HostVolume) Copy() *HostVolume { - if hv == nil { - return nil - } - - nhv := *hv - nhv.Constraints = helper.CopySlice(hv.Constraints) - nhv.RequestedCapabilities = helper.CopySlice(hv.RequestedCapabilities) - nhv.Parameters = maps.Clone(hv.Parameters) - return &nhv -} - -func (hv *HostVolume) Stub() *HostVolumeStub { - if hv == nil { - return nil - } - - return &HostVolumeStub{ - Namespace: hv.Namespace, - ID: hv.ID, - Name: hv.Name, - PluginID: hv.PluginID, - NodePool: hv.NodePool, - NodeID: hv.NodeID, - CapacityBytes: hv.CapacityBytes, - State: hv.State, - CreateIndex: hv.CreateIndex, - CreateTime: hv.CreateTime, - ModifyIndex: hv.ModifyIndex, - ModifyTime: hv.ModifyTime, - } -} - -// Validate verifies that the submitted HostVolume spec has valid field values, -// without validating any changes or state (see ValidateUpdate). 
-func (hv *HostVolume) Validate() error { - - var mErr *multierror.Error - - if hv.ID != "" && !helper.IsUUID(hv.ID) { - mErr = multierror.Append(mErr, fmt.Errorf("invalid ID %q", hv.ID)) - } - - if hv.Name == "" { - mErr = multierror.Append(mErr, errors.New("missing name")) - } - - if hv.RequestedCapacityMaxBytes < hv.RequestedCapacityMinBytes { - mErr = multierror.Append(mErr, fmt.Errorf( - "capacity_max (%d) must be larger than capacity_min (%d)", - hv.RequestedCapacityMaxBytes, hv.RequestedCapacityMinBytes)) - } - - for _, cap := range hv.RequestedCapabilities { - err := cap.Validate() - if err != nil { - mErr = multierror.Append(mErr, err) - } - } - - for _, constraint := range hv.Constraints { - if err := constraint.Validate(); err != nil { - mErr = multierror.Append(mErr, fmt.Errorf("invalid constraint: %v", err)) - } - switch constraint.Operand { - case ConstraintDistinctHosts, ConstraintDistinctProperty: - mErr = multierror.Append(mErr, fmt.Errorf( - "invalid constraint %s: host volumes of the same name are always on distinct hosts", constraint.Operand)) - default: - } - } - - return helper.FlattenMultierror(mErr.ErrorOrNil()) -} - -// ValidateUpdate verifies that an update to a volume is safe to make. -func (hv *HostVolume) ValidateUpdate(existing *HostVolume) error { - if existing == nil { - return nil - } - - var mErr *multierror.Error - if len(existing.Allocations) > 0 { - allocIDs := helper.ConvertSlice(existing.Allocations, - func(a *AllocListStub) string { return a.ID }) - mErr = multierror.Append(mErr, fmt.Errorf( - "cannot update a volume in use: claimed by allocs (%s)", - strings.Join(allocIDs, ", "))) - } - - if hv.NodeID != "" && hv.NodeID != existing.NodeID { - mErr = multierror.Append(mErr, errors.New("node ID cannot be updated")) - } - if hv.NodePool != "" && hv.NodePool != existing.NodePool { - mErr = multierror.Append(mErr, errors.New("node pool cannot be updated")) - } - - if hv.RequestedCapacityMaxBytes > 0 && - hv.RequestedCapacityMaxBytes < existing.CapacityBytes { - mErr = multierror.Append(mErr, fmt.Errorf( - "capacity_max (%d) cannot be less than existing provisioned capacity (%d)", - hv.RequestedCapacityMaxBytes, existing.CapacityBytes)) - } - - return mErr.ErrorOrNil() -} - -const DefaultHostVolumePlugin = "default" - -// CanonicalizeForCreate is called in the RPC handler to ensure we call client -// RPCs with correctly populated fields from the existing volume, even if the -// RPC request includes otherwise valid zero-values. This method should be -// called on request objects or a copy, never on a state store object directly. 
-func (hv *HostVolume) CanonicalizeForCreate(existing *HostVolume, now time.Time) { - if existing == nil { - hv.ID = uuid.Generate() - if hv.PluginID == "" { - hv.PluginID = DefaultHostVolumePlugin - } - hv.CapacityBytes = 0 // returned by plugin - hv.HostPath = "" // returned by plugin - hv.CreateTime = now.UnixNano() - - if len(hv.RequestedCapabilities) == 0 { - hv.RequestedCapabilities = []*HostVolumeCapability{{ - AttachmentMode: HostVolumeAttachmentModeFilesystem, - AccessMode: HostVolumeAccessModeSingleNodeWriter, - }} - } - - } else { - if hv.PluginID == "" { - hv.PluginID = existing.PluginID - } - if hv.NodePool == "" { - hv.NodePool = existing.NodePool - } - hv.NodeID = existing.NodeID - hv.Constraints = existing.Constraints - hv.CapacityBytes = existing.CapacityBytes - hv.HostPath = existing.HostPath - hv.CreateTime = existing.CreateTime - } - - hv.State = HostVolumeStatePending // reset on any change - hv.ModifyTime = now.UnixNano() - hv.Allocations = nil // set on read only -} - -// CanonicalizeForRegister is called in the RPC handler to ensure we call client -// RPCs with correctly populated fields from the existing volume, even if the -// RPC request includes otherwise valid zero-values. This method should be -// called on request objects or a copy, never on a state store object directly. -func (hv *HostVolume) CanonicalizeForRegister(existing *HostVolume, now time.Time) { - if existing == nil { - hv.ID = uuid.Generate() - hv.CreateTime = now.UnixNano() - - if len(hv.RequestedCapabilities) == 0 { - hv.RequestedCapabilities = []*HostVolumeCapability{{ - AttachmentMode: HostVolumeAttachmentModeFilesystem, - AccessMode: HostVolumeAccessModeSingleNodeWriter, - }} - } - - } else { - if hv.PluginID == "" { - hv.PluginID = existing.PluginID - } - if hv.NodePool == "" { - hv.NodePool = existing.NodePool - } - hv.NodeID = existing.NodeID - hv.Constraints = existing.Constraints - hv.CreateTime = existing.CreateTime - } - - hv.State = HostVolumeStatePending // reset on any change - hv.ModifyTime = now.UnixNano() - hv.Allocations = nil // set on read only -} - -// GetNamespace implements the paginator.NamespaceGetter interface -func (hv *HostVolume) GetNamespace() string { - return hv.Namespace -} - -// GetID implements the paginator.IDGetter interface -func (hv *HostVolume) GetID() string { - return hv.ID -} - -// HostVolumeCapability is the requested attachment and access mode for a volume -type HostVolumeCapability struct { - AttachmentMode VolumeAttachmentMode - AccessMode VolumeAccessMode -} - -func (hvc *HostVolumeCapability) Copy() *HostVolumeCapability { - if hvc == nil { - return nil - } - - nhvc := *hvc - return &nhvc -} - -func (hvc *HostVolumeCapability) Validate() error { - if hvc == nil { - return errors.New("validate called on nil host volume capability") - } - - switch hvc.AttachmentMode { - case HostVolumeAttachmentModeBlockDevice, - HostVolumeAttachmentModeFilesystem: - default: - return fmt.Errorf("invalid attachment mode: %q", hvc.AttachmentMode) - } - - switch hvc.AccessMode { - case HostVolumeAccessModeSingleNodeReader, - HostVolumeAccessModeSingleNodeWriter, - HostVolumeAccessModeSingleNodeSingleWriter, - HostVolumeAccessModeSingleNodeMultiWriter: - default: - return fmt.Errorf("invalid access mode: %q", hvc.AccessMode) - } - - return nil -} - -// HostVolumeAttachmentModes choose the type of storage API that will be used to -// interact with the device. 
-const ( - HostVolumeAttachmentModeUnknown VolumeAttachmentMode = "" - HostVolumeAttachmentModeBlockDevice VolumeAttachmentMode = "block-device" - HostVolumeAttachmentModeFilesystem VolumeAttachmentMode = "file-system" -) - -// HostVolumeAccessModes indicate how Nomad should make the volume available to -// concurrent allocations. -const ( - HostVolumeAccessModeUnknown VolumeAccessMode = "" - - HostVolumeAccessModeSingleNodeReader VolumeAccessMode = "single-node-reader-only" - HostVolumeAccessModeSingleNodeWriter VolumeAccessMode = "single-node-writer" - HostVolumeAccessModeSingleNodeSingleWriter VolumeAccessMode = "single-node-single-writer" - HostVolumeAccessModeSingleNodeMultiWriter VolumeAccessMode = "single-node-multi-writer" -) - -// HostVolumeStub is used for responses for the list volumes endpoint -type HostVolumeStub struct { - Namespace string - ID string - Name string - PluginID string - NodePool string - NodeID string - CapacityBytes int64 - State HostVolumeState - - CreateIndex uint64 - CreateTime int64 - - ModifyIndex uint64 - ModifyTime int64 -} - -type HostVolumeCreateRequest struct { - Volume *HostVolume - - // PolicyOverride is set when the user is attempting to override any - // Enterprise policy enforcement - PolicyOverride bool - - WriteRequest -} - -type HostVolumeCreateResponse struct { - Volume *HostVolume - - // Warnings are non-fatal messages from Enterprise policy enforcement - Warnings string - WriteMeta -} - -type HostVolumeRegisterRequest struct { - Volume *HostVolume - - // PolicyOverride is set when the user is attempting to override any - // Enterprise policy enforcement - PolicyOverride bool - - WriteRequest -} - -type HostVolumeRegisterResponse struct { - Volume *HostVolume - - // Warnings are non-fatal messages from Enterprise policy enforcement - Warnings string - WriteMeta -} - -type HostVolumeDeleteRequest struct { - VolumeID string - WriteRequest -} - -type HostVolumeDeleteResponse struct { - WriteMeta -} - -type HostVolumeGetRequest struct { - ID string - QueryOptions -} - -type HostVolumeGetResponse struct { - Volume *HostVolume - QueryMeta -} - -type HostVolumeListRequest struct { - NodeID string // filter - NodePool string // filter - QueryOptions -} - -type HostVolumeListResponse struct { - Volumes []*HostVolumeStub - QueryMeta -} - -type TaskGroupVolumeClaimDeleteRequest struct { - ClaimID string - WriteRequest -} - -type TaskGroupVolumeClaimDeleteResponse struct { - WriteMeta -} diff --git a/nomad/structs/host_volumes_test.go b/nomad/structs/host_volumes_test.go deleted file mode 100644 index e0e00b843fc..00000000000 --- a/nomad/structs/host_volumes_test.go +++ /dev/null @@ -1,344 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -package structs - -import ( - "testing" - "time" - - "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper/uuid" - "github.com/shoenig/test/must" -) - -func TestHostVolume_Copy(t *testing.T) { - ci.Parallel(t) - - out := (*HostVolume)(nil).Copy() - must.Nil(t, out) - - vol := &HostVolume{ - Namespace: DefaultNamespace, - ID: uuid.Generate(), - Name: "example", - PluginID: "example-plugin", - NodePool: NodePoolDefault, - NodeID: uuid.Generate(), - Constraints: []*Constraint{{ - LTarget: "${meta.rack}", - RTarget: "r1", - Operand: "=", - }}, - CapacityBytes: 150000, - RequestedCapabilities: []*HostVolumeCapability{{ - AttachmentMode: HostVolumeAttachmentModeFilesystem, - AccessMode: HostVolumeAccessModeSingleNodeWriter, - }}, - Parameters: map[string]string{"foo": "bar"}, - } - - out = vol.Copy() - must.Eq(t, vol, out) - - out.Allocations = []*AllocListStub{{ID: uuid.Generate()}} - out.Constraints[0].LTarget = "${meta.node_class}" - out.RequestedCapabilities = append(out.RequestedCapabilities, &HostVolumeCapability{ - AttachmentMode: HostVolumeAttachmentModeBlockDevice, - AccessMode: HostVolumeAccessModeSingleNodeMultiWriter, - }) - out.Parameters["foo"] = "baz" - - must.Nil(t, vol.Allocations) - must.Eq(t, "${meta.rack}", vol.Constraints[0].LTarget) - must.Len(t, 1, vol.RequestedCapabilities) - must.Eq(t, "bar", vol.Parameters["foo"]) -} - -func TestHostVolume_Validate(t *testing.T) { - ci.Parallel(t) - - invalid := &HostVolume{RequestedCapabilities: []*HostVolumeCapability{ - {AttachmentMode: "foo"}}} - err := invalid.Validate() - must.EqError(t, err, `2 errors occurred: - * missing name - * invalid attachment mode: "foo" - -`) - - invalid = &HostVolume{} - err = invalid.Validate() - // single error should be flattened - must.EqError(t, err, "missing name") - - invalid = &HostVolume{ - ID: "../../not-a-uuid", - Name: "example", - PluginID: "example-plugin", - Constraints: []*Constraint{{ - RTarget: "r1", - Operand: "=", - }}, - RequestedCapacityMinBytes: 200000, - RequestedCapacityMaxBytes: 100000, - RequestedCapabilities: []*HostVolumeCapability{ - { - AttachmentMode: HostVolumeAttachmentModeFilesystem, - AccessMode: HostVolumeAccessModeSingleNodeWriter, - }, - { - AttachmentMode: "bad", - AccessMode: "invalid", - }, - }, - } - err = invalid.Validate() - must.EqError(t, err, `4 errors occurred: - * invalid ID "../../not-a-uuid" - * capacity_max (100000) must be larger than capacity_min (200000) - * invalid attachment mode: "bad" - * invalid constraint: 1 error occurred: - * No LTarget provided but is required by constraint - - - -`) - - vol := &HostVolume{ - Namespace: DefaultNamespace, - ID: uuid.Generate(), - Name: "example", - PluginID: "example-plugin", - NodePool: NodePoolDefault, - NodeID: uuid.Generate(), - Constraints: []*Constraint{{ - LTarget: "${meta.rack}", - RTarget: "r1", - Operand: "=", - }}, - RequestedCapacityMinBytes: 100000, - RequestedCapacityMaxBytes: 200000, - CapacityBytes: 150000, - RequestedCapabilities: []*HostVolumeCapability{{ - AttachmentMode: HostVolumeAttachmentModeFilesystem, - AccessMode: HostVolumeAccessModeSingleNodeWriter, - }}, - Parameters: map[string]string{"foo": "bar"}, - } - must.NoError(t, vol.Validate()) -} - -func TestHostVolume_ValidateUpdate(t *testing.T) { - ci.Parallel(t) - - vol := &HostVolume{ - NodePool: NodePoolDefault, - NodeID: uuid.Generate(), - RequestedCapacityMinBytes: 100000, - RequestedCapacityMaxBytes: 120000, - Parameters: map[string]string{"baz": "qux"}, - } - err := 
vol.ValidateUpdate(nil) - must.NoError(t, err) - - existing := &HostVolume{ - NodePool: "prod", - NodeID: uuid.Generate(), - RequestedCapacityMinBytes: 100000, - RequestedCapacityMaxBytes: 200000, - CapacityBytes: 150000, - RequestedCapabilities: []*HostVolumeCapability{{ - AttachmentMode: HostVolumeAttachmentModeFilesystem, - AccessMode: HostVolumeAccessModeSingleNodeWriter, - }}, - Parameters: map[string]string{"foo": "bar"}, - Allocations: []*AllocListStub{ - {ID: "6bd66bfa"}, - {ID: "7032e570"}, - }, - } - - err = vol.ValidateUpdate(existing) - must.EqError(t, err, `4 errors occurred: - * cannot update a volume in use: claimed by allocs (6bd66bfa, 7032e570) - * node ID cannot be updated - * node pool cannot be updated - * capacity_max (120000) cannot be less than existing provisioned capacity (150000) - -`) - -} - -func TestHostVolume_CanonicalizeForCreate(t *testing.T) { - now := time.Now() - vol := &HostVolume{ - CapacityBytes: 100000, - HostPath: "/etc/passwd", - Allocations: []*AllocListStub{ - {ID: "6bd66bfa"}, - {ID: "7032e570"}, - }, - } - vol.CanonicalizeForCreate(nil, now) - - must.NotEq(t, "", vol.ID) - must.Eq(t, now.UnixNano(), vol.CreateTime) - must.Eq(t, now.UnixNano(), vol.ModifyTime) - must.Eq(t, HostVolumeStatePending, vol.State) - must.Nil(t, vol.Allocations) - must.Eq(t, "", vol.HostPath) - must.Zero(t, vol.CapacityBytes) - - vol = &HostVolume{ - ID: "82f357d6-a5ec-11ef-9e36-3f9884222736", - RequestedCapacityMinBytes: 100000, - RequestedCapacityMaxBytes: 500000, - RequestedCapabilities: []*HostVolumeCapability{{ - AttachmentMode: HostVolumeAttachmentModeFilesystem, - AccessMode: HostVolumeAccessModeSingleNodeMultiWriter, - }}, - } - existing := &HostVolume{ - ID: "82f357d6-a5ec-11ef-9e36-3f9884222736", - PluginID: "example_plugin", - NodePool: "prod", - NodeID: uuid.Generate(), - RequestedCapacityMinBytes: 100000, - RequestedCapacityMaxBytes: 200000, - CapacityBytes: 150000, - RequestedCapabilities: []*HostVolumeCapability{{ - AttachmentMode: HostVolumeAttachmentModeFilesystem, - AccessMode: HostVolumeAccessModeSingleNodeWriter, - }}, - Constraints: []*Constraint{{ - LTarget: "${meta.rack}", - RTarget: "r1", - Operand: "=", - }}, - Parameters: map[string]string{"foo": "bar"}, - Allocations: []*AllocListStub{ - {ID: "6bd66bfa"}, - {ID: "7032e570"}, - }, - HostPath: "/var/nomad/alloc_mounts/82f357d6.ext4", - CreateTime: 1, - } - - vol.CanonicalizeForCreate(existing, now) - must.Eq(t, existing.ID, vol.ID) - must.Eq(t, existing.PluginID, vol.PluginID) - must.Eq(t, existing.NodePool, vol.NodePool) - must.Eq(t, existing.NodeID, vol.NodeID) - must.Eq(t, []*Constraint{{ - LTarget: "${meta.rack}", - RTarget: "r1", - Operand: "=", - }}, vol.Constraints) - must.Eq(t, 100000, vol.RequestedCapacityMinBytes) - must.Eq(t, 500000, vol.RequestedCapacityMaxBytes) - must.Eq(t, 150000, vol.CapacityBytes) - - must.Eq(t, []*HostVolumeCapability{{ - AttachmentMode: HostVolumeAttachmentModeFilesystem, - AccessMode: HostVolumeAccessModeSingleNodeMultiWriter, - }}, vol.RequestedCapabilities) - - must.Eq(t, "/var/nomad/alloc_mounts/82f357d6.ext4", vol.HostPath) - must.Eq(t, HostVolumeStatePending, vol.State) - - must.Eq(t, existing.CreateTime, vol.CreateTime) - must.Eq(t, now.UnixNano(), vol.ModifyTime) - must.Nil(t, vol.Allocations) -} - -func TestHostVolume_CanonicalizeForRegister(t *testing.T) { - now := time.Now() - nodeID := uuid.Generate() - vol := &HostVolume{ - NodeID: nodeID, - CapacityBytes: 100000, - HostPath: "/etc/passwd", - Allocations: []*AllocListStub{ - {ID: "6bd66bfa"}, - 
{ID: "7032e570"}, - }, - } - vol.CanonicalizeForRegister(nil, now) - - must.NotEq(t, "", vol.ID) - must.Eq(t, now.UnixNano(), vol.CreateTime) - must.Eq(t, now.UnixNano(), vol.ModifyTime) - must.Eq(t, HostVolumeStatePending, vol.State) - must.Nil(t, vol.Allocations) - must.Eq(t, "/etc/passwd", vol.HostPath) - must.Eq(t, nodeID, vol.NodeID) - must.Eq(t, 100000, vol.CapacityBytes) - - vol = &HostVolume{ - ID: "82f357d6-a5ec-11ef-9e36-3f9884222736", - PluginID: "example_plugin.v2", - RequestedCapacityMinBytes: 100000, - RequestedCapacityMaxBytes: 500000, - CapacityBytes: 200000, - NodePool: "infra", - RequestedCapabilities: []*HostVolumeCapability{{ - AttachmentMode: HostVolumeAttachmentModeFilesystem, - AccessMode: HostVolumeAccessModeSingleNodeMultiWriter, - }}, - HostPath: "/var/nomad/alloc_mounts/82f357d6.ext4", - } - existing := &HostVolume{ - ID: "82f357d6-a5ec-11ef-9e36-3f9884222736", - PluginID: "example_plugin.v1", - NodePool: "prod", - NodeID: uuid.Generate(), - RequestedCapacityMinBytes: 100000, - RequestedCapacityMaxBytes: 200000, - CapacityBytes: 150000, - RequestedCapabilities: []*HostVolumeCapability{{ - AttachmentMode: HostVolumeAttachmentModeFilesystem, - AccessMode: HostVolumeAccessModeSingleNodeWriter, - }}, - Constraints: []*Constraint{{ - LTarget: "${meta.rack}", - RTarget: "r1", - Operand: "=", - }}, - Parameters: map[string]string{"foo": "bar"}, - Allocations: []*AllocListStub{ - {ID: "6bd66bfa"}, - {ID: "7032e570"}, - }, - HostPath: "/var/nomad/alloc_mounts/82f357d6.img", - CreateTime: 1, - } - - vol.CanonicalizeForRegister(existing, now) - - must.Eq(t, existing.ID, vol.ID) - must.Eq(t, "example_plugin.v2", vol.PluginID) - must.Eq(t, "infra", vol.NodePool) - must.Eq(t, existing.NodeID, vol.NodeID) - must.Eq(t, []*Constraint{{ - LTarget: "${meta.rack}", - RTarget: "r1", - Operand: "=", - }}, vol.Constraints) - must.Eq(t, 100000, vol.RequestedCapacityMinBytes) - must.Eq(t, 500000, vol.RequestedCapacityMaxBytes) - must.Eq(t, 200000, vol.CapacityBytes) - - must.Eq(t, []*HostVolumeCapability{{ - AttachmentMode: HostVolumeAttachmentModeFilesystem, - AccessMode: HostVolumeAccessModeSingleNodeMultiWriter, - }}, vol.RequestedCapabilities) - - must.Eq(t, "/var/nomad/alloc_mounts/82f357d6.ext4", vol.HostPath) - must.Eq(t, HostVolumeStatePending, vol.State) - - must.Eq(t, existing.CreateTime, vol.CreateTime) - must.Eq(t, now.UnixNano(), vol.ModifyTime) - must.Nil(t, vol.Allocations) - -} diff --git a/nomad/structs/search.go b/nomad/structs/search.go index 53aebc01e2a..b71798c2194 100644 --- a/nomad/structs/search.go +++ b/nomad/structs/search.go @@ -22,7 +22,6 @@ const ( Plugins Context = "plugins" Variables Context = "vars" Volumes Context = "volumes" - HostVolumes Context = "host_volumes" // Subtypes used in fuzzy matching. Groups Context = "groups" diff --git a/nomad/structs/structs.go b/nomad/structs/structs.go index dca5214559b..b3f0e8e4ac2 100644 --- a/nomad/structs/structs.go +++ b/nomad/structs/structs.go @@ -132,11 +132,6 @@ const ( NamespaceUpsertRequestType MessageType = 64 NamespaceDeleteRequestType MessageType = 65 - // MessageTypes 66-74 are in Nomad Enterprise - HostVolumeRegisterRequestType MessageType = 75 - HostVolumeDeleteRequestType MessageType = 76 - TaskGroupHostVolumeClaimDeleteRequestType MessageType = 77 - // NOTE: MessageTypes are shared between CE and ENT. If you need to add a // new type, check that ENT is not already using that value. 
) @@ -7775,32 +7770,6 @@ func (tg *TaskGroup) SetConstraints(newConstraints []*Constraint) { tg.Constraints = newConstraints } -// TaskGroupHostVolumeClaim associates a task group with a host volume ID. It's -// used for stateful deployments, i.e., volume requests with "sticky" set to -// true. -type TaskGroupHostVolumeClaim struct { - ID string - Namespace string - JobID string - TaskGroupName string - AllocID string // used for checks to make sure we don't insert duplicate claims for the same alloc - - VolumeID string - VolumeName string - - CreateIndex uint64 - ModifyIndex uint64 -} - -// ClaimedByAlloc checks if there's a match between allocation ID and volume ID -func (tgvc *TaskGroupHostVolumeClaim) ClaimedByAlloc(otherClaim *TaskGroupHostVolumeClaim) bool { - if tgvc == nil || otherClaim == nil { - return tgvc == otherClaim - } - - return tgvc.AllocID == otherClaim.AllocID && tgvc.VolumeID == otherClaim.VolumeID -} - // CheckRestart describes if and when a task should be restarted based on // failing health checks. type CheckRestart struct { @@ -9144,6 +9113,44 @@ type AllocState struct { Time time.Time } +// TaskHandle is optional handle to a task propogated to the servers for use +// by remote tasks. Since remote tasks are not implicitly lost when the node +// they are assigned to is down, their state is migrated to the replacement +// allocation. +// +// Minimal set of fields from plugins/drivers/task_handle.go:TaskHandle +type TaskHandle struct { + // Version of driver state. Used by the driver to gracefully handle + // plugin upgrades. + Version int + + // Driver-specific state containing a handle to the remote task. + DriverState []byte +} + +func (h *TaskHandle) Copy() *TaskHandle { + if h == nil { + return nil + } + + newTH := TaskHandle{ + Version: h.Version, + DriverState: make([]byte, len(h.DriverState)), + } + copy(newTH.DriverState, h.DriverState) + return &newTH +} + +func (h *TaskHandle) Equal(o *TaskHandle) bool { + if h == nil || o == nil { + return h == o + } + if h.Version != o.Version { + return false + } + return bytes.Equal(h.DriverState, o.DriverState) +} + // Set of possible states for a task. const ( TaskStatePending = "pending" // The task is waiting to be run. @@ -9178,9 +9185,9 @@ type TaskState struct { // Series of task events that transition the state of the task. Events []*TaskEvent - // // Experimental - TaskHandle is based on drivers.TaskHandle and used - // // by remote task drivers to migrate task handles between allocations. - // TaskHandle *TaskHandle + // Experimental - TaskHandle is based on drivers.TaskHandle and used + // by remote task drivers to migrate task handles between allocations. + TaskHandle *TaskHandle // Enterprise Only - Paused is set to the paused state of the task. 
See // task_sched.go @@ -9216,6 +9223,7 @@ func (ts *TaskState) Copy() *TaskState { } } + newTS.TaskHandle = ts.TaskHandle.Copy() return newTS } @@ -9250,6 +9258,9 @@ func (ts *TaskState) Equal(o *TaskState) bool { }) { return false } + if !ts.TaskHandle.Equal(o.TaskHandle) { + return false + } return true } diff --git a/nomad/structs/volumes_test.go b/nomad/structs/volume_test.go similarity index 93% rename from nomad/structs/volumes_test.go rename to nomad/structs/volume_test.go index fb5a1a04d64..02e0715d1a3 100644 --- a/nomad/structs/volumes_test.go +++ b/nomad/structs/volume_test.go @@ -9,6 +9,7 @@ import ( "github.com/hashicorp/nomad/ci" "github.com/shoenig/test/must" + "github.com/stretchr/testify/require" ) func TestVolumeRequest_Validate(t *testing.T) { @@ -31,9 +32,9 @@ func TestVolumeRequest_Validate(t *testing.T) { { name: "host volume with CSI volume config", expected: []string{ - "volume has an empty source", + "host volumes cannot have an access mode", + "host volumes cannot have an attachment mode", "host volumes cannot have mount options", - "single-node-reader-only volumes must be read-only", "volume cannot be per_alloc for system or sysbatch jobs", "volume cannot be per_alloc when canaries are in use", }, @@ -85,24 +86,13 @@ func TestVolumeRequest_Validate(t *testing.T) { PerAlloc: true, }, }, - { - name: "per_alloc sticky", - expected: []string{ - "volume cannot be per_alloc and sticky at the same time", - }, - req: &VolumeRequest{ - Type: VolumeTypeCSI, - PerAlloc: true, - Sticky: true, - }, - }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { err := tc.req.Validate(JobTypeSystem, tc.taskGroupCount, tc.canariesCount) for _, expected := range tc.expected { - must.StrContains(t, err.Error(), expected) + require.Contains(t, err.Error(), expected) } }) } diff --git a/nomad/structs/volumes.go b/nomad/structs/volumes.go index e1987f7d1ca..daacd5d8670 100644 --- a/nomad/structs/volumes.go +++ b/nomad/structs/volumes.go @@ -31,18 +31,6 @@ type ClientHostVolumeConfig struct { Name string `hcl:",key"` Path string `hcl:"path"` ReadOnly bool `hcl:"read_only"` - // ID is set for dynamic host volumes only. - ID string `hcl:"-"` -} - -func (p *ClientHostVolumeConfig) Equal(o *ClientHostVolumeConfig) bool { - if p == nil && o == nil { - return true - } - if p == nil || o == nil { - return false - } - return *p == *o } func (p *ClientHostVolumeConfig) Copy() *ClientHostVolumeConfig { @@ -103,16 +91,14 @@ func HostVolumeSliceMerge(a, b []*ClientHostVolumeConfig) []*ClientHostVolumeCon return n } -// VolumeRequest is a representation of a storage volume that a TaskGroup wishes -// to use. +// VolumeRequest is a representation of a storage volume that a TaskGroup wishes to use. 
type VolumeRequest struct { Name string Type string Source string ReadOnly bool - Sticky bool - AccessMode VolumeAccessMode - AttachmentMode VolumeAttachmentMode + AccessMode CSIVolumeAccessMode + AttachmentMode CSIVolumeAttachmentMode MountOptions *CSIMountOptions PerAlloc bool } @@ -130,8 +116,6 @@ func (v *VolumeRequest) Equal(o *VolumeRequest) bool { return false case v.ReadOnly != o.ReadOnly: return false - case v.Sticky != o.Sticky: - return false case v.AccessMode != o.AccessMode: return false case v.AttachmentMode != o.AttachmentMode: @@ -165,34 +149,21 @@ func (v *VolumeRequest) Validate(jobType string, taskGroupCount, canaries int) e if canaries > 0 { addErr("volume cannot be per_alloc when canaries are in use") } - if v.Sticky { - addErr("volume cannot be per_alloc and sticky at the same time") - } } switch v.Type { case VolumeTypeHost: + if v.AttachmentMode != CSIVolumeAttachmentModeUnknown { + addErr("host volumes cannot have an attachment mode") + } + if v.AccessMode != CSIVolumeAccessModeUnknown { + addErr("host volumes cannot have an access mode") + } if v.MountOptions != nil { addErr("host volumes cannot have mount options") } - switch v.AccessMode { - case HostVolumeAccessModeSingleNodeReader: - if !v.ReadOnly { - addErr("%s volumes must be read-only", v.AccessMode) - } - case HostVolumeAccessModeSingleNodeWriter, - HostVolumeAccessModeSingleNodeSingleWriter, - HostVolumeAccessModeSingleNodeMultiWriter, - HostVolumeAccessModeUnknown: - // dynamic host volumes are all "per node" so there's no way to - // validate that other access modes work for a given volume until we - // have access to other allocations (in the scheduler) - default: - addErr("host volumes cannot be mounted with %s access mode") - } - case VolumeTypeCSI: switch v.AttachmentMode { @@ -270,14 +241,6 @@ func CopyMapVolumeRequest(s map[string]*VolumeRequest) map[string]*VolumeRequest return c } -// VolumeAttachmentMode chooses the type of storage api that will be used to -// interact with the device. -type VolumeAttachmentMode string - -// VolumeAccessMode indicates how a volume should be used in a storage topology -// e.g whether the provider should make the volume available concurrently. -type VolumeAccessMode string - // VolumeMount represents the relationship between a destination path in a task // and the task group volume that should be mounted there. 
type VolumeMount struct { diff --git a/nomad/variables_endpoint.go b/nomad/variables_endpoint.go index 819a547d3fa..1261e8fe89d 100644 --- a/nomad/variables_endpoint.go +++ b/nomad/variables_endpoint.go @@ -10,9 +10,9 @@ import ( "strings" "time" + "github.com/armon/go-metrics" "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-memdb" - metrics "github.com/hashicorp/go-metrics/compat" "github.com/hashicorp/nomad/acl" "github.com/hashicorp/nomad/nomad/auth" diff --git a/nomad/vault.go b/nomad/vault.go index 71e70c890e3..e2827a12284 100644 --- a/nomad/vault.go +++ b/nomad/vault.go @@ -18,8 +18,8 @@ import ( "github.com/hashicorp/nomad/helper/useragent" tomb "gopkg.in/tomb.v2" + metrics "github.com/armon/go-metrics" log "github.com/hashicorp/go-hclog" - metrics "github.com/hashicorp/go-metrics/compat" multierror "github.com/hashicorp/go-multierror" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/nomad/structs/config" diff --git a/nomad/volumewatcher/volumes_watcher.go b/nomad/volumewatcher/volumes_watcher.go index a9cf7ae3524..2da49c7d1f6 100644 --- a/nomad/volumewatcher/volumes_watcher.go +++ b/nomad/volumewatcher/volumes_watcher.go @@ -141,9 +141,9 @@ func (w *Watcher) getVolumes(ctx context.Context, minIndex uint64) ([]*structs.C } // getVolumesImpl retrieves all volumes from the passed state store. -func (w *Watcher) getVolumesImpl(ws memdb.WatchSet, store *state.StateStore) (interface{}, uint64, error) { +func (w *Watcher) getVolumesImpl(ws memdb.WatchSet, state *state.StateStore) (interface{}, uint64, error) { - iter, err := store.CSIVolumes(ws) + iter, err := state.CSIVolumes(ws) if err != nil { return nil, 0, err } @@ -159,7 +159,7 @@ func (w *Watcher) getVolumesImpl(ws memdb.WatchSet, store *state.StateStore) (in } // Use the last index that affected the volume table - index, err := store.Index(state.TableCSIVolumes) + index, err := state.Index("csi_volumes") if err != nil { return nil, 0, err } diff --git a/nomad/worker.go b/nomad/worker.go index 0322d7d38d1..b49ebf4a2dc 100644 --- a/nomad/worker.go +++ b/nomad/worker.go @@ -12,9 +12,9 @@ import ( "sync" "time" + metrics "github.com/armon/go-metrics" log "github.com/hashicorp/go-hclog" memdb "github.com/hashicorp/go-memdb" - metrics "github.com/hashicorp/go-metrics/compat" "github.com/hashicorp/go-version" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/uuid" diff --git a/plugins/csi/plugin.go b/plugins/csi/plugin.go index adbe161dc97..e4f5155dfba 100644 --- a/plugins/csi/plugin.go +++ b/plugins/csi/plugin.go @@ -948,7 +948,7 @@ type VolumeCapability struct { MountVolume *structs.CSIMountOptions } -func VolumeCapabilityFromStructs(sAccessType structs.VolumeAttachmentMode, sAccessMode structs.VolumeAccessMode, sMountOptions *structs.CSIMountOptions) (*VolumeCapability, error) { +func VolumeCapabilityFromStructs(sAccessType structs.CSIVolumeAttachmentMode, sAccessMode structs.CSIVolumeAccessMode, sMountOptions *structs.CSIMountOptions) (*VolumeCapability, error) { var accessType VolumeAccessType switch sAccessType { case structs.CSIVolumeAttachmentModeBlockDevice: diff --git a/plugins/drivers/client.go b/plugins/drivers/client.go index b8d468e366e..3c27df33003 100644 --- a/plugins/drivers/client.go +++ b/plugins/drivers/client.go @@ -79,6 +79,7 @@ func (d *driverPluginClient) Capabilities() (*Capabilities, error) { } caps.MountConfigs = MountConfigSupport(resp.Capabilities.MountConfigs) + caps.RemoteTasks = resp.Capabilities.RemoteTasks caps.DisableLogCollection = 
diff --git a/plugins/drivers/driver.go b/plugins/drivers/driver.go
index 8f9c6418632..e7ec790afcf 100644
--- a/plugins/drivers/driver.go
+++ b/plugins/drivers/driver.go
@@ -168,6 +168,14 @@ type Capabilities struct {
 	// MountConfigs tells Nomad which mounting config options the driver supports.
 	MountConfigs MountConfigSupport
 
+	// RemoteTasks indicates this driver runs tasks on remote systems
+	// instead of locally. The Nomad client can use this information to
+	// adjust behavior such as propagating task handles between allocations
+	// to avoid downtime when a client is lost.
+	//
+	// Deprecated: remote tasks drivers are no longer developed or supported.
+	RemoteTasks bool
+
 	// DisableLogCollection indicates this driver has disabled log collection
 	// and the client should not start a logmon process.
 	DisableLogCollection bool
diff --git a/plugins/drivers/proto/driver.pb.go b/plugins/drivers/proto/driver.pb.go
index b0e017e02cc..86103cd32af 100644
--- a/plugins/drivers/proto/driver.pb.go
+++ b/plugins/drivers/proto/driver.pb.go
@@ -1849,6 +1849,9 @@ type DriverCapabilities struct {
 	MustCreateNetwork bool `protobuf:"varint,5,opt,name=must_create_network,json=mustCreateNetwork,proto3" json:"must_create_network,omitempty"`
 	// MountConfigs indicates whether the driver supports mount configurations.
 	MountConfigs DriverCapabilities_MountConfigs `protobuf:"varint,6,opt,name=mount_configs,json=mountConfigs,proto3,enum=hashicorp.nomad.plugins.drivers.proto.DriverCapabilities_MountConfigs" json:"mount_configs,omitempty"`
+	// remote_tasks indicates whether the driver executes tasks remotely such
+	// on cloud runtimes like AWS ECS.
+	RemoteTasks bool `protobuf:"varint,7,opt,name=remote_tasks,json=remoteTasks,proto3" json:"remote_tasks,omitempty"` // Deprecated: Do not use.
 	// disable_log_collection indicates whether the driver has the capability of
 	// disabling log collection
 	DisableLogCollection bool `protobuf:"varint,8,opt,name=disable_log_collection,json=disableLogCollection,proto3" json:"disable_log_collection,omitempty"`
@@ -1927,6 +1930,14 @@ func (m *DriverCapabilities) GetMountConfigs() DriverCapabilities_MountConfigs {
 	return DriverCapabilities_UNKNOWN_MOUNTS
 }
 
+// Deprecated: Do not use.
+func (m *DriverCapabilities) GetRemoteTasks() bool { + if m != nil { + return m.RemoteTasks + } + return false +} + func (m *DriverCapabilities) GetDisableLogCollection() bool { if m != nil { return m.DisableLogCollection @@ -3758,254 +3769,255 @@ func init() { } var fileDescriptor_4a8f45747846a74d = []byte{ - // 3938 bytes of a gzipped FileDescriptorProto + // 3953 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x5a, 0x4f, 0x73, 0xdb, 0x48, - 0x76, 0x17, 0xf8, 0x4f, 0xe4, 0x23, 0x45, 0x41, 0x2d, 0xc9, 0xa6, 0x39, 0x9b, 0x8c, 0x07, 0x5b, - 0x93, 0x52, 0x76, 0x67, 0xe8, 0x59, 0x6d, 0x32, 0x1e, 0x7b, 0x3d, 0xeb, 0xe1, 0x50, 0xb4, 0x45, - 0x5b, 0xa2, 0x94, 0x26, 0x15, 0xaf, 0xe3, 0x64, 0x10, 0x08, 0x68, 0x53, 0xb0, 0x48, 0x00, 0x83, - 0x06, 0x65, 0x69, 0x53, 0xa9, 0xa4, 0x36, 0x55, 0xa9, 0x4d, 0x55, 0x52, 0xc9, 0x65, 0xb2, 0x97, - 0x9c, 0xb6, 0x2a, 0xa7, 0x54, 0xee, 0xa9, 0x4d, 0xed, 0x29, 0x87, 0x7c, 0x89, 0x1c, 0x92, 0x5b, - 0xae, 0xf9, 0x04, 0xd9, 0xea, 0x3f, 0x00, 0x01, 0x92, 0x1e, 0x83, 0x94, 0x4f, 0xe4, 0x7b, 0xdd, - 0xfd, 0xeb, 0x87, 0xf7, 0x5e, 0xbf, 0x7e, 0xdd, 0xfd, 0x40, 0xf3, 0x86, 0xe3, 0x81, 0xed, 0xd0, - 0x3b, 0x96, 0x6f, 0x5f, 0x10, 0x9f, 0xde, 0xf1, 0x7c, 0x37, 0x70, 0x25, 0xd5, 0xe0, 0x04, 0xfa, - 0xf0, 0xcc, 0xa0, 0x67, 0xb6, 0xe9, 0xfa, 0x5e, 0xc3, 0x71, 0x47, 0x86, 0xd5, 0x90, 0x63, 0x1a, - 0x72, 0x8c, 0xe8, 0x56, 0xff, 0xed, 0x81, 0xeb, 0x0e, 0x86, 0x44, 0x20, 0x9c, 0x8e, 0x5f, 0xde, - 0xb1, 0xc6, 0xbe, 0x11, 0xd8, 0xae, 0x23, 0xdb, 0xdf, 0x9f, 0x6e, 0x0f, 0xec, 0x11, 0xa1, 0x81, - 0x31, 0xf2, 0x64, 0x87, 0x0f, 0x43, 0x59, 0xe8, 0x99, 0xe1, 0x13, 0xeb, 0xce, 0x99, 0x39, 0xa4, - 0x1e, 0x31, 0xd9, 0xaf, 0xce, 0xfe, 0xc8, 0x6e, 0x1f, 0x4d, 0x75, 0xa3, 0x81, 0x3f, 0x36, 0x83, - 0x50, 0x72, 0x23, 0x08, 0x7c, 0xfb, 0x74, 0x1c, 0x10, 0xd1, 0x5b, 0xbb, 0x05, 0x37, 0xfb, 0x06, - 0x3d, 0x6f, 0xb9, 0xce, 0x4b, 0x7b, 0xd0, 0x33, 0xcf, 0xc8, 0xc8, 0xc0, 0xe4, 0xeb, 0x31, 0xa1, - 0x81, 0xf6, 0xc7, 0x50, 0x9b, 0x6d, 0xa2, 0x9e, 0xeb, 0x50, 0x82, 0xbe, 0x80, 0x1c, 0x9b, 0xb2, - 0xa6, 0xdc, 0x56, 0x76, 0xca, 0xbb, 0x1f, 0x35, 0xde, 0xa4, 0x02, 0x21, 0x43, 0x43, 0x8a, 0xda, - 0xe8, 0x79, 0xc4, 0xc4, 0x7c, 0xa4, 0xb6, 0x0d, 0x9b, 0x2d, 0xc3, 0x33, 0x4e, 0xed, 0xa1, 0x1d, - 0xd8, 0x84, 0x86, 0x93, 0x8e, 0x61, 0x2b, 0xc9, 0x96, 0x13, 0xfe, 0x09, 0x54, 0xcc, 0x18, 0x5f, - 0x4e, 0x7c, 0xaf, 0x91, 0x4a, 0xf7, 0x8d, 0x3d, 0x4e, 0x25, 0x80, 0x13, 0x70, 0xda, 0x16, 0xa0, - 0x47, 0xb6, 0x33, 0x20, 0xbe, 0xe7, 0xdb, 0x4e, 0x10, 0x0a, 0xf3, 0xeb, 0x2c, 0x6c, 0x26, 0xd8, - 0x52, 0x98, 0x57, 0x00, 0x91, 0x1e, 0x99, 0x28, 0xd9, 0x9d, 0xf2, 0xee, 0x93, 0x94, 0xa2, 0xcc, - 0xc1, 0x6b, 0x34, 0x23, 0xb0, 0xb6, 0x13, 0xf8, 0x57, 0x38, 0x86, 0x8e, 0xbe, 0x82, 0xc2, 0x19, - 0x31, 0x86, 0xc1, 0x59, 0x2d, 0x73, 0x5b, 0xd9, 0xa9, 0xee, 0x3e, 0xba, 0xc6, 0x3c, 0xfb, 0x1c, - 0xa8, 0x17, 0x18, 0x01, 0xc1, 0x12, 0x15, 0x7d, 0x0c, 0x48, 0xfc, 0xd3, 0x2d, 0x42, 0x4d, 0xdf, - 0xf6, 0x98, 0x4b, 0xd6, 0xb2, 0xb7, 0x95, 0x9d, 0x12, 0xde, 0x10, 0x2d, 0x7b, 0x93, 0x86, 0xba, - 0x07, 0xeb, 0x53, 0xd2, 0x22, 0x15, 0xb2, 0xe7, 0xe4, 0x8a, 0x5b, 0xa4, 0x84, 0xd9, 0x5f, 0xf4, - 0x18, 0xf2, 0x17, 0xc6, 0x70, 0x4c, 0xb8, 0xc8, 0xe5, 0xdd, 0x1f, 0xbc, 0xcd, 0x3d, 0xa4, 0x8b, - 0x4e, 0xf4, 0x80, 0xc5, 0xf8, 0xfb, 0x99, 0xcf, 0x14, 0xed, 0x1e, 0x94, 0x63, 0x72, 0xa3, 0x2a, - 0xc0, 0x49, 0x77, 0xaf, 0xdd, 0x6f, 0xb7, 0xfa, 0xed, 0x3d, 0x75, 0x05, 0xad, 0x41, 0xe9, 0xa4, - 0xbb, 0xdf, 0x6e, 0x1e, 0xf4, 0xf7, 0x9f, 0xab, 0x0a, 0x2a, 0xc3, 0x6a, 0x48, 0x64, 0xb4, 0x4b, - 0x40, 
0x98, 0x98, 0xee, 0x05, 0xf1, 0x99, 0x23, 0x4b, 0xab, 0xa2, 0x9b, 0xb0, 0x1a, 0x18, 0xf4, - 0x5c, 0xb7, 0x2d, 0x29, 0x73, 0x81, 0x91, 0x1d, 0x0b, 0x75, 0xa0, 0x70, 0x66, 0x38, 0xd6, 0xf0, - 0xed, 0x72, 0x27, 0x55, 0xcd, 0xc0, 0xf7, 0xf9, 0x40, 0x2c, 0x01, 0x98, 0x77, 0x27, 0x66, 0x16, - 0x06, 0xd0, 0x9e, 0x83, 0xda, 0x0b, 0x0c, 0x3f, 0x88, 0x8b, 0xd3, 0x86, 0x1c, 0x9b, 0x5f, 0x7a, - 0xf4, 0x22, 0x73, 0x8a, 0x95, 0x89, 0xf9, 0x70, 0xed, 0xff, 0x32, 0xb0, 0x11, 0xc3, 0x96, 0x9e, - 0xfa, 0x0c, 0x0a, 0x3e, 0xa1, 0xe3, 0x61, 0xc0, 0xe1, 0xab, 0xbb, 0x0f, 0x53, 0xc2, 0xcf, 0x20, - 0x35, 0x30, 0x87, 0xc1, 0x12, 0x0e, 0xed, 0x80, 0x2a, 0x46, 0xe8, 0xc4, 0xf7, 0x5d, 0x5f, 0x1f, - 0xd1, 0x01, 0xd7, 0x5a, 0x09, 0x57, 0x05, 0xbf, 0xcd, 0xd8, 0x87, 0x74, 0x10, 0xd3, 0x6a, 0xf6, - 0x9a, 0x5a, 0x45, 0x06, 0xa8, 0x0e, 0x09, 0x5e, 0xbb, 0xfe, 0xb9, 0xce, 0x54, 0xeb, 0xdb, 0x16, - 0xa9, 0xe5, 0x38, 0xe8, 0xa7, 0x29, 0x41, 0xbb, 0x62, 0xf8, 0x91, 0x1c, 0x8d, 0xd7, 0x9d, 0x24, - 0x43, 0xfb, 0x3e, 0x14, 0xc4, 0x97, 0x32, 0x4f, 0xea, 0x9d, 0xb4, 0x5a, 0xed, 0x5e, 0x4f, 0x5d, - 0x41, 0x25, 0xc8, 0xe3, 0x76, 0x1f, 0x33, 0x0f, 0x2b, 0x41, 0xfe, 0x51, 0xb3, 0xdf, 0x3c, 0x50, - 0x33, 0xda, 0xf7, 0x60, 0xfd, 0x99, 0x61, 0x07, 0x69, 0x9c, 0x4b, 0x73, 0x41, 0x9d, 0xf4, 0x95, - 0xd6, 0xe9, 0x24, 0xac, 0x93, 0x5e, 0x35, 0xed, 0x4b, 0x3b, 0x98, 0xb2, 0x87, 0x0a, 0x59, 0xe2, - 0xfb, 0xd2, 0x04, 0xec, 0xaf, 0xf6, 0x1a, 0xd6, 0x7b, 0x81, 0xeb, 0xa5, 0xf2, 0xfc, 0x1f, 0xc2, - 0x2a, 0xdb, 0x6d, 0xdc, 0x71, 0x20, 0x5d, 0xff, 0x56, 0x43, 0xec, 0x46, 0x8d, 0x70, 0x37, 0x6a, - 0xec, 0xc9, 0xdd, 0x0a, 0x87, 0x3d, 0xd1, 0x0d, 0x28, 0x50, 0x7b, 0xe0, 0x18, 0x43, 0x19, 0x2d, - 0x24, 0xa5, 0x21, 0xe6, 0xe4, 0xe1, 0xc4, 0xd2, 0xf1, 0x5b, 0x80, 0xf6, 0x08, 0x0d, 0x7c, 0xf7, - 0x2a, 0x95, 0x3c, 0x5b, 0x90, 0x7f, 0xe9, 0xfa, 0xa6, 0x58, 0x88, 0x45, 0x2c, 0x08, 0xb6, 0xa8, - 0x12, 0x20, 0x12, 0xfb, 0x63, 0x40, 0x1d, 0x87, 0xed, 0x29, 0xe9, 0x0c, 0xf1, 0x0f, 0x19, 0xd8, - 0x4c, 0xf4, 0x97, 0xc6, 0x58, 0x7e, 0x1d, 0xb2, 0xc0, 0x34, 0xa6, 0x62, 0x1d, 0xa2, 0x23, 0x28, - 0x88, 0x1e, 0x52, 0x93, 0x77, 0x17, 0x00, 0x12, 0xdb, 0x94, 0x84, 0x93, 0x30, 0x73, 0x9d, 0x3e, - 0xfb, 0x6e, 0x9d, 0xfe, 0x35, 0xa8, 0xe1, 0x77, 0xd0, 0xb7, 0xda, 0xe6, 0x09, 0x6c, 0x9a, 0xee, - 0x70, 0x48, 0x4c, 0xe6, 0x0d, 0xba, 0xed, 0x04, 0xc4, 0xbf, 0x30, 0x86, 0x6f, 0xf7, 0x1b, 0x34, - 0x19, 0xd5, 0x91, 0x83, 0xb4, 0x17, 0xb0, 0x11, 0x9b, 0x58, 0x1a, 0xe2, 0x11, 0xe4, 0x29, 0x63, - 0x48, 0x4b, 0x7c, 0xb2, 0xa0, 0x25, 0x28, 0x16, 0xc3, 0xb5, 0x4d, 0x01, 0xde, 0xbe, 0x20, 0x4e, - 0xf4, 0x59, 0xda, 0x1e, 0x6c, 0xf4, 0xb8, 0x9b, 0xa6, 0xf2, 0xc3, 0x89, 0x8b, 0x67, 0x12, 0x2e, - 0xbe, 0x05, 0x28, 0x8e, 0x22, 0x1d, 0xf1, 0x0a, 0xd6, 0xdb, 0x97, 0xc4, 0x4c, 0x85, 0x5c, 0x83, - 0x55, 0xd3, 0x1d, 0x8d, 0x0c, 0xc7, 0xaa, 0x65, 0x6e, 0x67, 0x77, 0x4a, 0x38, 0x24, 0xe3, 0x6b, - 0x31, 0x9b, 0x76, 0x2d, 0x6a, 0x7f, 0xa7, 0x80, 0x3a, 0x99, 0x5b, 0x2a, 0x92, 0x49, 0x1f, 0x58, - 0x0c, 0x88, 0xcd, 0x5d, 0xc1, 0x92, 0x92, 0xfc, 0x30, 0x5c, 0x08, 0x3e, 0xf1, 0xfd, 0x58, 0x38, - 0xca, 0x5e, 0x33, 0x1c, 0x69, 0xfb, 0xf0, 0x9d, 0x50, 0x9c, 0x5e, 0xe0, 0x13, 0x63, 0x64, 0x3b, - 0x83, 0xce, 0xd1, 0x91, 0x47, 0x84, 0xe0, 0x08, 0x41, 0xce, 0x32, 0x02, 0x43, 0x0a, 0xc6, 0xff, - 0xb3, 0x45, 0x6f, 0x0e, 0x5d, 0x1a, 0x2d, 0x7a, 0x4e, 0x68, 0xff, 0x99, 0x85, 0xda, 0x0c, 0x54, - 0xa8, 0xde, 0x17, 0x90, 0xa7, 0x24, 0x18, 0x7b, 0xd2, 0x55, 0xda, 0xa9, 0x05, 0x9e, 0x8f, 0xd7, - 0xe8, 0x31, 0x30, 0x2c, 0x30, 0xd1, 0x00, 0x8a, 0x41, 0x70, 0xa5, 0x53, 0xfb, 0xa7, 0x61, 0x42, - 0x70, 0x70, 0x5d, 0xfc, 0x3e, 
0xf1, 0x47, 0xb6, 0x63, 0x0c, 0x7b, 0xf6, 0x4f, 0x09, 0x5e, 0x0d, - 0x82, 0x2b, 0xf6, 0x07, 0x3d, 0x67, 0x0e, 0x6f, 0xd9, 0x8e, 0x54, 0x7b, 0x6b, 0xd9, 0x59, 0x62, - 0x0a, 0xc6, 0x02, 0xb1, 0x7e, 0x00, 0x79, 0xfe, 0x4d, 0xcb, 0x38, 0xa2, 0x0a, 0xd9, 0x20, 0xb8, - 0xe2, 0x42, 0x15, 0x31, 0xfb, 0x5b, 0x7f, 0x00, 0x95, 0xf8, 0x17, 0x30, 0x47, 0x3a, 0x23, 0xf6, - 0xe0, 0x4c, 0x38, 0x58, 0x1e, 0x4b, 0x8a, 0x59, 0xf2, 0xb5, 0x6d, 0xc9, 0x94, 0x35, 0x8f, 0x05, - 0xa1, 0xfd, 0x5b, 0x06, 0x6e, 0xcd, 0xd1, 0x8c, 0x74, 0xd6, 0x17, 0x09, 0x67, 0x7d, 0x47, 0x5a, - 0x08, 0x3d, 0xfe, 0x45, 0xc2, 0xe3, 0xdf, 0x21, 0x38, 0x5b, 0x36, 0x37, 0xa0, 0x40, 0x2e, 0xed, - 0x80, 0x58, 0x52, 0x55, 0x92, 0x8a, 0x2d, 0xa7, 0xdc, 0x75, 0x97, 0xd3, 0x21, 0x6c, 0xb5, 0x7c, - 0x62, 0x04, 0x44, 0x86, 0xf2, 0xd0, 0xff, 0x6f, 0x41, 0xd1, 0x18, 0x0e, 0x5d, 0x73, 0x62, 0xd6, - 0x55, 0x4e, 0x77, 0x2c, 0x54, 0x87, 0xe2, 0x99, 0x4b, 0x03, 0xc7, 0x18, 0x11, 0x19, 0xbc, 0x22, - 0x5a, 0xfb, 0x46, 0x81, 0xed, 0x29, 0x3c, 0x69, 0x85, 0x53, 0xa8, 0xda, 0xd4, 0x1d, 0xf2, 0x0f, - 0xd4, 0x63, 0x27, 0xbc, 0x1f, 0x2d, 0xb6, 0xd5, 0x74, 0x42, 0x0c, 0x7e, 0xe0, 0x5b, 0xb3, 0xe3, - 0x24, 0xf7, 0x38, 0x3e, 0xb9, 0x25, 0x57, 0x7a, 0x48, 0x6a, 0xff, 0xa8, 0xc0, 0xb6, 0xdc, 0xe1, - 0xd3, 0x7f, 0xe8, 0xac, 0xc8, 0x99, 0x77, 0x2d, 0xb2, 0x56, 0x83, 0x1b, 0xd3, 0x72, 0xc9, 0x98, - 0xff, 0xdf, 0x79, 0x40, 0xb3, 0xa7, 0x4b, 0xf4, 0x01, 0x54, 0x28, 0x71, 0x2c, 0x5d, 0xec, 0x17, - 0x62, 0x2b, 0x2b, 0xe2, 0x32, 0xe3, 0x89, 0x8d, 0x83, 0xb2, 0x10, 0x48, 0x2e, 0xa5, 0xb4, 0x45, - 0xcc, 0xff, 0xa3, 0x33, 0xa8, 0xbc, 0xa4, 0x7a, 0x34, 0x37, 0x77, 0xa8, 0x6a, 0xea, 0xb0, 0x36, - 0x2b, 0x47, 0xe3, 0x51, 0x2f, 0xfa, 0x2e, 0x5c, 0x7e, 0x49, 0x23, 0x02, 0xfd, 0x5c, 0x81, 0x9b, - 0x61, 0x5a, 0x31, 0x51, 0xdf, 0xc8, 0xb5, 0x08, 0xad, 0xe5, 0x6e, 0x67, 0x77, 0xaa, 0xbb, 0xc7, - 0xd7, 0xd0, 0xdf, 0x0c, 0xf3, 0xd0, 0xb5, 0x08, 0xde, 0x76, 0xe6, 0x70, 0x29, 0x6a, 0xc0, 0xe6, - 0x68, 0x4c, 0x03, 0x5d, 0x78, 0x81, 0x2e, 0x3b, 0xd5, 0xf2, 0x5c, 0x2f, 0x1b, 0xac, 0x29, 0xe1, - 0xab, 0xe8, 0x1c, 0xd6, 0x46, 0xee, 0xd8, 0x09, 0x74, 0x93, 0x9f, 0x7f, 0x68, 0xad, 0xb0, 0xd0, - 0xc1, 0x78, 0x8e, 0x96, 0x0e, 0x19, 0x9c, 0x38, 0x4d, 0x51, 0x5c, 0x19, 0xc5, 0x28, 0xf4, 0x7b, - 0x70, 0xc3, 0xb2, 0xa9, 0x71, 0x3a, 0x24, 0xfa, 0xd0, 0x1d, 0xe8, 0x93, 0x1c, 0xa6, 0x56, 0xe4, - 0xf2, 0x6d, 0xc9, 0xd6, 0x03, 0x77, 0xd0, 0x8a, 0xda, 0xf8, 0xa8, 0x2b, 0xc7, 0x18, 0xd9, 0xa6, - 0xce, 0x44, 0x1e, 0xba, 0x86, 0xa5, 0x8f, 0x29, 0xf1, 0x69, 0xad, 0x24, 0x47, 0x89, 0xd6, 0x67, - 0xb2, 0xf1, 0x84, 0xb5, 0x69, 0xf7, 0xa1, 0x1c, 0xb3, 0x17, 0x2a, 0x42, 0xae, 0x7b, 0xd4, 0x6d, - 0xab, 0x2b, 0x08, 0xa0, 0xd0, 0xda, 0xc7, 0x47, 0x47, 0x7d, 0x71, 0xfc, 0xe8, 0x1c, 0x36, 0x1f, - 0xb7, 0xd5, 0x0c, 0x63, 0x9f, 0x74, 0xff, 0xb0, 0xdd, 0x39, 0x50, 0xb3, 0x5a, 0x1b, 0x2a, 0xf1, - 0xaf, 0x40, 0x08, 0xaa, 0x27, 0xdd, 0xa7, 0xdd, 0xa3, 0x67, 0x5d, 0xfd, 0xf0, 0xe8, 0xa4, 0xdb, - 0x67, 0x87, 0x98, 0x2a, 0x40, 0xb3, 0xfb, 0x7c, 0x42, 0xaf, 0x41, 0xa9, 0x7b, 0x14, 0x92, 0x4a, - 0x3d, 0xa3, 0x2a, 0x4f, 0x72, 0xc5, 0x55, 0xb5, 0x88, 0x2b, 0x3e, 0x19, 0xb9, 0x01, 0xd1, 0xd9, - 0x16, 0x41, 0xb5, 0xff, 0xc8, 0xc2, 0xd6, 0x3c, 0x23, 0x23, 0x0b, 0x72, 0xcc, 0x61, 0xe4, 0xd1, - 0xf2, 0xdd, 0xfb, 0x0b, 0x47, 0x67, 0xeb, 0xc4, 0x33, 0xe4, 0x5e, 0x52, 0xc2, 0xfc, 0x3f, 0xd2, - 0xa1, 0x30, 0x34, 0x4e, 0xc9, 0x90, 0xd6, 0xb2, 0xfc, 0xf2, 0xe5, 0xf1, 0x75, 0xe6, 0x3e, 0xe0, - 0x48, 0xe2, 0xe6, 0x45, 0xc2, 0xa2, 0x3e, 0x94, 0x59, 0xb4, 0xa4, 0x42, 0x9d, 0x32, 0x80, 0xef, - 0xa6, 0x9c, 0x65, 0x7f, 0x32, 0x12, 0xc7, 0x61, 0xea, 
0xf7, 0xa0, 0x1c, 0x9b, 0x6c, 0xce, 0xc5, - 0xc9, 0x56, 0xfc, 0xe2, 0xa4, 0x14, 0xbf, 0x05, 0x79, 0x38, 0x6b, 0x03, 0xa6, 0x23, 0xe6, 0x24, - 0xfb, 0x47, 0xbd, 0xbe, 0x38, 0xa2, 0x3e, 0xc6, 0x47, 0x27, 0xc7, 0xaa, 0xc2, 0x98, 0xfd, 0x66, - 0xef, 0xa9, 0x9a, 0x89, 0x7c, 0x28, 0xab, 0xb5, 0xa0, 0x1c, 0x93, 0x2b, 0xb1, 0x3d, 0x28, 0xc9, - 0xed, 0x81, 0x05, 0x68, 0xc3, 0xb2, 0x7c, 0x42, 0xa9, 0x94, 0x23, 0x24, 0xb5, 0x17, 0x50, 0xda, - 0xeb, 0xf6, 0x24, 0x44, 0x0d, 0x56, 0x29, 0xf1, 0xd9, 0x77, 0xf3, 0x2b, 0xb0, 0x12, 0x0e, 0x49, - 0x06, 0x4e, 0x89, 0xe1, 0x9b, 0x67, 0x84, 0xca, 0xa4, 0x22, 0xa2, 0xd9, 0x28, 0x97, 0x5f, 0x25, - 0x09, 0xdb, 0x95, 0x70, 0x48, 0x6a, 0xff, 0x5f, 0x04, 0x98, 0x5c, 0x6b, 0xa0, 0x2a, 0x64, 0xa2, - 0x60, 0x9f, 0xb1, 0x2d, 0xe6, 0x07, 0xb1, 0xcd, 0x8c, 0xff, 0x47, 0xbb, 0xb0, 0x3d, 0xa2, 0x03, - 0xcf, 0x30, 0xcf, 0x75, 0x79, 0x1b, 0x21, 0x62, 0x02, 0x0f, 0x9c, 0x15, 0xbc, 0x29, 0x1b, 0xe5, - 0x92, 0x17, 0xb8, 0x07, 0x90, 0x25, 0xce, 0x05, 0x0f, 0x72, 0xe5, 0xdd, 0xfb, 0x0b, 0x5f, 0xb7, - 0x34, 0xda, 0xce, 0x85, 0xf0, 0x15, 0x06, 0x83, 0x74, 0x00, 0x8b, 0x5c, 0xd8, 0x26, 0xd1, 0x19, - 0x68, 0x9e, 0x83, 0x7e, 0xb1, 0x38, 0xe8, 0x1e, 0xc7, 0x88, 0xa0, 0x4b, 0x56, 0x48, 0xa3, 0x2e, - 0x94, 0x7c, 0x42, 0xdd, 0xb1, 0x6f, 0x12, 0x11, 0xe9, 0xd2, 0x9f, 0x88, 0x70, 0x38, 0x0e, 0x4f, - 0x20, 0xd0, 0x1e, 0x14, 0x78, 0x80, 0xa3, 0xb5, 0x55, 0x2e, 0xec, 0x47, 0x29, 0xc1, 0x78, 0x74, - 0xc1, 0x72, 0x2c, 0x7a, 0x0c, 0xab, 0x42, 0x44, 0x5a, 0x2b, 0x72, 0x98, 0x8f, 0xd3, 0x46, 0x5f, - 0x3e, 0x0a, 0x87, 0xa3, 0x99, 0x55, 0x59, 0x60, 0xe4, 0x71, 0xb1, 0x84, 0xf9, 0x7f, 0xf4, 0x1e, - 0x94, 0xc4, 0x66, 0x6f, 0xd9, 0x7e, 0x0d, 0x84, 0x73, 0x72, 0xc6, 0x9e, 0xed, 0xa3, 0xf7, 0xa1, - 0x2c, 0x92, 0x3a, 0x9d, 0x47, 0x85, 0x32, 0x6f, 0x06, 0xc1, 0x3a, 0x66, 0xb1, 0x41, 0x74, 0x20, - 0xbe, 0x2f, 0x3a, 0x54, 0xa2, 0x0e, 0xc4, 0xf7, 0x79, 0x87, 0xdf, 0x81, 0x75, 0x9e, 0x0a, 0x0f, - 0x7c, 0x77, 0xec, 0xe9, 0xdc, 0xa7, 0xd6, 0x78, 0xa7, 0x35, 0xc6, 0x7e, 0xcc, 0xb8, 0x5d, 0xe6, - 0x5c, 0xb7, 0xa0, 0xf8, 0xca, 0x3d, 0x15, 0x1d, 0xaa, 0x62, 0x1d, 0xbc, 0x72, 0x4f, 0xc3, 0xa6, - 0x28, 0x1d, 0x59, 0x4f, 0xa6, 0x23, 0x5f, 0xc3, 0x8d, 0xd9, 0x7d, 0x95, 0xa7, 0x25, 0xea, 0xf5, - 0xd3, 0x92, 0x2d, 0x67, 0x5e, 0x1c, 0xfe, 0x12, 0xb2, 0x96, 0x43, 0x6b, 0x1b, 0x0b, 0x39, 0x47, - 0xb4, 0x8e, 0x31, 0x1b, 0x8c, 0xb6, 0xa1, 0xc0, 0x3e, 0xd6, 0xb6, 0x6a, 0x48, 0x84, 0x9e, 0x57, - 0xee, 0x69, 0xc7, 0x42, 0xdf, 0x81, 0x12, 0xfb, 0x7e, 0xea, 0x19, 0x26, 0xa9, 0x6d, 0xf2, 0x96, - 0x09, 0x83, 0x19, 0xca, 0x71, 0x2d, 0x22, 0x54, 0xb4, 0x25, 0x0c, 0xc5, 0x18, 0x5c, 0x47, 0x37, - 0x61, 0x95, 0x37, 0xda, 0x56, 0x6d, 0x5b, 0x9c, 0x38, 0x18, 0xd9, 0xb1, 0x90, 0x06, 0x6b, 0x9e, - 0xe1, 0x13, 0x27, 0xd0, 0xe5, 0x8c, 0x37, 0x78, 0x73, 0x59, 0x30, 0x9f, 0xb0, 0x79, 0xeb, 0x9f, - 0x42, 0x31, 0x5c, 0x0c, 0x8b, 0x84, 0xc9, 0xfa, 0x03, 0xa8, 0x26, 0x97, 0xd2, 0x42, 0x41, 0xf6, - 0x9f, 0x33, 0x50, 0x8a, 0x16, 0x0d, 0x72, 0x60, 0x93, 0x1b, 0x95, 0xa5, 0xa6, 0xfa, 0x64, 0x0d, - 0x8a, 0x84, 0xf8, 0xf3, 0x94, 0x6a, 0x6e, 0x86, 0x08, 0xf2, 0x64, 0x2e, 0x17, 0x24, 0x8a, 0x90, - 0x27, 0xf3, 0x7d, 0x05, 0xeb, 0x43, 0xdb, 0x19, 0x5f, 0xc6, 0xe6, 0x12, 0x99, 0xec, 0xef, 0xa7, - 0x9c, 0xeb, 0x80, 0x8d, 0x9e, 0xcc, 0x51, 0x1d, 0x26, 0x68, 0xb4, 0x0f, 0x79, 0xcf, 0xf5, 0x83, - 0x70, 0xcf, 0x4c, 0xbb, 0x9b, 0x1d, 0xbb, 0x7e, 0x70, 0x68, 0x78, 0x1e, 0x3b, 0xac, 0x09, 0x00, - 0xed, 0x9b, 0x0c, 0xdc, 0x98, 0xff, 0x61, 0xa8, 0x0b, 0x59, 0xd3, 0x1b, 0x4b, 0x25, 0x3d, 0x58, - 0x54, 0x49, 0x2d, 0x6f, 0x3c, 0x91, 0x9f, 0x01, 0xa1, 0x67, 0x50, 0x18, 0x91, 
0x91, 0xeb, 0x5f, - 0x49, 0x5d, 0x3c, 0x5c, 0x14, 0xf2, 0x90, 0x8f, 0x9e, 0xa0, 0x4a, 0x38, 0x84, 0xa1, 0x28, 0x17, - 0x13, 0x95, 0x61, 0x7b, 0xc1, 0xeb, 0xb4, 0x10, 0x12, 0x47, 0x38, 0xda, 0xa7, 0xb0, 0x3d, 0xf7, - 0x53, 0xd0, 0x6f, 0x01, 0x98, 0xde, 0x58, 0xe7, 0xcf, 0x1d, 0xc2, 0x83, 0xb2, 0xb8, 0x64, 0x7a, - 0xe3, 0x1e, 0x67, 0x68, 0x2f, 0xa0, 0xf6, 0x26, 0x79, 0xd9, 0x1a, 0x13, 0x12, 0xeb, 0xa3, 0x53, - 0xae, 0x83, 0x2c, 0x2e, 0x0a, 0xc6, 0xe1, 0x29, 0x5b, 0x4a, 0x61, 0xa3, 0x71, 0xc9, 0x3a, 0x64, - 0x79, 0x87, 0xb2, 0xec, 0x60, 0x5c, 0x1e, 0x9e, 0x6a, 0xbf, 0xc8, 0xc0, 0xfa, 0x94, 0xc8, 0xec, - 0xc8, 0x2a, 0x02, 0x70, 0x78, 0x19, 0x20, 0x28, 0x16, 0x8d, 0x4d, 0xdb, 0x0a, 0xaf, 0x91, 0xf9, - 0x7f, 0xbe, 0x0f, 0x7b, 0xf2, 0x8a, 0x37, 0x63, 0x7b, 0x6c, 0xf9, 0x8c, 0x4e, 0xed, 0x80, 0xf2, - 0xa4, 0x28, 0x8f, 0x05, 0x81, 0x9e, 0x43, 0xd5, 0x27, 0x7c, 0xff, 0xb7, 0x74, 0xe1, 0x65, 0xf9, - 0x85, 0xbc, 0x4c, 0x4a, 0xc8, 0x9c, 0x0d, 0xaf, 0x85, 0x48, 0x8c, 0xa2, 0xe8, 0x19, 0xac, 0x85, - 0xc9, 0xb4, 0x40, 0x2e, 0x2c, 0x8d, 0x5c, 0x91, 0x40, 0x1c, 0x58, 0xbb, 0x07, 0xe5, 0x58, 0x23, - 0xfb, 0x30, 0x9e, 0xfd, 0x49, 0x9d, 0x08, 0x22, 0x19, 0x2d, 0xf2, 0x32, 0x5a, 0x68, 0xa7, 0x50, - 0x8e, 0xad, 0x8b, 0x45, 0x86, 0x32, 0x7d, 0x06, 0x2e, 0xd7, 0x67, 0x1e, 0x67, 0x02, 0x97, 0xc5, - 0x49, 0x96, 0x79, 0xe9, 0xb6, 0xc7, 0x35, 0x5a, 0xc2, 0x05, 0x46, 0x76, 0x3c, 0xed, 0x57, 0x19, - 0xa8, 0x26, 0x97, 0x74, 0xe8, 0x47, 0x1e, 0xf1, 0x6d, 0xd7, 0x8a, 0xf9, 0xd1, 0x31, 0x67, 0x30, - 0x5f, 0x61, 0xcd, 0x5f, 0x8f, 0xdd, 0xc0, 0x08, 0x7d, 0xc5, 0xf4, 0xc6, 0x7f, 0xc0, 0xe8, 0x29, - 0x1f, 0xcc, 0x4e, 0xf9, 0x20, 0xfa, 0x08, 0x90, 0x74, 0xa5, 0xa1, 0x3d, 0xb2, 0x03, 0xfd, 0xf4, - 0x2a, 0x20, 0xc2, 0xc6, 0x59, 0xac, 0x8a, 0x96, 0x03, 0xd6, 0xf0, 0x25, 0xe3, 0x33, 0xc7, 0x73, - 0xdd, 0x91, 0x4e, 0x4d, 0xd7, 0x27, 0xba, 0x61, 0xbd, 0xe2, 0xa7, 0xb5, 0x2c, 0x2e, 0xbb, 0xee, - 0xa8, 0xc7, 0x78, 0x4d, 0xeb, 0x15, 0xdb, 0x88, 0x4d, 0x6f, 0x4c, 0x49, 0xa0, 0xb3, 0x1f, 0x9e, - 0xbb, 0x94, 0x30, 0x08, 0x56, 0xcb, 0x1b, 0x53, 0xf4, 0x5d, 0x58, 0x0b, 0x3b, 0xf0, 0xbd, 0x58, - 0x26, 0x01, 0x15, 0xd9, 0x85, 0xf3, 0x90, 0x06, 0x95, 0x63, 0xe2, 0x9b, 0xc4, 0x09, 0xfa, 0xb6, - 0x79, 0x4e, 0xf9, 0xb1, 0x4b, 0xc1, 0x09, 0x9e, 0x3c, 0xb5, 0x84, 0xb3, 0x8d, 0xc8, 0x88, 0x6a, - 0xff, 0xaa, 0x40, 0x9e, 0xa7, 0x2c, 0x4c, 0x29, 0x7c, 0xbb, 0xe7, 0xd9, 0x80, 0x4c, 0x75, 0x19, - 0x83, 0xe7, 0x02, 0xef, 0x41, 0x89, 0x2b, 0x3f, 0x76, 0xc2, 0xe0, 0x79, 0x30, 0x6f, 0xac, 0x43, - 0xd1, 0x27, 0x86, 0xe5, 0x3a, 0xc3, 0xf0, 0x16, 0x2c, 0xa2, 0xd1, 0xef, 0x82, 0xea, 0xf9, 0xae, - 0x67, 0x0c, 0x26, 0x07, 0x67, 0x69, 0xbe, 0xf5, 0x18, 0x9f, 0xa7, 0xe8, 0xdf, 0x85, 0x35, 0x4a, - 0x44, 0x64, 0x17, 0x4e, 0x92, 0x17, 0x9f, 0x29, 0x99, 0xfc, 0x44, 0xa0, 0x7d, 0x0d, 0x05, 0xb1, - 0x71, 0x5d, 0x43, 0xde, 0x8f, 0x01, 0x09, 0x45, 0x32, 0x07, 0x19, 0xd9, 0x94, 0xca, 0x2c, 0x9b, - 0x3f, 0xe5, 0x8a, 0x96, 0xe3, 0x49, 0x83, 0xf6, 0x5f, 0x8a, 0xc8, 0xb7, 0xc5, 0x23, 0x1b, 0x4b, - 0xcc, 0xd9, 0xaa, 0x61, 0x47, 0x5b, 0x71, 0x9b, 0x17, 0x92, 0xa8, 0x03, 0x05, 0x99, 0x56, 0x67, - 0x96, 0x7d, 0xa3, 0x94, 0x00, 0xe1, 0xdd, 0x3e, 0x91, 0x37, 0x1b, 0x8b, 0xde, 0xed, 0x13, 0x71, - 0xb7, 0x4f, 0xd0, 0x07, 0x50, 0x91, 0x09, 0xbf, 0x80, 0xcb, 0xf1, 0x7c, 0xbf, 0x6c, 0x45, 0x0f, - 0x28, 0x44, 0xfb, 0x5f, 0x25, 0x8a, 0x7b, 0xe1, 0x43, 0x07, 0xfa, 0x0a, 0x8a, 0x2c, 0x84, 0xe8, - 0x23, 0xc3, 0x93, 0xcf, 0xf6, 0xad, 0xe5, 0xde, 0x50, 0xc2, 0x5d, 0x51, 0xa4, 0xeb, 0xab, 0x9e, - 0xa0, 0x58, 0xfc, 0x64, 0x47, 0xa5, 0x30, 0x7e, 0xb2, 0xff, 0xe8, 0x43, 0xa8, 0x1a, 0xe3, 0xc0, - 0xd5, 
0x0d, 0xeb, 0x82, 0xf8, 0x81, 0x4d, 0x89, 0xf4, 0xa5, 0x35, 0xc6, 0x6d, 0x86, 0xcc, 0xfa, - 0x7d, 0xa8, 0xc4, 0x31, 0xdf, 0x96, 0xb7, 0xe4, 0xe3, 0x79, 0xcb, 0x9f, 0x02, 0x4c, 0x2e, 0x0d, - 0x99, 0x8f, 0x90, 0x4b, 0x3b, 0xd0, 0xcd, 0xf0, 0x6c, 0x9e, 0xc7, 0x45, 0xc6, 0x68, 0x31, 0x67, - 0x4c, 0xbe, 0x68, 0xe4, 0xc3, 0x17, 0x0d, 0x16, 0x1d, 0xd8, 0x82, 0x3e, 0xb7, 0x87, 0xc3, 0xe8, - 0x22, 0xb3, 0xe4, 0xba, 0xa3, 0xa7, 0x9c, 0xa1, 0xfd, 0x3a, 0x23, 0x7c, 0x45, 0xbc, 0x4d, 0xa5, - 0x3a, 0x9b, 0xbd, 0x2b, 0x53, 0xdf, 0x03, 0xa0, 0x81, 0xe1, 0xb3, 0x24, 0xcc, 0x08, 0xaf, 0x52, - 0xeb, 0x33, 0x4f, 0x22, 0xfd, 0xb0, 0x58, 0x06, 0x97, 0x64, 0xef, 0x66, 0x80, 0x3e, 0x87, 0x8a, - 0xe9, 0x8e, 0xbc, 0x21, 0x91, 0x83, 0xf3, 0x6f, 0x1d, 0x5c, 0x8e, 0xfa, 0x37, 0x83, 0xd8, 0x05, - 0x6e, 0xe1, 0xba, 0x17, 0xb8, 0xbf, 0x52, 0xc4, 0x13, 0x5b, 0xfc, 0x85, 0x0f, 0x0d, 0xe6, 0x94, - 0x91, 0x3c, 0x5e, 0xf2, 0xb9, 0xf0, 0xdb, 0x6a, 0x48, 0xea, 0x9f, 0xa7, 0x29, 0xda, 0x78, 0x73, - 0x5a, 0xfc, 0xef, 0x59, 0x28, 0x45, 0xaf, 0x6b, 0x33, 0xb6, 0xff, 0x0c, 0x4a, 0x51, 0xa5, 0x92, - 0x0c, 0x10, 0xdf, 0x6a, 0x9e, 0xa8, 0x33, 0x7a, 0x09, 0xc8, 0x18, 0x0c, 0xa2, 0x74, 0x57, 0x1f, - 0x53, 0x63, 0x10, 0xbe, 0x6d, 0x7e, 0xb6, 0x80, 0x1e, 0xc2, 0xfd, 0xf1, 0x84, 0x8d, 0xc7, 0xaa, - 0x31, 0x18, 0x24, 0x38, 0xe8, 0xcf, 0x60, 0x3b, 0x39, 0x87, 0x7e, 0x7a, 0xa5, 0x7b, 0xb6, 0x25, - 0xef, 0x00, 0xf6, 0x17, 0x7d, 0x60, 0x6c, 0x24, 0xe0, 0xbf, 0xbc, 0x3a, 0xb6, 0x2d, 0xa1, 0x73, - 0xe4, 0xcf, 0x34, 0xd4, 0xff, 0x02, 0x6e, 0xbe, 0xa1, 0xfb, 0x1c, 0x1b, 0x74, 0x93, 0x85, 0x33, - 0xcb, 0x2b, 0x21, 0x66, 0xbd, 0x5f, 0x2a, 0xe2, 0x1d, 0x34, 0xa9, 0x93, 0x66, 0x3c, 0x4f, 0xbf, - 0x93, 0x72, 0x9e, 0xd6, 0xf1, 0x89, 0x80, 0xe7, 0xa9, 0xf9, 0x93, 0xa9, 0xd4, 0x3c, 0x6d, 0x42, - 0x26, 0x32, 0x5c, 0x01, 0x24, 0x11, 0xb4, 0x7f, 0xc9, 0x42, 0x31, 0x44, 0xe7, 0x27, 0xf8, 0x2b, - 0x1a, 0x90, 0x91, 0x1e, 0x5d, 0x2f, 0x2a, 0x18, 0x04, 0x8b, 0xef, 0xa8, 0xef, 0x41, 0x69, 0x4c, - 0x89, 0x2f, 0x9a, 0x33, 0xbc, 0xb9, 0xc8, 0x18, 0xbc, 0xf1, 0x7d, 0x28, 0x07, 0x6e, 0x60, 0x0c, - 0xf5, 0x80, 0xe7, 0x0b, 0x59, 0x31, 0x9a, 0xb3, 0x78, 0xb6, 0x80, 0xbe, 0x0f, 0x1b, 0xc1, 0x99, - 0xef, 0x06, 0xc1, 0x90, 0xe5, 0xaa, 0x3c, 0x73, 0x12, 0x89, 0x4e, 0x0e, 0xab, 0x51, 0x83, 0xc8, - 0xa8, 0x28, 0x8b, 0xde, 0x93, 0xce, 0xcc, 0x75, 0x79, 0x10, 0xc9, 0xe1, 0xb5, 0x88, 0xcb, 0x5c, - 0x9b, 0x6d, 0x9e, 0x9e, 0xc8, 0x48, 0x78, 0xac, 0x50, 0x70, 0x48, 0x22, 0x1d, 0xd6, 0x47, 0xc4, - 0xa0, 0x63, 0x9f, 0x58, 0xfa, 0x4b, 0x9b, 0x0c, 0x2d, 0x71, 0xf1, 0x52, 0x4d, 0x7d, 0xdc, 0x08, - 0xd5, 0xd2, 0x78, 0xc4, 0x47, 0xe3, 0x6a, 0x08, 0x27, 0x68, 0x96, 0x39, 0x88, 0x7f, 0x68, 0x1d, - 0xca, 0xbd, 0xe7, 0xbd, 0x7e, 0xfb, 0x50, 0x3f, 0x3c, 0xda, 0x6b, 0xcb, 0xda, 0xa8, 0x5e, 0x1b, - 0x0b, 0x52, 0x61, 0xed, 0xfd, 0xa3, 0x7e, 0xf3, 0x40, 0xef, 0x77, 0x5a, 0x4f, 0x7b, 0x6a, 0x06, - 0x6d, 0xc3, 0x46, 0x7f, 0x1f, 0x1f, 0xf5, 0xfb, 0x07, 0xed, 0x3d, 0xfd, 0xb8, 0x8d, 0x3b, 0x47, - 0x7b, 0x3d, 0x35, 0x8b, 0x10, 0x54, 0x27, 0xec, 0x7e, 0xe7, 0xb0, 0xad, 0xe6, 0x50, 0x19, 0x56, - 0x8f, 0xdb, 0xb8, 0xd5, 0xee, 0xf6, 0xd5, 0xbc, 0xf6, 0x8b, 0x2c, 0x94, 0x63, 0x56, 0x64, 0x8e, - 0xec, 0x53, 0x71, 0xae, 0xc9, 0x61, 0xf6, 0x97, 0xbf, 0xe5, 0x1a, 0xe6, 0x99, 0xb0, 0x4e, 0x0e, - 0x0b, 0x82, 0x9f, 0x65, 0x8c, 0xcb, 0xd8, 0x3a, 0xcf, 0xe1, 0xe2, 0xc8, 0xb8, 0x14, 0x20, 0x1f, - 0x40, 0xe5, 0x9c, 0xf8, 0x0e, 0x19, 0xca, 0x76, 0x61, 0x91, 0xb2, 0xe0, 0x89, 0x2e, 0x3b, 0xa0, - 0xca, 0x2e, 0x13, 0x18, 0x61, 0x8e, 0xaa, 0xe0, 0x1f, 0x86, 0x60, 0x5b, 0x90, 0x17, 0xcd, 0xab, - 0x62, 0x7e, 0x4e, 0xb0, 0x6d, 
0x8a, 0xbe, 0x36, 0x3c, 0x9e, 0x43, 0xe6, 0x30, 0xff, 0x8f, 0x4e, - 0x67, 0xed, 0x53, 0xe0, 0xf6, 0xb9, 0xb7, 0xb8, 0x3b, 0xbf, 0xc9, 0x44, 0x67, 0x91, 0x89, 0x56, - 0x21, 0x8b, 0xc3, 0x82, 0xa2, 0x56, 0xb3, 0xb5, 0xcf, 0xcc, 0xb2, 0x06, 0xa5, 0xc3, 0xe6, 0x4f, - 0xf4, 0x93, 0x9e, 0xb8, 0xd5, 0x57, 0xa1, 0xf2, 0xb4, 0x8d, 0xbb, 0xed, 0x03, 0xc9, 0xc9, 0xa2, - 0x2d, 0x50, 0x25, 0x67, 0xd2, 0x2f, 0xc7, 0x10, 0xc4, 0xdf, 0x3c, 0x2a, 0x42, 0xae, 0xf7, 0xac, - 0x79, 0xac, 0x16, 0xb4, 0xff, 0xc9, 0xc0, 0xba, 0xd8, 0x16, 0xa2, 0xd2, 0x87, 0x37, 0x3f, 0xfd, - 0xc6, 0x6f, 0xb1, 0x32, 0xc9, 0x5b, 0xac, 0x30, 0x09, 0xe5, 0xbb, 0x7a, 0x76, 0x92, 0x84, 0xf2, - 0x9b, 0x9d, 0x44, 0xc4, 0xcf, 0x2d, 0x12, 0xf1, 0x6b, 0xb0, 0x3a, 0x22, 0x34, 0xb2, 0x5b, 0x09, - 0x87, 0x24, 0xb2, 0xa1, 0x6c, 0x38, 0x8e, 0x1b, 0x18, 0xe2, 0x6a, 0xb8, 0xb0, 0xd0, 0x66, 0x38, - 0xf5, 0xc5, 0x8d, 0xe6, 0x04, 0x49, 0x04, 0xe6, 0x38, 0x76, 0xfd, 0xc7, 0xa0, 0x4e, 0x77, 0x58, - 0x64, 0x3b, 0xfc, 0xde, 0x0f, 0x26, 0xbb, 0x21, 0x61, 0xeb, 0x42, 0xbe, 0xb3, 0xa8, 0x2b, 0x8c, - 0xc0, 0x27, 0xdd, 0x6e, 0xa7, 0xfb, 0x58, 0x55, 0x10, 0x40, 0xa1, 0xfd, 0x93, 0x4e, 0xbf, 0xbd, - 0xa7, 0x66, 0x76, 0x7f, 0xb9, 0x01, 0x05, 0x21, 0x24, 0xfa, 0x46, 0x66, 0x02, 0xf1, 0xb2, 0x5a, - 0xf4, 0xe3, 0x85, 0x33, 0xea, 0x44, 0xa9, 0x6e, 0xfd, 0xe1, 0xd2, 0xe3, 0xe5, 0x33, 0xe6, 0x0a, - 0xfa, 0x1b, 0x05, 0x2a, 0x89, 0x27, 0xcc, 0xb4, 0x57, 0xe3, 0x73, 0xaa, 0x78, 0xeb, 0x3f, 0x5a, - 0x6a, 0x6c, 0x24, 0xcb, 0xcf, 0x15, 0x28, 0xc7, 0xea, 0x57, 0xd1, 0xbd, 0x65, 0x6a, 0x5e, 0x85, - 0x24, 0xf7, 0x97, 0x2f, 0x97, 0xd5, 0x56, 0x3e, 0x51, 0xd0, 0x5f, 0x2b, 0x50, 0x8e, 0x55, 0x72, - 0xa6, 0x16, 0x65, 0xb6, 0xee, 0x34, 0xb5, 0x28, 0xf3, 0x0a, 0x47, 0x57, 0xd0, 0x5f, 0x2a, 0x50, - 0x8a, 0xaa, 0x32, 0xd1, 0xdd, 0xc5, 0xeb, 0x38, 0x85, 0x10, 0x9f, 0x2d, 0x5b, 0x00, 0xaa, 0xad, - 0xa0, 0x3f, 0x87, 0x62, 0x58, 0xc2, 0x88, 0xd2, 0xee, 0x5e, 0x53, 0xf5, 0x91, 0xf5, 0xbb, 0x0b, - 0x8f, 0x8b, 0x4f, 0x1f, 0xd6, 0x15, 0xa6, 0x9e, 0x7e, 0xaa, 0x02, 0xb2, 0x7e, 0x77, 0xe1, 0x71, - 0xd1, 0xf4, 0xcc, 0x13, 0x62, 0xe5, 0x87, 0xa9, 0x3d, 0x61, 0xb6, 0xee, 0x31, 0xb5, 0x27, 0xcc, - 0xab, 0x76, 0x14, 0x82, 0xc4, 0x0a, 0x18, 0x53, 0x0b, 0x32, 0x5b, 0x24, 0x99, 0x5a, 0x90, 0x39, - 0xf5, 0x92, 0xda, 0x0a, 0xfa, 0x99, 0x12, 0x3f, 0x17, 0xdc, 0x5d, 0xb8, 0x4e, 0x6f, 0x41, 0x97, - 0x9c, 0xa9, 0x14, 0xe4, 0x0b, 0xf4, 0x67, 0xf2, 0x16, 0x43, 0x94, 0xf9, 0xa1, 0x45, 0xc0, 0x12, - 0x95, 0x81, 0xf5, 0x4f, 0x97, 0xdb, 0x6c, 0xb8, 0x10, 0x7f, 0xa5, 0x00, 0x4c, 0x0a, 0x02, 0x53, - 0x0b, 0x31, 0x53, 0x89, 0x58, 0xbf, 0xb7, 0xc4, 0xc8, 0xf8, 0x02, 0x09, 0x0b, 0x96, 0x52, 0x2f, - 0x90, 0xa9, 0x82, 0xc5, 0xd4, 0x0b, 0x64, 0xba, 0xd8, 0x50, 0x5b, 0x41, 0xff, 0xa4, 0xc0, 0xc6, - 0x4c, 0xc1, 0x14, 0x7a, 0x78, 0xcd, 0x9a, 0xb9, 0xfa, 0x17, 0xcb, 0x03, 0x84, 0xa2, 0xed, 0x28, - 0x9f, 0x28, 0xe8, 0x6f, 0x15, 0x58, 0x4b, 0x16, 0x92, 0xa4, 0xde, 0xa5, 0xe6, 0x94, 0x5e, 0xd5, - 0x1f, 0x2c, 0x37, 0x38, 0xd2, 0xd6, 0xdf, 0x2b, 0x50, 0x4d, 0xd6, 0x14, 0xa1, 0x07, 0x8b, 0x85, - 0x85, 0x29, 0x81, 0x3e, 0x5f, 0x72, 0x74, 0x28, 0xd1, 0x97, 0xab, 0x7f, 0x94, 0x17, 0xd9, 0x5b, - 0x81, 0xff, 0xfc, 0xf0, 0x37, 0x01, 0x00, 0x00, 0xff, 0xff, 0x4f, 0xda, 0xef, 0xe8, 0xfd, 0x34, - 0x00, 0x00, + 0x76, 0x17, 0x08, 0x92, 0x22, 0x1f, 0x29, 0x0a, 0x6a, 0x4b, 0x1e, 0x9a, 0xb3, 0xc9, 0x78, 0xb0, + 0x35, 0x29, 0x67, 0x77, 0x86, 0x9e, 0xd5, 0x26, 0xe3, 0xb1, 0xd7, 0xb3, 0x1e, 0x9a, 0xa2, 0x2d, + 0xda, 0x12, 0xa5, 0x34, 0xa9, 0x78, 0x1d, 0x27, 0x83, 0x40, 0x40, 0x9b, 0x82, 0x45, 0x02, 0x18, + 0x34, 0x28, 0x4b, 0x9b, 0x4a, 0x25, 0xb5, 
0xa9, 0x4a, 0x6d, 0xaa, 0x92, 0x4a, 0x2e, 0x93, 0xbd, + 0xe4, 0xb4, 0x55, 0x39, 0xa5, 0x72, 0x4f, 0x6d, 0x6a, 0x4f, 0x39, 0xe4, 0x4b, 0xe4, 0x92, 0x5b, + 0xae, 0xa9, 0x7c, 0x80, 0x6c, 0xf5, 0x1f, 0x80, 0x80, 0x48, 0x8f, 0x41, 0xca, 0x27, 0xf2, 0xbd, + 0xee, 0xfe, 0xf5, 0xc3, 0x7b, 0xaf, 0x5f, 0xbf, 0xee, 0x7e, 0xa0, 0xfb, 0xa3, 0xc9, 0xd0, 0x71, + 0xe9, 0x6d, 0x3b, 0x70, 0xce, 0x48, 0x40, 0x6f, 0xfb, 0x81, 0x17, 0x7a, 0x92, 0x6a, 0x72, 0x02, + 0x7d, 0x74, 0x62, 0xd2, 0x13, 0xc7, 0xf2, 0x02, 0xbf, 0xe9, 0x7a, 0x63, 0xd3, 0x6e, 0xca, 0x31, + 0x4d, 0x39, 0x46, 0x74, 0x6b, 0xfc, 0xf6, 0xd0, 0xf3, 0x86, 0x23, 0x22, 0x10, 0x8e, 0x27, 0x2f, + 0x6f, 0xdb, 0x93, 0xc0, 0x0c, 0x1d, 0xcf, 0x95, 0xed, 0x1f, 0x5c, 0x6e, 0x0f, 0x9d, 0x31, 0xa1, + 0xa1, 0x39, 0xf6, 0x65, 0x87, 0x8f, 0x22, 0x59, 0xe8, 0x89, 0x19, 0x10, 0xfb, 0xf6, 0x89, 0x35, + 0xa2, 0x3e, 0xb1, 0xd8, 0xaf, 0xc1, 0xfe, 0xc8, 0x6e, 0x1f, 0x5f, 0xea, 0x46, 0xc3, 0x60, 0x62, + 0x85, 0x91, 0xe4, 0x66, 0x18, 0x06, 0xce, 0xf1, 0x24, 0x24, 0xa2, 0xb7, 0x7e, 0x03, 0xde, 0x1b, + 0x98, 0xf4, 0xb4, 0xed, 0xb9, 0x2f, 0x9d, 0x61, 0xdf, 0x3a, 0x21, 0x63, 0x13, 0x93, 0xaf, 0x27, + 0x84, 0x86, 0xfa, 0x1f, 0x43, 0x7d, 0xb6, 0x89, 0xfa, 0x9e, 0x4b, 0x09, 0xfa, 0x12, 0xf2, 0x6c, + 0xca, 0xba, 0x72, 0x53, 0xb9, 0x55, 0xd9, 0xfe, 0xb8, 0xf9, 0x26, 0x15, 0x08, 0x19, 0x9a, 0x52, + 0xd4, 0x66, 0xdf, 0x27, 0x16, 0xe6, 0x23, 0xf5, 0x2d, 0xb8, 0xd6, 0x36, 0x7d, 0xf3, 0xd8, 0x19, + 0x39, 0xa1, 0x43, 0x68, 0x34, 0xe9, 0x04, 0x36, 0xd3, 0x6c, 0x39, 0xe1, 0x9f, 0x40, 0xd5, 0x4a, + 0xf0, 0xe5, 0xc4, 0x77, 0x9b, 0x99, 0x74, 0xdf, 0xdc, 0xe1, 0x54, 0x0a, 0x38, 0x05, 0xa7, 0x6f, + 0x02, 0x7a, 0xe4, 0xb8, 0x43, 0x12, 0xf8, 0x81, 0xe3, 0x86, 0x91, 0x30, 0xbf, 0x56, 0xe1, 0x5a, + 0x8a, 0x2d, 0x85, 0x79, 0x05, 0x10, 0xeb, 0x91, 0x89, 0xa2, 0xde, 0xaa, 0x6c, 0x3f, 0xc9, 0x28, + 0xca, 0x1c, 0xbc, 0x66, 0x2b, 0x06, 0xeb, 0xb8, 0x61, 0x70, 0x81, 0x13, 0xe8, 0xe8, 0x2b, 0x28, + 0x9e, 0x10, 0x73, 0x14, 0x9e, 0xd4, 0x73, 0x37, 0x95, 0x5b, 0xb5, 0xed, 0x47, 0x57, 0x98, 0x67, + 0x97, 0x03, 0xf5, 0x43, 0x33, 0x24, 0x58, 0xa2, 0xa2, 0x4f, 0x00, 0x89, 0x7f, 0x86, 0x4d, 0xa8, + 0x15, 0x38, 0x3e, 0x73, 0xc9, 0xba, 0x7a, 0x53, 0xb9, 0x55, 0xc6, 0x1b, 0xa2, 0x65, 0x67, 0xda, + 0xd0, 0xf0, 0x61, 0xfd, 0x92, 0xb4, 0x48, 0x03, 0xf5, 0x94, 0x5c, 0x70, 0x8b, 0x94, 0x31, 0xfb, + 0x8b, 0x1e, 0x43, 0xe1, 0xcc, 0x1c, 0x4d, 0x08, 0x17, 0xb9, 0xb2, 0xfd, 0x83, 0xb7, 0xb9, 0x87, + 0x74, 0xd1, 0xa9, 0x1e, 0xb0, 0x18, 0x7f, 0x2f, 0xf7, 0xb9, 0xa2, 0xdf, 0x85, 0x4a, 0x42, 0x6e, + 0x54, 0x03, 0x38, 0xea, 0xed, 0x74, 0x06, 0x9d, 0xf6, 0xa0, 0xb3, 0xa3, 0xad, 0xa0, 0x35, 0x28, + 0x1f, 0xf5, 0x76, 0x3b, 0xad, 0xbd, 0xc1, 0xee, 0x73, 0x4d, 0x41, 0x15, 0x58, 0x8d, 0x88, 0x9c, + 0x7e, 0x0e, 0x08, 0x13, 0xcb, 0x3b, 0x23, 0x01, 0x73, 0x64, 0x69, 0x55, 0xf4, 0x1e, 0xac, 0x86, + 0x26, 0x3d, 0x35, 0x1c, 0x5b, 0xca, 0x5c, 0x64, 0x64, 0xd7, 0x46, 0x5d, 0x28, 0x9e, 0x98, 0xae, + 0x3d, 0x7a, 0xbb, 0xdc, 0x69, 0x55, 0x33, 0xf0, 0x5d, 0x3e, 0x10, 0x4b, 0x00, 0xe6, 0xdd, 0xa9, + 0x99, 0x85, 0x01, 0xf4, 0xe7, 0xa0, 0xf5, 0x43, 0x33, 0x08, 0x93, 0xe2, 0x74, 0x20, 0xcf, 0xe6, + 0x97, 0x1e, 0xbd, 0xc8, 0x9c, 0x62, 0x65, 0x62, 0x3e, 0x5c, 0xff, 0xdf, 0x1c, 0x6c, 0x24, 0xb0, + 0xa5, 0xa7, 0x3e, 0x83, 0x62, 0x40, 0xe8, 0x64, 0x14, 0x72, 0xf8, 0xda, 0xf6, 0x83, 0x8c, 0xf0, + 0x33, 0x48, 0x4d, 0xcc, 0x61, 0xb0, 0x84, 0x43, 0xb7, 0x40, 0x13, 0x23, 0x0c, 0x12, 0x04, 0x5e, + 0x60, 0x8c, 0xe9, 0x90, 0x6b, 0xad, 0x8c, 0x6b, 0x82, 0xdf, 0x61, 0xec, 0x7d, 0x3a, 0x4c, 0x68, + 0x55, 0xbd, 0xa2, 0x56, 0x91, 0x09, 0x9a, 0x4b, 0xc2, 0xd7, 0x5e, 
0x70, 0x6a, 0x30, 0xd5, 0x06, + 0x8e, 0x4d, 0xea, 0x79, 0x0e, 0xfa, 0x59, 0x46, 0xd0, 0x9e, 0x18, 0x7e, 0x20, 0x47, 0xe3, 0x75, + 0x37, 0xcd, 0xd0, 0xbf, 0x0f, 0x45, 0xf1, 0xa5, 0xcc, 0x93, 0xfa, 0x47, 0xed, 0x76, 0xa7, 0xdf, + 0xd7, 0x56, 0x50, 0x19, 0x0a, 0xb8, 0x33, 0xc0, 0xcc, 0xc3, 0xca, 0x50, 0x78, 0xd4, 0x1a, 0xb4, + 0xf6, 0xb4, 0x9c, 0xfe, 0x3d, 0x58, 0x7f, 0x66, 0x3a, 0x61, 0x16, 0xe7, 0xd2, 0x3d, 0xd0, 0xa6, + 0x7d, 0xa5, 0x75, 0xba, 0x29, 0xeb, 0x64, 0x57, 0x4d, 0xe7, 0xdc, 0x09, 0x2f, 0xd9, 0x43, 0x03, + 0x95, 0x04, 0x81, 0x34, 0x01, 0xfb, 0xab, 0xbf, 0x86, 0xf5, 0x7e, 0xe8, 0xf9, 0x99, 0x3c, 0xff, + 0x87, 0xb0, 0xca, 0x76, 0x1b, 0x6f, 0x12, 0x4a, 0xd7, 0xbf, 0xd1, 0x14, 0xbb, 0x51, 0x33, 0xda, + 0x8d, 0x9a, 0x3b, 0x72, 0xb7, 0xc2, 0x51, 0x4f, 0x74, 0x1d, 0x8a, 0xd4, 0x19, 0xba, 0xe6, 0x48, + 0x46, 0x0b, 0x49, 0xe9, 0x88, 0x39, 0x79, 0x34, 0xb1, 0x74, 0xfc, 0x36, 0xa0, 0x1d, 0x42, 0xc3, + 0xc0, 0xbb, 0xc8, 0x24, 0xcf, 0x26, 0x14, 0x5e, 0x7a, 0x81, 0x25, 0x16, 0x62, 0x09, 0x0b, 0x82, + 0x2d, 0xaa, 0x14, 0x88, 0xc4, 0xfe, 0x04, 0x50, 0xd7, 0x65, 0x7b, 0x4a, 0x36, 0x43, 0xfc, 0x43, + 0x0e, 0xae, 0xa5, 0xfa, 0x4b, 0x63, 0x2c, 0xbf, 0x0e, 0x59, 0x60, 0x9a, 0x50, 0xb1, 0x0e, 0xd1, + 0x01, 0x14, 0x45, 0x0f, 0xa9, 0xc9, 0x3b, 0x0b, 0x00, 0x89, 0x6d, 0x4a, 0xc2, 0x49, 0x98, 0xb9, + 0x4e, 0xaf, 0xbe, 0x5b, 0xa7, 0x7f, 0x0d, 0x5a, 0xf4, 0x1d, 0xf4, 0xad, 0xb6, 0x79, 0x02, 0xd7, + 0x2c, 0x6f, 0x34, 0x22, 0x16, 0xf3, 0x06, 0xc3, 0x71, 0x43, 0x12, 0x9c, 0x99, 0xa3, 0xb7, 0xfb, + 0x0d, 0x9a, 0x8e, 0xea, 0xca, 0x41, 0xfa, 0x0b, 0xd8, 0x48, 0x4c, 0x2c, 0x0d, 0xf1, 0x08, 0x0a, + 0x94, 0x31, 0xa4, 0x25, 0x3e, 0x5d, 0xd0, 0x12, 0x14, 0x8b, 0xe1, 0xfa, 0x35, 0x01, 0xde, 0x39, + 0x23, 0x6e, 0xfc, 0x59, 0xfa, 0x0e, 0x6c, 0xf4, 0xb9, 0x9b, 0x66, 0xf2, 0xc3, 0xa9, 0x8b, 0xe7, + 0x52, 0x2e, 0xbe, 0x09, 0x28, 0x89, 0x22, 0x1d, 0xf1, 0x02, 0xd6, 0x3b, 0xe7, 0xc4, 0xca, 0x84, + 0x5c, 0x87, 0x55, 0xcb, 0x1b, 0x8f, 0x4d, 0xd7, 0xae, 0xe7, 0x6e, 0xaa, 0xb7, 0xca, 0x38, 0x22, + 0x93, 0x6b, 0x51, 0xcd, 0xba, 0x16, 0xf5, 0xbf, 0x53, 0x40, 0x9b, 0xce, 0x2d, 0x15, 0xc9, 0xa4, + 0x0f, 0x6d, 0x06, 0xc4, 0xe6, 0xae, 0x62, 0x49, 0x49, 0x7e, 0x14, 0x2e, 0x04, 0x9f, 0x04, 0x41, + 0x22, 0x1c, 0xa9, 0x57, 0x0c, 0x47, 0xfa, 0x2e, 0x7c, 0x27, 0x12, 0xa7, 0x1f, 0x06, 0xc4, 0x1c, + 0x3b, 0xee, 0xb0, 0x7b, 0x70, 0xe0, 0x13, 0x21, 0x38, 0x42, 0x90, 0xb7, 0xcd, 0xd0, 0x94, 0x82, + 0xf1, 0xff, 0x6c, 0xd1, 0x5b, 0x23, 0x8f, 0xc6, 0x8b, 0x9e, 0x13, 0xfa, 0x7f, 0xaa, 0x50, 0x9f, + 0x81, 0x8a, 0xd4, 0xfb, 0x02, 0x0a, 0x94, 0x84, 0x13, 0x5f, 0xba, 0x4a, 0x27, 0xb3, 0xc0, 0xf3, + 0xf1, 0x9a, 0x7d, 0x06, 0x86, 0x05, 0x26, 0x1a, 0x42, 0x29, 0x0c, 0x2f, 0x0c, 0xea, 0xfc, 0x34, + 0x4a, 0x08, 0xf6, 0xae, 0x8a, 0x3f, 0x20, 0xc1, 0xd8, 0x71, 0xcd, 0x51, 0xdf, 0xf9, 0x29, 0xc1, + 0xab, 0x61, 0x78, 0xc1, 0xfe, 0xa0, 0xe7, 0xcc, 0xe1, 0x6d, 0xc7, 0x95, 0x6a, 0x6f, 0x2f, 0x3b, + 0x4b, 0x42, 0xc1, 0x58, 0x20, 0x36, 0xf6, 0xa0, 0xc0, 0xbf, 0x69, 0x19, 0x47, 0xd4, 0x40, 0x0d, + 0xc3, 0x0b, 0x2e, 0x54, 0x09, 0xb3, 0xbf, 0x8d, 0xfb, 0x50, 0x4d, 0x7e, 0x01, 0x73, 0xa4, 0x13, + 0xe2, 0x0c, 0x4f, 0x84, 0x83, 0x15, 0xb0, 0xa4, 0x98, 0x25, 0x5f, 0x3b, 0xb6, 0x4c, 0x59, 0x0b, + 0x58, 0x10, 0xfa, 0xbf, 0xe5, 0xe0, 0xc6, 0x1c, 0xcd, 0x48, 0x67, 0x7d, 0x91, 0x72, 0xd6, 0x77, + 0xa4, 0x85, 0xc8, 0xe3, 0x5f, 0xa4, 0x3c, 0xfe, 0x1d, 0x82, 0xb3, 0x65, 0x73, 0x1d, 0x8a, 0xe4, + 0xdc, 0x09, 0x89, 0x2d, 0x55, 0x25, 0xa9, 0xc4, 0x72, 0xca, 0x5f, 0x75, 0x39, 0xed, 0xc3, 0x66, + 0x3b, 0x20, 0x66, 0x48, 0x64, 0x28, 0x8f, 0xfc, 0xff, 0x06, 0x94, 0xcc, 0xd1, 0xc8, 0xb3, 
0xa6, + 0x66, 0x5d, 0xe5, 0x74, 0xd7, 0x46, 0x0d, 0x28, 0x9d, 0x78, 0x34, 0x74, 0xcd, 0x31, 0x91, 0xc1, + 0x2b, 0xa6, 0xf5, 0x6f, 0x14, 0xd8, 0xba, 0x84, 0x27, 0xad, 0x70, 0x0c, 0x35, 0x87, 0x7a, 0x23, + 0xfe, 0x81, 0x46, 0xe2, 0x84, 0xf7, 0xa3, 0xc5, 0xb6, 0x9a, 0x6e, 0x84, 0xc1, 0x0f, 0x7c, 0x6b, + 0x4e, 0x92, 0xe4, 0x1e, 0xc7, 0x27, 0xb7, 0xe5, 0x4a, 0x8f, 0x48, 0xfd, 0x1f, 0x15, 0xd8, 0x92, + 0x3b, 0x7c, 0xf6, 0x0f, 0x9d, 0x15, 0x39, 0xf7, 0xae, 0x45, 0xd6, 0xeb, 0x70, 0xfd, 0xb2, 0x5c, + 0x32, 0xe6, 0xff, 0x5f, 0x01, 0xd0, 0xec, 0xe9, 0x12, 0x7d, 0x08, 0x55, 0x4a, 0x5c, 0xdb, 0x10, + 0xfb, 0x85, 0xd8, 0xca, 0x4a, 0xb8, 0xc2, 0x78, 0x62, 0xe3, 0xa0, 0x2c, 0x04, 0x92, 0x73, 0x29, + 0x6d, 0x09, 0xf3, 0xff, 0xe8, 0x04, 0xaa, 0x2f, 0xa9, 0x11, 0xcf, 0xcd, 0x1d, 0xaa, 0x96, 0x39, + 0xac, 0xcd, 0xca, 0xd1, 0x7c, 0xd4, 0x8f, 0xbf, 0x0b, 0x57, 0x5e, 0xd2, 0x98, 0x40, 0x3f, 0x57, + 0xe0, 0xbd, 0x28, 0xad, 0x98, 0xaa, 0x6f, 0xec, 0xd9, 0x84, 0xd6, 0xf3, 0x37, 0xd5, 0x5b, 0xb5, + 0xed, 0xc3, 0x2b, 0xe8, 0x6f, 0x86, 0xb9, 0xef, 0xd9, 0x04, 0x6f, 0xb9, 0x73, 0xb8, 0x14, 0x35, + 0xe1, 0xda, 0x78, 0x42, 0x43, 0x43, 0x78, 0x81, 0x21, 0x3b, 0xd5, 0x0b, 0x5c, 0x2f, 0x1b, 0xac, + 0x29, 0xe5, 0xab, 0xe8, 0x14, 0xd6, 0xc6, 0xde, 0xc4, 0x0d, 0x0d, 0x8b, 0x9f, 0x7f, 0x68, 0xbd, + 0xb8, 0xd0, 0xc1, 0x78, 0x8e, 0x96, 0xf6, 0x19, 0x9c, 0x38, 0x4d, 0x51, 0x5c, 0x1d, 0x27, 0x28, + 0xf4, 0x11, 0x54, 0x03, 0x32, 0xf6, 0x42, 0x62, 0xb0, 0x78, 0x49, 0xeb, 0xab, 0x4c, 0xaa, 0x87, + 0xb9, 0xba, 0x82, 0x2b, 0x82, 0xcf, 0xc2, 0x03, 0x45, 0xbf, 0x07, 0xd7, 0x6d, 0x87, 0x9a, 0xc7, + 0x23, 0x62, 0x8c, 0xbc, 0xa1, 0x31, 0x4d, 0x75, 0xea, 0x25, 0xfe, 0x19, 0x9b, 0xb2, 0x75, 0xcf, + 0x1b, 0xb6, 0xe3, 0x36, 0x3e, 0xea, 0xc2, 0x35, 0xc7, 0x8e, 0x65, 0xb0, 0x2f, 0x1b, 0x79, 0xa6, + 0x6d, 0x4c, 0x28, 0x09, 0x68, 0xbd, 0x2c, 0x47, 0x89, 0xd6, 0x67, 0xb2, 0xf1, 0x88, 0xb5, 0xe9, + 0xf7, 0xa0, 0x92, 0x30, 0x2b, 0x2a, 0x41, 0xbe, 0x77, 0xd0, 0xeb, 0x68, 0x2b, 0x08, 0xa0, 0xd8, + 0xde, 0xc5, 0x07, 0x07, 0x03, 0x71, 0x4a, 0xe9, 0xee, 0xb7, 0x1e, 0x77, 0xb4, 0x1c, 0x63, 0x1f, + 0xf5, 0xfe, 0xb0, 0xd3, 0xdd, 0xd3, 0x54, 0xbd, 0x03, 0xd5, 0xe4, 0xc7, 0x22, 0x04, 0xb5, 0xa3, + 0xde, 0xd3, 0xde, 0xc1, 0xb3, 0x9e, 0xb1, 0x7f, 0x70, 0xd4, 0x1b, 0xb0, 0xb3, 0x4e, 0x0d, 0xa0, + 0xd5, 0x7b, 0x3e, 0xa5, 0xd7, 0xa0, 0xdc, 0x3b, 0x88, 0x48, 0xa5, 0x91, 0xd3, 0x14, 0xfd, 0x3f, + 0x54, 0xd8, 0x9c, 0x67, 0x77, 0x64, 0x43, 0x9e, 0xf9, 0x90, 0x3c, 0x6d, 0xbe, 0x7b, 0x17, 0xe2, + 0xe8, 0x6c, 0xe9, 0xf8, 0xa6, 0xdc, 0x5e, 0xca, 0x98, 0xff, 0x47, 0x06, 0x14, 0x47, 0xe6, 0x31, + 0x19, 0xd1, 0xba, 0xca, 0xef, 0x63, 0x1e, 0x5f, 0x65, 0xee, 0x3d, 0x8e, 0x24, 0x2e, 0x63, 0x24, + 0x2c, 0x1a, 0x40, 0x85, 0x05, 0x50, 0x2a, 0x54, 0x27, 0x63, 0xfa, 0x76, 0xc6, 0x59, 0x76, 0xa7, + 0x23, 0x71, 0x12, 0xa6, 0x71, 0x17, 0x2a, 0x89, 0xc9, 0xe6, 0xdc, 0xa5, 0x6c, 0x26, 0xef, 0x52, + 0xca, 0xc9, 0x8b, 0x91, 0x07, 0xb3, 0x36, 0x60, 0x3a, 0x62, 0x0e, 0xb1, 0x7b, 0xd0, 0x1f, 0x88, + 0x53, 0xeb, 0x63, 0x7c, 0x70, 0x74, 0xa8, 0x29, 0x8c, 0x39, 0x68, 0xf5, 0x9f, 0x6a, 0xb9, 0xd8, + 0x5f, 0x54, 0xbd, 0x0d, 0x95, 0x84, 0x5c, 0xa9, 0x1d, 0x43, 0x49, 0xef, 0x18, 0x2c, 0x66, 0x9b, + 0xb6, 0x1d, 0x10, 0x4a, 0xa5, 0x1c, 0x11, 0xa9, 0xbf, 0x80, 0xf2, 0x4e, 0xaf, 0x2f, 0x21, 0xea, + 0xb0, 0x4a, 0x49, 0xc0, 0xbe, 0x9b, 0xdf, 0x8a, 0x95, 0x71, 0x44, 0x32, 0x70, 0x4a, 0xcc, 0xc0, + 0x3a, 0x21, 0x54, 0xe6, 0x19, 0x31, 0xcd, 0x46, 0x79, 0xfc, 0x76, 0x49, 0xd8, 0xae, 0x8c, 0x23, + 0x52, 0xff, 0xff, 0x12, 0xc0, 0xf4, 0xa6, 0x03, 0xd5, 0x20, 0x17, 0xc7, 0xff, 0x9c, 0x63, 0x33, + 0x3f, 0x48, 0xec, 
0x6f, 0xfc, 0x3f, 0xda, 0x86, 0xad, 0x31, 0x1d, 0xfa, 0xa6, 0x75, 0x6a, 0xc8, + 0x0b, 0x0a, 0x11, 0x26, 0x78, 0x2c, 0xad, 0xe2, 0x6b, 0xb2, 0x51, 0x46, 0x01, 0x81, 0xbb, 0x07, + 0x2a, 0x71, 0xcf, 0x78, 0xdc, 0xab, 0x6c, 0xdf, 0x5b, 0xf8, 0x06, 0xa6, 0xd9, 0x71, 0xcf, 0x84, + 0xaf, 0x30, 0x18, 0x64, 0x00, 0xd8, 0xe4, 0xcc, 0xb1, 0x88, 0xc1, 0x40, 0x0b, 0x1c, 0xf4, 0xcb, + 0xc5, 0x41, 0x77, 0x38, 0x46, 0x0c, 0x5d, 0xb6, 0x23, 0x1a, 0xf5, 0xa0, 0x1c, 0x10, 0xea, 0x4d, + 0x02, 0x8b, 0x88, 0xe0, 0x97, 0xfd, 0x90, 0x84, 0xa3, 0x71, 0x78, 0x0a, 0x81, 0x76, 0xa0, 0xc8, + 0x63, 0x1e, 0x8b, 0x6e, 0xea, 0xb7, 0x5e, 0xe7, 0xa6, 0xc1, 0x78, 0x24, 0xc1, 0x72, 0x2c, 0x7a, + 0x0c, 0xab, 0x42, 0x44, 0x5a, 0x2f, 0x71, 0x98, 0x4f, 0xb2, 0x06, 0x64, 0x3e, 0x0a, 0x47, 0xa3, + 0x99, 0x55, 0x59, 0x10, 0xe4, 0x31, 0xb0, 0x8c, 0xf9, 0x7f, 0xf4, 0x3e, 0x94, 0xc5, 0xfe, 0x6f, + 0x3b, 0x41, 0x1d, 0x84, 0x73, 0x72, 0xc6, 0x8e, 0x13, 0xa0, 0x0f, 0xa0, 0x22, 0xf2, 0x3c, 0x83, + 0x47, 0x85, 0x0a, 0x6f, 0x06, 0xc1, 0x3a, 0x64, 0xb1, 0x41, 0x74, 0x20, 0x41, 0x20, 0x3a, 0x54, + 0xe3, 0x0e, 0x24, 0x08, 0x78, 0x87, 0xdf, 0x81, 0x75, 0x9e, 0x1d, 0x0f, 0x03, 0x6f, 0xe2, 0x1b, + 0xdc, 0xa7, 0xd6, 0x78, 0xa7, 0x35, 0xc6, 0x7e, 0xcc, 0xb8, 0x3d, 0xe6, 0x5c, 0x37, 0xa0, 0xf4, + 0xca, 0x3b, 0x16, 0x1d, 0x6a, 0x62, 0x1d, 0xbc, 0xf2, 0x8e, 0xa3, 0xa6, 0x38, 0x43, 0x59, 0x4f, + 0x67, 0x28, 0x5f, 0xc3, 0xf5, 0xd9, 0xad, 0x96, 0x67, 0x2a, 0xda, 0xd5, 0x33, 0x95, 0x4d, 0x77, + 0x5e, 0x1c, 0x7e, 0x08, 0xaa, 0xed, 0xd2, 0xfa, 0xc6, 0x42, 0xce, 0x11, 0xaf, 0x63, 0xcc, 0x06, + 0xa3, 0x2d, 0x28, 0xb2, 0x8f, 0x75, 0xec, 0x3a, 0x12, 0xa1, 0xe7, 0x95, 0x77, 0xdc, 0xb5, 0xd1, + 0x77, 0xa0, 0xcc, 0xbe, 0x9f, 0xfa, 0xa6, 0x45, 0xea, 0xd7, 0x78, 0xcb, 0x94, 0xc1, 0x0c, 0xe5, + 0x7a, 0x36, 0x11, 0x2a, 0xda, 0x14, 0x86, 0x62, 0x0c, 0xae, 0xa3, 0xf7, 0x60, 0x95, 0x37, 0x3a, + 0x76, 0x7d, 0x4b, 0x1c, 0x42, 0x18, 0xd9, 0xb5, 0x91, 0x0e, 0x6b, 0xbe, 0x19, 0x10, 0x37, 0x34, + 0xe4, 0x8c, 0xd7, 0x79, 0x73, 0x45, 0x30, 0x9f, 0xb0, 0x79, 0x1b, 0x9f, 0x41, 0x29, 0x5a, 0x0c, + 0x8b, 0x84, 0xc9, 0xc6, 0x7d, 0xa8, 0xa5, 0x97, 0xd2, 0x42, 0x41, 0xf6, 0x9f, 0x73, 0x50, 0x8e, + 0x17, 0x0d, 0x72, 0xe1, 0x1a, 0x37, 0x2a, 0xcb, 0x56, 0x8d, 0xe9, 0x1a, 0x14, 0x39, 0xf2, 0x17, + 0x19, 0xd5, 0xdc, 0x8a, 0x10, 0xe4, 0x61, 0x5d, 0x2e, 0x48, 0x14, 0x23, 0x4f, 0xe7, 0xfb, 0x0a, + 0xd6, 0x47, 0x8e, 0x3b, 0x39, 0x4f, 0xcc, 0x25, 0x92, 0xdb, 0xdf, 0xcf, 0x38, 0xd7, 0x1e, 0x1b, + 0x3d, 0x9d, 0xa3, 0x36, 0x4a, 0xd1, 0x68, 0x17, 0x0a, 0xbe, 0x17, 0x84, 0xd1, 0x9e, 0x99, 0x75, + 0x37, 0x3b, 0xf4, 0x82, 0x70, 0xdf, 0xf4, 0x7d, 0x76, 0x7e, 0x13, 0x00, 0xfa, 0x37, 0x39, 0xb8, + 0x3e, 0xff, 0xc3, 0x50, 0x0f, 0x54, 0xcb, 0x9f, 0x48, 0x25, 0xdd, 0x5f, 0x54, 0x49, 0x6d, 0x7f, + 0x32, 0x95, 0x9f, 0x01, 0xa1, 0x67, 0x50, 0x1c, 0x93, 0xb1, 0x17, 0x5c, 0x48, 0x5d, 0x3c, 0x58, + 0x14, 0x72, 0x9f, 0x8f, 0x9e, 0xa2, 0x4a, 0x38, 0x84, 0xa1, 0x24, 0x17, 0x13, 0x95, 0x61, 0x7b, + 0xc1, 0x1b, 0xb6, 0x08, 0x12, 0xc7, 0x38, 0xfa, 0x67, 0xb0, 0x35, 0xf7, 0x53, 0xd0, 0x6f, 0x01, + 0x58, 0xfe, 0xc4, 0xe0, 0x2f, 0x20, 0xc2, 0x83, 0x54, 0x5c, 0xb6, 0xfc, 0x49, 0x9f, 0x33, 0xf4, + 0x17, 0x50, 0x7f, 0x93, 0xbc, 0x6c, 0x8d, 0x09, 0x89, 0x8d, 0xf1, 0x31, 0xd7, 0x81, 0x8a, 0x4b, + 0x82, 0xb1, 0x7f, 0xcc, 0x96, 0x52, 0xd4, 0x68, 0x9e, 0xb3, 0x0e, 0x2a, 0xef, 0x50, 0x91, 0x1d, + 0xcc, 0xf3, 0xfd, 0x63, 0xfd, 0x17, 0x39, 0x58, 0xbf, 0x24, 0x32, 0x3b, 0xc5, 0x8a, 0x00, 0x1c, + 0xdd, 0x0f, 0x08, 0x8a, 0x45, 0x63, 0xcb, 0xb1, 0xa3, 0x9b, 0x65, 0xfe, 0x9f, 0xef, 0xc3, 0xbe, + 0xbc, 0xf5, 0xcd, 0x39, 0x3e, 0x5b, 0x3e, 
0xe3, 0x63, 0x27, 0xa4, 0x3c, 0x29, 0x2a, 0x60, 0x41, + 0xa0, 0xe7, 0x50, 0x0b, 0x08, 0xdf, 0xff, 0x6d, 0x43, 0x78, 0x59, 0x61, 0x21, 0x2f, 0x93, 0x12, + 0x32, 0x67, 0xc3, 0x6b, 0x11, 0x12, 0xa3, 0x28, 0x7a, 0x06, 0x6b, 0x51, 0xe2, 0x2c, 0x90, 0x8b, + 0x4b, 0x23, 0x57, 0x25, 0x10, 0x07, 0xd6, 0xef, 0x42, 0x25, 0xd1, 0xc8, 0x3e, 0x8c, 0x67, 0x7f, + 0x52, 0x27, 0x82, 0x48, 0x47, 0x8b, 0x82, 0x8c, 0x16, 0xfa, 0x31, 0x54, 0x12, 0xeb, 0x62, 0x91, + 0xa1, 0x4c, 0x9f, 0xa1, 0xc7, 0xf5, 0x59, 0xc0, 0xb9, 0xd0, 0x63, 0x71, 0x92, 0x65, 0x5e, 0x86, + 0xe3, 0x73, 0x8d, 0x96, 0x71, 0x91, 0x91, 0x5d, 0x5f, 0xff, 0x55, 0x0e, 0x6a, 0xe9, 0x25, 0x1d, + 0xf9, 0x91, 0x4f, 0x02, 0xc7, 0xb3, 0x13, 0x7e, 0x74, 0xc8, 0x19, 0xcc, 0x57, 0x58, 0xf3, 0xd7, + 0x13, 0x2f, 0x34, 0x23, 0x5f, 0xb1, 0xfc, 0xc9, 0x1f, 0x30, 0xfa, 0x92, 0x0f, 0xaa, 0x97, 0x7c, + 0x10, 0x7d, 0x0c, 0x48, 0xba, 0xd2, 0xc8, 0x19, 0x3b, 0xa1, 0x71, 0x7c, 0x11, 0x12, 0x61, 0x63, + 0x15, 0x6b, 0xa2, 0x65, 0x8f, 0x35, 0x3c, 0x64, 0x7c, 0xe6, 0x78, 0x9e, 0x37, 0x36, 0xa8, 0xe5, + 0x05, 0xc4, 0x30, 0xed, 0x57, 0xfc, 0x00, 0xa7, 0xe2, 0x8a, 0xe7, 0x8d, 0xfb, 0x8c, 0xd7, 0xb2, + 0x5f, 0xb1, 0x8d, 0xd8, 0xf2, 0x27, 0x94, 0x84, 0x06, 0xfb, 0xe1, 0xb9, 0x4b, 0x19, 0x83, 0x60, + 0xb5, 0xfd, 0x09, 0x45, 0xdf, 0x85, 0xb5, 0xa8, 0x03, 0xdf, 0x8b, 0x65, 0x12, 0x50, 0x95, 0x5d, + 0x38, 0x0f, 0xe9, 0x50, 0x3d, 0x24, 0x81, 0x45, 0xdc, 0x70, 0xe0, 0x58, 0xa7, 0x94, 0x1f, 0xb1, + 0x14, 0x9c, 0xe2, 0x3d, 0xc9, 0x97, 0x56, 0xb5, 0x12, 0x8e, 0x66, 0x1b, 0x93, 0x31, 0xd5, 0xff, + 0x55, 0x81, 0x02, 0x4f, 0x59, 0x98, 0x52, 0xf8, 0x76, 0xcf, 0xb3, 0x01, 0x99, 0xea, 0x32, 0x06, + 0xcf, 0x05, 0xde, 0x87, 0x32, 0x57, 0x7e, 0xe2, 0x84, 0xc1, 0xf3, 0x60, 0xde, 0xd8, 0x80, 0x52, + 0x40, 0x4c, 0xdb, 0x73, 0x47, 0xd1, 0xc5, 0x58, 0x4c, 0xa3, 0xdf, 0x05, 0xcd, 0x0f, 0x3c, 0xdf, + 0x1c, 0x4e, 0xcf, 0xd2, 0xd2, 0x7c, 0xeb, 0x09, 0x3e, 0x4f, 0xd1, 0xbf, 0x0b, 0x6b, 0x94, 0x88, + 0xc8, 0x2e, 0x9c, 0xa4, 0x20, 0x3e, 0x53, 0x32, 0xf9, 0x89, 0x40, 0xff, 0x1a, 0x8a, 0x62, 0xe3, + 0xba, 0x82, 0xbc, 0x9f, 0x00, 0x12, 0x8a, 0x64, 0x0e, 0x32, 0x76, 0x28, 0x95, 0x59, 0x36, 0x7f, + 0xdd, 0x15, 0x2d, 0x87, 0xd3, 0x06, 0xfd, 0xbf, 0x14, 0x91, 0x6f, 0x8b, 0x77, 0x37, 0x96, 0x98, + 0xb3, 0x55, 0xc3, 0x8e, 0xb1, 0xe2, 0x82, 0x2f, 0x22, 0x51, 0x17, 0x8a, 0x32, 0xad, 0xce, 0x2d, + 0xfb, 0x6c, 0x29, 0x01, 0xa2, 0xeb, 0x7e, 0x22, 0x2f, 0x3b, 0x16, 0xbd, 0xee, 0x27, 0xe2, 0xba, + 0x9f, 0xa0, 0x0f, 0xa1, 0x2a, 0x13, 0x7e, 0x01, 0x97, 0xe7, 0xf9, 0x7e, 0xc5, 0x8e, 0xdf, 0x54, + 0x88, 0xfe, 0x3f, 0x4a, 0x1c, 0xf7, 0xa2, 0xb7, 0x0f, 0xf4, 0x15, 0x94, 0x58, 0x08, 0x31, 0xc6, + 0xa6, 0x2f, 0x5f, 0xf2, 0xdb, 0xcb, 0x3d, 0xab, 0x44, 0xbb, 0xa2, 0x48, 0xd7, 0x57, 0x7d, 0x41, + 0xb1, 0xf8, 0xc9, 0x8e, 0x4a, 0x51, 0xfc, 0x64, 0xff, 0xd1, 0x47, 0x50, 0x33, 0x27, 0xa1, 0x67, + 0x98, 0xf6, 0x19, 0x09, 0x42, 0x87, 0x12, 0xe9, 0x4b, 0x6b, 0x8c, 0xdb, 0x8a, 0x98, 0x8d, 0x7b, + 0x50, 0x4d, 0x62, 0xbe, 0x2d, 0x6f, 0x29, 0x24, 0xf3, 0x96, 0x3f, 0x05, 0x98, 0xde, 0x23, 0x32, + 0x1f, 0x21, 0xe7, 0x4e, 0x68, 0x58, 0xd1, 0xd9, 0xbc, 0x80, 0x4b, 0x8c, 0xd1, 0x66, 0xce, 0x98, + 0x7e, 0xe4, 0x28, 0x44, 0x8f, 0x1c, 0x2c, 0x3a, 0xb0, 0x05, 0x7d, 0xea, 0x8c, 0x46, 0xf1, 0xdd, + 0x66, 0xd9, 0xf3, 0xc6, 0x4f, 0x39, 0x43, 0xff, 0x75, 0x4e, 0xf8, 0x8a, 0x78, 0xae, 0xca, 0x74, + 0x36, 0x7b, 0x57, 0xa6, 0xbe, 0x0b, 0x40, 0x43, 0x33, 0x60, 0x49, 0x98, 0x19, 0xdd, 0xae, 0x36, + 0x66, 0x5e, 0x49, 0x06, 0x51, 0xfd, 0x0c, 0x2e, 0xcb, 0xde, 0xad, 0x10, 0x7d, 0x01, 0x55, 0xcb, + 0x1b, 0xfb, 0x23, 0x22, 0x07, 0x17, 0xde, 0x3a, 0xb8, 0x12, 0xf7, 
0x6f, 0x85, 0x89, 0x3b, 0xdd, + 0xe2, 0x55, 0xef, 0x74, 0x7f, 0xa5, 0x88, 0x57, 0xb7, 0xe4, 0xa3, 0x1f, 0x1a, 0xce, 0xa9, 0x2c, + 0x79, 0xbc, 0xe4, 0x0b, 0xe2, 0xb7, 0x95, 0x95, 0x34, 0xbe, 0xc8, 0x52, 0xc7, 0xf1, 0xe6, 0xb4, + 0xf8, 0xdf, 0x55, 0x28, 0xc7, 0x0f, 0x6e, 0x33, 0xb6, 0xff, 0x1c, 0xca, 0x71, 0xf1, 0x92, 0x0c, + 0x10, 0xdf, 0x6a, 0x9e, 0xb8, 0x33, 0x7a, 0x09, 0xc8, 0x1c, 0x0e, 0xe3, 0x74, 0xd7, 0x98, 0x50, + 0x73, 0x18, 0x3d, 0x77, 0x7e, 0xbe, 0x80, 0x1e, 0xa2, 0xfd, 0xf1, 0x88, 0x8d, 0xc7, 0x9a, 0x39, + 0x1c, 0xa6, 0x38, 0xe8, 0xcf, 0x60, 0x2b, 0x3d, 0x87, 0x71, 0x7c, 0x61, 0xf8, 0x8e, 0x2d, 0xef, + 0x00, 0x76, 0x17, 0x7d, 0x73, 0x6c, 0xa6, 0xe0, 0x1f, 0x5e, 0x1c, 0x3a, 0xb6, 0xd0, 0x39, 0x0a, + 0x66, 0x1a, 0x1a, 0x7f, 0x01, 0xef, 0xbd, 0xa1, 0xfb, 0x1c, 0x1b, 0xf4, 0xd2, 0xb5, 0x34, 0xcb, + 0x2b, 0x21, 0x61, 0xbd, 0x5f, 0x2a, 0xe2, 0x69, 0x34, 0xad, 0x93, 0x56, 0x32, 0x4f, 0xbf, 0x9d, + 0x71, 0x9e, 0xf6, 0xe1, 0x91, 0x80, 0xe7, 0xa9, 0xf9, 0x93, 0x4b, 0xa9, 0x79, 0xd6, 0x84, 0x4c, + 0x64, 0xb8, 0x02, 0x48, 0x22, 0xe8, 0xff, 0xa2, 0x42, 0x29, 0x42, 0xe7, 0x27, 0xf8, 0x0b, 0x1a, + 0x92, 0xb1, 0x11, 0x5f, 0x2f, 0x2a, 0x18, 0x04, 0x8b, 0xef, 0xa8, 0xef, 0x43, 0x79, 0x42, 0x49, + 0x20, 0x9a, 0x73, 0xbc, 0xb9, 0xc4, 0x18, 0xbc, 0xf1, 0x03, 0xa8, 0x84, 0x5e, 0x68, 0x8e, 0x8c, + 0x90, 0xe7, 0x0b, 0xaa, 0x18, 0xcd, 0x59, 0x3c, 0x5b, 0x40, 0xdf, 0x87, 0x8d, 0xf0, 0x24, 0xf0, + 0xc2, 0x70, 0xc4, 0x72, 0x55, 0x9e, 0x39, 0x89, 0x44, 0x27, 0x8f, 0xb5, 0xb8, 0x41, 0x64, 0x54, + 0x94, 0x45, 0xef, 0x69, 0x67, 0xe6, 0xba, 0x3c, 0x88, 0xe4, 0xf1, 0x5a, 0xcc, 0x65, 0xae, 0xcd, + 0x36, 0x4f, 0x5f, 0x64, 0x24, 0x3c, 0x56, 0x28, 0x38, 0x22, 0x91, 0x01, 0xeb, 0x63, 0x62, 0xd2, + 0x49, 0x40, 0x6c, 0xe3, 0xa5, 0x43, 0x46, 0xb6, 0xb8, 0x78, 0xa9, 0x65, 0x3e, 0x6e, 0x44, 0x6a, + 0x69, 0x3e, 0xe2, 0xa3, 0x71, 0x2d, 0x82, 0x13, 0x34, 0xcb, 0x1c, 0xc4, 0x3f, 0xb4, 0x0e, 0x95, + 0xfe, 0xf3, 0xfe, 0xa0, 0xb3, 0x6f, 0xec, 0x1f, 0xec, 0x74, 0x64, 0xb9, 0x54, 0xbf, 0x83, 0x05, + 0xa9, 0xb0, 0xf6, 0xc1, 0xc1, 0xa0, 0xb5, 0x67, 0x0c, 0xba, 0xed, 0xa7, 0x7d, 0x2d, 0x87, 0xb6, + 0x60, 0x63, 0xb0, 0x8b, 0x0f, 0x06, 0x83, 0xbd, 0xce, 0x8e, 0x71, 0xd8, 0xc1, 0xdd, 0x83, 0x9d, + 0xbe, 0xa6, 0x22, 0x04, 0xb5, 0x29, 0x7b, 0xd0, 0xdd, 0xef, 0x68, 0x79, 0x54, 0x81, 0xd5, 0xc3, + 0x0e, 0x6e, 0x77, 0x7a, 0x03, 0xad, 0xa0, 0xff, 0x42, 0x85, 0x4a, 0xc2, 0x8a, 0xcc, 0x91, 0x03, + 0x2a, 0xce, 0x35, 0x79, 0xcc, 0xfe, 0xf2, 0xe7, 0x5d, 0xd3, 0x3a, 0x11, 0xd6, 0xc9, 0x63, 0x41, + 0xf0, 0xb3, 0x8c, 0x79, 0x9e, 0x58, 0xe7, 0x79, 0x5c, 0x1a, 0x9b, 0xe7, 0x02, 0xe4, 0x43, 0xa8, + 0x9e, 0x92, 0xc0, 0x25, 0x23, 0xd9, 0x2e, 0x2c, 0x52, 0x11, 0x3c, 0xd1, 0xe5, 0x16, 0x68, 0xb2, + 0xcb, 0x14, 0x46, 0x98, 0xa3, 0x26, 0xf8, 0xfb, 0x11, 0xd8, 0x26, 0x14, 0x44, 0xf3, 0xaa, 0x98, + 0x9f, 0x13, 0x6c, 0x9b, 0xa2, 0xaf, 0x4d, 0x9f, 0xe7, 0x90, 0x79, 0xcc, 0xff, 0xa3, 0xe3, 0x59, + 0xfb, 0x14, 0xb9, 0x7d, 0xee, 0x2e, 0xee, 0xce, 0x6f, 0x32, 0xd1, 0x49, 0x6c, 0xa2, 0x55, 0x50, + 0x71, 0x54, 0x63, 0xd4, 0x6e, 0xb5, 0x77, 0x99, 0x59, 0xd6, 0xa0, 0xbc, 0xdf, 0xfa, 0x89, 0x71, + 0xd4, 0x17, 0x37, 0xf8, 0x1a, 0x54, 0x9f, 0x76, 0x70, 0xaf, 0xb3, 0x27, 0x39, 0x2a, 0xda, 0x04, + 0x4d, 0x72, 0xa6, 0xfd, 0xf2, 0x0c, 0x41, 0xfc, 0x2d, 0xa0, 0x12, 0xe4, 0xfb, 0xcf, 0x5a, 0x87, + 0x5a, 0x51, 0xff, 0xef, 0x1c, 0xac, 0x8b, 0x6d, 0x21, 0xae, 0x86, 0x78, 0xf3, 0x6b, 0x70, 0xf2, + 0x16, 0x2b, 0x97, 0xbe, 0xc5, 0x8a, 0x92, 0x50, 0xbe, 0xab, 0xab, 0xd3, 0x24, 0x94, 0xdf, 0xec, + 0xa4, 0x22, 0x7e, 0x7e, 0x91, 0x88, 0x5f, 0x87, 0xd5, 0x31, 0xa1, 0xb1, 0xdd, 0xca, 0x38, 
0x22, + 0x91, 0x03, 0x15, 0xd3, 0x75, 0xbd, 0xd0, 0x14, 0x57, 0xc3, 0xc5, 0x85, 0x36, 0xc3, 0x4b, 0x5f, + 0xdc, 0x6c, 0x4d, 0x91, 0x44, 0x60, 0x4e, 0x62, 0x37, 0x7e, 0x0c, 0xda, 0xe5, 0x0e, 0x8b, 0x6c, + 0x87, 0xdf, 0xfb, 0xc1, 0x74, 0x37, 0x24, 0x6c, 0x5d, 0xc8, 0x37, 0x15, 0x6d, 0x85, 0x11, 0xf8, + 0xa8, 0xd7, 0xeb, 0xf6, 0x1e, 0x6b, 0x0a, 0x02, 0x28, 0x76, 0x7e, 0xd2, 0x1d, 0x74, 0x76, 0xb4, + 0xdc, 0xf6, 0x2f, 0x37, 0xa0, 0x28, 0x84, 0x44, 0xdf, 0xc8, 0x4c, 0x20, 0x59, 0x69, 0x8b, 0x7e, + 0xbc, 0x70, 0x46, 0x9d, 0xaa, 0xde, 0x6d, 0x3c, 0x58, 0x7a, 0xbc, 0x7c, 0xd9, 0x5c, 0x41, 0x7f, + 0xa3, 0x40, 0x35, 0xf5, 0xaa, 0x99, 0xf5, 0x6a, 0x7c, 0x4e, 0x61, 0x6f, 0xe3, 0x47, 0x4b, 0x8d, + 0x8d, 0x65, 0xf9, 0xb9, 0x02, 0x95, 0x44, 0x49, 0x2b, 0xba, 0xbb, 0x4c, 0x19, 0xac, 0x90, 0xe4, + 0xde, 0xf2, 0x15, 0xb4, 0xfa, 0xca, 0xa7, 0x0a, 0xfa, 0x6b, 0x05, 0x2a, 0x89, 0xe2, 0xce, 0xcc, + 0xa2, 0xcc, 0x96, 0xa2, 0x66, 0x16, 0x65, 0x5e, 0x2d, 0xe9, 0x0a, 0xfa, 0x4b, 0x05, 0xca, 0x71, + 0xa1, 0x26, 0xba, 0xb3, 0x78, 0x69, 0xa7, 0x10, 0xe2, 0xf3, 0x65, 0x6b, 0x42, 0xf5, 0x15, 0xf4, + 0xe7, 0x50, 0x8a, 0xaa, 0x1a, 0x51, 0xd6, 0xdd, 0xeb, 0x52, 0xc9, 0x64, 0xe3, 0xce, 0xc2, 0xe3, + 0x92, 0xd3, 0x47, 0xa5, 0x86, 0x99, 0xa7, 0xbf, 0x54, 0x14, 0xd9, 0xb8, 0xb3, 0xf0, 0xb8, 0x78, + 0x7a, 0xe6, 0x09, 0x89, 0x8a, 0xc4, 0xcc, 0x9e, 0x30, 0x5b, 0x0a, 0x99, 0xd9, 0x13, 0xe6, 0x15, + 0x40, 0x0a, 0x41, 0x12, 0x35, 0x8d, 0x99, 0x05, 0x99, 0xad, 0x9b, 0xcc, 0x2c, 0xc8, 0x9c, 0x12, + 0x4a, 0x7d, 0x05, 0xfd, 0x4c, 0x49, 0x9e, 0x0b, 0xee, 0x2c, 0x5c, 0xba, 0xb7, 0xa0, 0x4b, 0xce, + 0x14, 0x0f, 0xf2, 0x05, 0xfa, 0x33, 0x79, 0x8b, 0x21, 0x2a, 0xff, 0xd0, 0x22, 0x60, 0xa9, 0x62, + 0xc1, 0xc6, 0x67, 0xcb, 0x6d, 0x36, 0x5c, 0x88, 0xbf, 0x52, 0x00, 0xa6, 0x35, 0x82, 0x99, 0x85, + 0x98, 0x29, 0x4e, 0x6c, 0xdc, 0x5d, 0x62, 0x64, 0x72, 0x81, 0x44, 0x35, 0x4c, 0x99, 0x17, 0xc8, + 0xa5, 0x1a, 0xc6, 0xcc, 0x0b, 0xe4, 0x72, 0xfd, 0xa1, 0xbe, 0x82, 0xfe, 0x49, 0x81, 0x8d, 0x99, + 0x1a, 0x2a, 0xf4, 0xe0, 0x8a, 0x65, 0x74, 0x8d, 0x2f, 0x97, 0x07, 0x88, 0x44, 0xbb, 0xa5, 0x7c, + 0xaa, 0xa0, 0xbf, 0x55, 0x60, 0x2d, 0x5d, 0x5b, 0x92, 0x79, 0x97, 0x9a, 0x53, 0x8d, 0xd5, 0xb8, + 0xbf, 0xdc, 0xe0, 0x58, 0x5b, 0x7f, 0xaf, 0x40, 0x2d, 0x5d, 0x66, 0x84, 0xee, 0x2f, 0x16, 0x16, + 0x2e, 0x09, 0xf4, 0xc5, 0x92, 0xa3, 0x23, 0x89, 0x1e, 0xae, 0xfe, 0x51, 0x41, 0x64, 0x6f, 0x45, + 0xfe, 0xf3, 0xc3, 0xdf, 0x04, 0x00, 0x00, 0xff, 0xff, 0x51, 0x2f, 0x79, 0xbc, 0x10, 0x35, 0x00, + 0x00, } // Reference imports to suppress errors if they are not otherwise used. diff --git a/plugins/drivers/proto/driver.proto b/plugins/drivers/proto/driver.proto index 1a8ca941a36..707aeee9444 100644 --- a/plugins/drivers/proto/driver.proto +++ b/plugins/drivers/proto/driver.proto @@ -390,9 +390,9 @@ message DriverCapabilities { // MountConfigs indicates whether the driver supports mount configurations. MountConfigs mount_configs = 6; - // previous remote_tasks field no longer used by nomad clients - reserved 7; - reserved "remote_tasks"; + // remote_tasks indicates whether the driver executes tasks remotely such + // on cloud runtimes like AWS ECS. 
+  bool remote_tasks = 7 [deprecated=true];
 
   // disable_log_collection indicates whether the driver has the capability of
   // disabling log collection
diff --git a/plugins/drivers/server.go b/plugins/drivers/server.go
index 1bd105e81e2..36a9d96dc5b 100644
--- a/plugins/drivers/server.go
+++ b/plugins/drivers/server.go
@@ -49,6 +49,7 @@ func (b *driverPluginServer) Capabilities(ctx context.Context, req *proto.Capabi
 			Exec:                  caps.Exec,
 			MustCreateNetwork:     caps.MustInitiateNetwork,
 			NetworkIsolationModes: []proto.NetworkIsolationSpec_NetworkIsolationMode{},
+			RemoteTasks:           caps.RemoteTasks,
 			DynamicWorkloadUsers:  caps.DynamicWorkloadUsers,
 		},
 	}
diff --git a/plugins/drivers/task_handle.go b/plugins/drivers/task_handle.go
index 488134ae7f3..701fb12a8a4 100644
--- a/plugins/drivers/task_handle.go
+++ b/plugins/drivers/task_handle.go
@@ -4,6 +4,7 @@
 package drivers
 
 import (
+	"github.com/hashicorp/nomad/nomad/structs"
 	"github.com/hashicorp/nomad/plugins/base"
 )
 
@@ -48,3 +49,34 @@ func (h *TaskHandle) Copy() *TaskHandle {
 	copy(handle.DriverState, h.DriverState)
 	return handle
 }
+
+// Store this TaskHandle on the given TaskState.
+func (h *TaskHandle) Store(ts *structs.TaskState) {
+	if h == nil || len(h.DriverState) == 0 {
+		// No handle or state, clear existing state
+		ts.TaskHandle = nil
+		return
+	}
+
+	ds := make([]byte, len(h.DriverState))
+	copy(ds, h.DriverState)
+	ts.TaskHandle = &structs.TaskHandle{
+		Version:     h.Version,
+		DriverState: ds,
+	}
+}
+
+// NewTaskHandleFromState returns the TaskHandle stored in a TaskState or nil
+// if no handle was stored.
+func NewTaskHandleFromState(ts *structs.TaskState) *TaskHandle {
+	if ts.TaskHandle == nil {
+		return nil
+	}
+
+	th := TaskHandle{
+		Version:     ts.TaskHandle.Version,
+		DriverState: make([]byte, len(ts.TaskHandle.DriverState)),
+	}
+	copy(th.DriverState, ts.TaskHandle.DriverState)
+	return &th
+}
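// Illustrative sketch (not part of the upstream patch): the restored Store and
// NewTaskHandleFromState helpers round-trip a driver handle through the
// allocation's TaskState, which is what lets a replacement allocation reattach
// to a remote task. The opaque driver-state bytes below are placeholders; only
// the fields and functions shown in the diff above are used.
package main

import (
	"fmt"

	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/hashicorp/nomad/plugins/drivers"
)

func main() {
	handle := &drivers.TaskHandle{Version: 1, DriverState: []byte("opaque")}

	ts := &structs.TaskState{}
	handle.Store(ts) // copies Version and DriverState onto ts.TaskHandle

	restored := drivers.NewTaskHandleFromState(ts)
	fmt.Println(restored.Version, string(restored.DriverState)) // 1 opaque
}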
diff --git a/scheduler/annotate.go b/scheduler/annotate.go
index 4f50a7d8714..04187fd2c4a 100644
--- a/scheduler/annotate.go
+++ b/scheduler/annotate.go
@@ -182,13 +182,13 @@ FieldsLoop:
 	}
 
 	// Object changes that can be done in-place are log configs, services,
-	// constraints, affinity or spread.
+	// constraints.
 	if !destructive {
 	ObjectsLoop:
 		for _, oDiff := range diff.Objects {
 			switch oDiff.Name {
-			case "Service", "Constraint", "Affinity", "Spread":
+			case "Service", "Constraint":
 				continue
 			case "LogConfig":
 				for _, fDiff := range oDiff.Fields {
diff --git a/scheduler/annotate_test.go b/scheduler/annotate_test.go
index 09d8880b659..35cf2f61b8d 100644
--- a/scheduler/annotate_test.go
+++ b/scheduler/annotate_test.go
@@ -309,48 +309,6 @@ func TestAnnotateTask(t *testing.T) {
 			Parent:  &structs.TaskGroupDiff{Type: structs.DiffTypeEdited},
 			Desired: AnnotationForcesInplaceUpdate,
 		},
-		{
-			Diff: &structs.TaskDiff{
-				Type: structs.DiffTypeEdited,
-				Objects: []*structs.ObjectDiff{
-					{
-						Type: structs.DiffTypeAdded,
-						Name: "Affinity",
-						Fields: []*structs.FieldDiff{
-							{
-								Type: structs.DiffTypeAdded,
-								Name: "LTarget",
-								Old:  "",
-								New:  "baz",
-							},
-						},
-					},
-				},
-			},
-			Parent:  &structs.TaskGroupDiff{Type: structs.DiffTypeEdited},
-			Desired: AnnotationForcesInplaceUpdate,
-		},
-		{
-			Diff: &structs.TaskDiff{
-				Type: structs.DiffTypeEdited,
-				Objects: []*structs.ObjectDiff{
-					{
-						Type: structs.DiffTypeAdded,
-						Name: "Spread",
-						Fields: []*structs.FieldDiff{
-							{
-								Type: structs.DiffTypeAdded,
-								Name: "LTarget",
-								Old:  "",
-								New:  "baz",
-							},
-						},
-					},
-				},
-			},
-			Parent:  &structs.TaskGroupDiff{Type: structs.DiffTypeEdited},
-			Desired: AnnotationForcesInplaceUpdate,
-		},
 		{
 			Diff: &structs.TaskDiff{
 				Type: structs.DiffTypeEdited,
diff --git a/scheduler/context.go b/scheduler/context.go
index e48cefc3918..887607cf3be 100644
--- a/scheduler/context.go
+++ b/scheduler/context.go
@@ -51,13 +51,6 @@ type Context interface {
 	SendEvent(event interface{})
 }
 
-type ConstraintContext interface {
-	Metrics() *structs.AllocMetric
-	RegexpCache() map[string]*regexp.Regexp
-	VersionConstraintCache() map[string]VerConstraints
-	SemverConstraintCache() map[string]VerConstraints
-}
-
 // EvalCache is used to cache certain things during an evaluation
 type EvalCache struct {
 	reCache map[string]*regexp.Regexp
diff --git a/scheduler/feasible.go b/scheduler/feasible.go
index 9b655880103..9ff3878baac 100644
--- a/scheduler/feasible.go
+++ b/scheduler/feasible.go
@@ -8,7 +8,6 @@ import (
 	"fmt"
 	"reflect"
 	"regexp"
-	"slices"
 	"strconv"
 	"strings"
 
@@ -138,53 +137,40 @@ func NewRandomIterator(ctx Context, nodes []*structs.Node) *StaticIterator {
 // HostVolumeChecker is a FeasibilityChecker which returns whether a node has
 // the host volumes necessary to schedule a task group.
 type HostVolumeChecker struct {
-	ctx           Context
-	volumeReqs    []*structs.VolumeRequest
-	namespace     string
-	jobID         string
-	taskGroupName string
-	claims        []*structs.TaskGroupHostVolumeClaim
+	ctx Context
+
+	// volumes is a map[HostVolumeName][]RequestedVolume. The requested volumes are
+	// a slice because a single task group may request the same volume multiple times.
+	volumes map[string][]*structs.VolumeRequest
 }
 
 // NewHostVolumeChecker creates a HostVolumeChecker from a set of volumes
 func NewHostVolumeChecker(ctx Context) *HostVolumeChecker {
-	hostVolumeChecker := &HostVolumeChecker{
-		ctx:        ctx,
-		claims:     []*structs.TaskGroupHostVolumeClaim{},
-		volumeReqs: []*structs.VolumeRequest{},
+	return &HostVolumeChecker{
+		ctx: ctx,
 	}
-
-	return hostVolumeChecker
 }
 
 // SetVolumes takes the volumes required by a task group and updates the checker.
-func (h *HostVolumeChecker) SetVolumes(allocName, ns, jobID, taskGroupName string, volumes map[string]*structs.VolumeRequest) { - h.namespace = ns - h.jobID = jobID - h.taskGroupName = taskGroupName - h.volumeReqs = []*structs.VolumeRequest{} - - storedClaims, _ := h.ctx.State().GetTaskGroupHostVolumeClaimsForTaskGroup(nil, ns, jobID, taskGroupName) - - for raw := storedClaims.Next(); raw != nil; raw = storedClaims.Next() { - claim := raw.(*structs.TaskGroupHostVolumeClaim) - h.claims = append(h.claims, claim) - } - +func (h *HostVolumeChecker) SetVolumes(allocName string, volumes map[string]*structs.VolumeRequest) { + lookupMap := make(map[string][]*structs.VolumeRequest) + // Convert the map from map[DesiredName]Request to map[Source][]Request to improve + // lookup performance. Also filter non-host volumes. for _, req := range volumes { if req.Type != structs.VolumeTypeHost { - continue // filter CSI volumes + continue } if req.PerAlloc { // provide a unique volume source per allocation copied := req.Copy() copied.Source = copied.Source + structs.AllocSuffix(allocName) - h.volumeReqs = append(h.volumeReqs, copied) + lookupMap[copied.Source] = append(lookupMap[copied.Source], copied) } else { - h.volumeReqs = append(h.volumeReqs, req) + lookupMap[req.Source] = append(lookupMap[req.Source], req) } } + h.volumes = lookupMap } func (h *HostVolumeChecker) Feasible(candidate *structs.Node) bool { @@ -197,150 +183,38 @@ func (h *HostVolumeChecker) Feasible(candidate *structs.Node) bool { } func (h *HostVolumeChecker) hasVolumes(n *structs.Node) bool { + rLen := len(h.volumes) + hLen := len(n.HostVolumes) + // Fast path: Requested no volumes. No need to check further. - if len(h.volumeReqs) == 0 { + if rLen == 0 { return true } - proposed, err := h.ctx.ProposedAllocs(n.ID) - if err != nil { - return false // only hit this on state store invariant failure + // Fast path: Requesting more volumes than the node has, can't meet the criteria. + if rLen > hLen { + return false } - for _, req := range h.volumeReqs { - volCfg, ok := n.HostVolumes[req.Source] + for source, requests := range h.volumes { + nodeVolume, ok := n.HostVolumes[source] if !ok { return false } - if volCfg.ID != "" { // dynamic host volume - vol, err := h.ctx.State().HostVolumeByID(nil, h.namespace, volCfg.ID, false) - if err != nil || vol == nil { - // the dynamic host volume in the node fingerprint does not - // belong to the namespace we need. or the volume is no longer - // in the state store because the batched fingerprint update - // from a delete RPC is written before the delete RPC's raft - // entry completes - return false - } - if !h.hostVolumeIsAvailable(vol, - req.AccessMode, - req.AttachmentMode, - req.ReadOnly, - proposed, - ) { - return false - } - - if req.Sticky { - // the node is feasible if there are no remaining claims to - // fulfill or if there's an exact match - if len(h.claims) == 0 { - return true - } - - for _, c := range h.claims { - if c.VolumeID == vol.ID { - // if we have a match for a volume claim, delete this - // claim from the claims list in the feasibility - // checker. This is needed for situations when jobs get - // scaled up and new allocations need to be placed on - // the same node. - h.claims = slices.DeleteFunc(h.claims, func(c *structs.TaskGroupHostVolumeClaim) bool { - return c.VolumeID == vol.ID - }) - return true - } - } - return false - } - } else if !req.ReadOnly { - // this is a static host volume and can only be mounted ReadOnly, - // validate that no requests for it are ReadWrite. 
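Reviewer aside: after this revert the checker no longer needs a namespace, job ID, task group name, or claim lookups, only the allocation name and the group's volume requests. A hypothetical helper in package `scheduler` showing the reduced call pattern; it mirrors the `stack.go` and `feasible_test.go` call sites later in this patch:

```go
package scheduler

import "github.com/hashicorp/nomad/nomad/structs"

// firstFeasibleHostVolumeNode is a hypothetical helper, not part of this
// change, illustrating the simplified SetVolumes signature.
func firstFeasibleHostVolumeNode(ctx Context, allocName string, tg *structs.TaskGroup, nodes []*structs.Node) *structs.Node {
	checker := NewHostVolumeChecker(ctx)

	// tg.Volumes is keyed by the desired name; SetVolumes re-keys it by Source
	// (appending the alloc suffix for per_alloc requests) and drops CSI volumes.
	checker.SetVolumes(allocName, tg.Volumes)

	for _, n := range nodes {
		// Feasible passes only if every requested host volume exists on the
		// node with a compatible read-only/read-write configuration.
		if checker.Feasible(n) {
			return n
		}
	}
	return nil
}
```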
- if volCfg.ReadOnly { - return false - } - } - } - - return true -} - -// hostVolumeIsAvailable determines if a dynamic host volume is available for a request -func (h *HostVolumeChecker) hostVolumeIsAvailable( - vol *structs.HostVolume, - reqAccess structs.VolumeAccessMode, - reqAttach structs.VolumeAttachmentMode, - readOnly bool, - proposed []*structs.Allocation) bool { - - if vol.State != structs.HostVolumeStateReady { - return false - } - - // pick a default capability based on the read-only flag. this happens here - // in the scheduler rather than job submit because we don't know whether a - // host volume is dynamic or not until we try to schedule it (ex. the same - // name could be static on one node and dynamic on another) - if reqAccess == structs.HostVolumeAccessModeUnknown { - if readOnly { - reqAccess = structs.HostVolumeAccessModeSingleNodeReader - } else { - reqAccess = structs.HostVolumeAccessModeSingleNodeWriter - } - } - if reqAttach == structs.HostVolumeAttachmentModeUnknown { - reqAttach = structs.HostVolumeAttachmentModeFilesystem - } - - // check that the volume has the requested capability at all - var capOk bool - for _, cap := range vol.RequestedCapabilities { - if reqAccess == cap.AccessMode && - reqAttach == cap.AttachmentMode { - capOk = true - break + // If the volume supports being mounted as ReadWrite, we do not need to + // do further validation for readonly placement. + if !nodeVolume.ReadOnly { + continue } - } - if !capOk { - return false - } - switch reqAccess { - case structs.HostVolumeAccessModeSingleNodeReader: - return readOnly - case structs.HostVolumeAccessModeSingleNodeWriter: - return !readOnly - case structs.HostVolumeAccessModeSingleNodeSingleWriter: - // examine all proposed allocs on the node, including those that might - // not have yet been persisted. they have nil pointers to their Job, so - // we have to go back to the state store to get them - seen := map[string]struct{}{} - for _, alloc := range proposed { - uniqueGroup := alloc.JobNamespacedID().String() + alloc.TaskGroup - if _, ok := seen[uniqueGroup]; ok { - // all allocs for the same group will have the same read-only - // flag and capabilities, so we only need to check a given group - // once - continue - } - seen[uniqueGroup] = struct{}{} - job, err := h.ctx.State().JobByID(nil, alloc.Namespace, alloc.JobID) - if err != nil { + // The Volume can only be mounted ReadOnly, validate that no requests for + // it are ReadWrite. + for _, req := range requests { + if !req.ReadOnly { return false } - tg := job.LookupTaskGroup(alloc.TaskGroup) - for _, req := range tg.Volumes { - if req.Type == structs.VolumeTypeHost && req.Source == vol.Name { - if !req.ReadOnly { - return false - } - } - } } - - case structs.HostVolumeAccessModeSingleNodeMultiWriter: - // no contraint } return true @@ -878,12 +752,12 @@ func (iter *DistinctPropertyIterator) Reset() { // given set of constraints. This is used to filter on job, task group, and task // constraints. 
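Reviewer aside: with `ConstraintContext` deleted, the constraint checkers take the full scheduler `Context` again, which already supplies the regexp and version-constraint caches. A hypothetical sketch of the call pattern in package `scheduler`; it assumes `ConstraintChecker.Feasible`, consistent with the other feasibility checkers in this file:

```go
package scheduler

import "github.com/hashicorp/nomad/nomad/structs"

// nodeSatisfiesConstraints is a hypothetical helper, not part of this change,
// showing that constraint checking is driven directly off Context.
func nodeSatisfiesConstraints(ctx Context, node *structs.Node, cs []*structs.Constraint) bool {
	checker := NewConstraintChecker(ctx, cs)
	return checker.Feasible(node)
}
```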
type ConstraintChecker struct { - ctx ConstraintContext + ctx Context constraints []*structs.Constraint } // NewConstraintChecker creates a ConstraintChecker for a set of constraints -func NewConstraintChecker(ctx ConstraintContext, constraints []*structs.Constraint) *ConstraintChecker { +func NewConstraintChecker(ctx Context, constraints []*structs.Constraint) *ConstraintChecker { return &ConstraintChecker{ ctx: ctx, constraints: constraints, @@ -956,7 +830,7 @@ func resolveTarget(target string, node *structs.Node) (string, bool) { // checkConstraint checks if a constraint is satisfied. The lVal and rVal // interfaces may be nil. -func checkConstraint(ctx ConstraintContext, operand string, lVal, rVal interface{}, lFound, rFound bool) bool { +func checkConstraint(ctx Context, operand string, lVal, rVal interface{}, lFound, rFound bool) bool { // Check for constraints not handled by this checker. switch operand { case structs.ConstraintDistinctHosts, structs.ConstraintDistinctProperty: @@ -978,14 +852,14 @@ func checkConstraint(ctx ConstraintContext, operand string, lVal, rVal interface return !lFound case structs.ConstraintVersion: parser := newVersionConstraintParser(ctx) - return lFound && rFound && checkVersionMatch(parser, lVal, rVal) + return lFound && rFound && checkVersionMatch(ctx, parser, lVal, rVal) case structs.ConstraintSemver: parser := newSemverConstraintParser(ctx) - return lFound && rFound && checkVersionMatch(parser, lVal, rVal) + return lFound && rFound && checkVersionMatch(ctx, parser, lVal, rVal) case structs.ConstraintRegex: return lFound && rFound && checkRegexpMatch(ctx, lVal, rVal) case structs.ConstraintSetContains, structs.ConstraintSetContainsAll: - return lFound && rFound && checkSetContainsAll(lVal, rVal) + return lFound && rFound && checkSetContainsAll(ctx, lVal, rVal) case structs.ConstraintSetContainsAny: return lFound && rFound && checkSetContainsAny(lVal, rVal) default: @@ -1069,7 +943,7 @@ func compareOrder[T cmp.Ordered](op string, left, right T) bool { // checkVersionMatch is used to compare a version on the // left hand side with a set of constraints on the right hand side -func checkVersionMatch(parse verConstraintParser, lVal, rVal interface{}) bool { +func checkVersionMatch(_ Context, parse verConstraintParser, lVal, rVal interface{}) bool { // Parse the version var versionStr string switch v := lVal.(type) { @@ -1105,7 +979,7 @@ func checkVersionMatch(parse verConstraintParser, lVal, rVal interface{}) bool { // checkAttributeVersionMatch is used to compare a version on the // left hand side with a set of constraints on the right hand side -func checkAttributeVersionMatch(parse verConstraintParser, lVal, rVal *psstructs.Attribute) bool { +func checkAttributeVersionMatch(_ Context, parse verConstraintParser, lVal, rVal *psstructs.Attribute) bool { // Parse the version var versionStr string if s, ok := lVal.GetString(); ok { @@ -1140,7 +1014,7 @@ func checkAttributeVersionMatch(parse verConstraintParser, lVal, rVal *psstructs // checkRegexpMatch is used to compare a value on the // left hand side with a regexp on the right hand side -func checkRegexpMatch(ctx ConstraintContext, lVal, rVal interface{}) bool { +func checkRegexpMatch(ctx Context, lVal, rVal interface{}) bool { // Ensure left-hand is string lStr, ok := lVal.(string) if !ok { @@ -1173,7 +1047,7 @@ func checkRegexpMatch(ctx ConstraintContext, lVal, rVal interface{}) bool { // checkSetContainsAll is used to see if the left hand side contains the // string on the right hand side -func 
checkSetContainsAll(lVal, rVal interface{}) bool { +func checkSetContainsAll(_ Context, lVal, rVal interface{}) bool { // Ensure left-hand is string lStr, ok := lVal.(string) if !ok { @@ -1550,7 +1424,7 @@ func resolveDeviceTarget(target string, d *structs.NodeDeviceResource) (*psstruc // checkAttributeConstraint checks if a constraint is satisfied. nil equality // comparisons are considered to be false. -func checkAttributeConstraint(ctx ConstraintContext, operand string, lVal, rVal *psstructs.Attribute, lFound, rFound bool) bool { +func checkAttributeConstraint(ctx Context, operand string, lVal, rVal *psstructs.Attribute, lFound, rFound bool) bool { // Check for constraints not handled by this checker. switch operand { case structs.ConstraintDistinctHosts, structs.ConstraintDistinctProperty: @@ -1610,7 +1484,7 @@ func checkAttributeConstraint(ctx ConstraintContext, operand string, lVal, rVal } parser := newVersionConstraintParser(ctx) - return checkAttributeVersionMatch(parser, lVal, rVal) + return checkAttributeVersionMatch(ctx, parser, lVal, rVal) case structs.ConstraintSemver: if !(lFound && rFound) { @@ -1618,7 +1492,7 @@ func checkAttributeConstraint(ctx ConstraintContext, operand string, lVal, rVal } parser := newSemverConstraintParser(ctx) - return checkAttributeVersionMatch(parser, lVal, rVal) + return checkAttributeVersionMatch(ctx, parser, lVal, rVal) case structs.ConstraintRegex: if !(lFound && rFound) { @@ -1642,7 +1516,7 @@ func checkAttributeConstraint(ctx ConstraintContext, operand string, lVal, rVal return false } - return checkSetContainsAll(ls, rs) + return checkSetContainsAll(ctx, ls, rs) case structs.ConstraintSetContainsAny: if !(lFound && rFound) { return false @@ -1676,7 +1550,7 @@ type VerConstraints interface { // or semver). 
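Reviewer aside: the version and semver parsers differ only in the constraint grammar they accept; both cache parsed constraints on the `Context`. A hypothetical helper, assuming `VerConstraints` exposes `Check(*version.Version) bool` as in upstream Nomad (that method is not visible in this hunk):

```go
package scheduler

import version "github.com/hashicorp/go-version"

// versionSatisfies is a hypothetical helper, not part of this change. The
// parser returned by newVersionConstraintParser caches parsed constraints on
// the Context and returns nil for constraints that fail to parse.
func versionSatisfies(ctx Context, nodeVersion, constraint string) bool {
	parse := newVersionConstraintParser(ctx) // or newSemverConstraintParser
	c := parse(constraint)                   // e.g. ">= 1.6.0, < 1.8.0"
	if c == nil {
		return false
	}
	v, err := version.NewVersion(nodeVersion)
	if err != nil {
		return false
	}
	return c.Check(v)
}
```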
type verConstraintParser func(verConstraint string) VerConstraints -func newVersionConstraintParser(ctx ConstraintContext) verConstraintParser { +func newVersionConstraintParser(ctx Context) verConstraintParser { cache := ctx.VersionConstraintCache() return func(cstr string) VerConstraints { @@ -1694,7 +1568,7 @@ func newVersionConstraintParser(ctx ConstraintContext) verConstraintParser { } } -func newSemverConstraintParser(ctx ConstraintContext) verConstraintParser { +func newSemverConstraintParser(ctx Context) verConstraintParser { cache := ctx.SemverConstraintCache() return func(cstr string) VerConstraints { diff --git a/scheduler/feasible_test.go b/scheduler/feasible_test.go index bd495676875..f552b70c9f3 100644 --- a/scheduler/feasible_test.go +++ b/scheduler/feasible_test.go @@ -91,7 +91,7 @@ func TestRandomIterator(t *testing.T) { } } -func TestHostVolumeChecker_Static(t *testing.T) { +func TestHostVolumeChecker(t *testing.T) { ci.Parallel(t) _, ctx := testContext(t) @@ -176,68 +176,21 @@ func TestHostVolumeChecker_Static(t *testing.T) { alloc := mock.Alloc() alloc.NodeID = nodes[2].ID - job := mock.Job() - taskGroup := job.TaskGroups[0] - for i, c := range cases { - checker.SetVolumes(alloc.Name, structs.DefaultNamespace, job.ID, taskGroup.Name, c.RequestedVolumes) + checker.SetVolumes(alloc.Name, c.RequestedVolumes) if act := checker.Feasible(c.Node); act != c.Result { t.Fatalf("case(%d) failed: got %v; want %v", i, act, c.Result) } } } -func TestHostVolumeChecker_Dynamic(t *testing.T) { +func TestHostVolumeChecker_ReadOnly(t *testing.T) { ci.Parallel(t) - store, ctx := testContext(t) - + _, ctx := testContext(t) nodes := []*structs.Node{ mock.Node(), mock.Node(), - mock.Node(), - mock.Node(), - mock.Node(), - } - - hostVolCapsReadWrite := []*structs.HostVolumeCapability{ - { - AttachmentMode: structs.HostVolumeAttachmentModeFilesystem, - AccessMode: structs.HostVolumeAccessModeSingleNodeReader, - }, - { - AttachmentMode: structs.HostVolumeAttachmentModeFilesystem, - AccessMode: structs.HostVolumeAccessModeSingleNodeWriter, - }, - } - hostVolCapsReadOnly := []*structs.HostVolumeCapability{{ - AttachmentMode: structs.HostVolumeAttachmentModeFilesystem, - AccessMode: structs.HostVolumeAccessModeSingleNodeReader, - }} - - dhvNotReady := &structs.HostVolume{ - Namespace: structs.DefaultNamespace, - ID: uuid.Generate(), - Name: "foo", - NodeID: nodes[2].ID, - RequestedCapabilities: hostVolCapsReadOnly, - State: structs.HostVolumeStateUnavailable, - } - dhvReadOnly := &structs.HostVolume{ - Namespace: structs.DefaultNamespace, - ID: uuid.Generate(), - Name: "foo", - NodeID: nodes[3].ID, - RequestedCapabilities: hostVolCapsReadOnly, - State: structs.HostVolumeStateReady, - } - dhvReadWrite := &structs.HostVolume{ - Namespace: structs.DefaultNamespace, - ID: uuid.Generate(), - Name: "foo", - NodeID: nodes[4].ID, - RequestedCapabilities: hostVolCapsReadWrite, - State: structs.HostVolumeStateReady, } nodes[0].HostVolumes = map[string]*structs.ClientHostVolumeConfig{ @@ -250,25 +203,6 @@ func TestHostVolumeChecker_Dynamic(t *testing.T) { ReadOnly: false, }, } - nodes[2].HostVolumes = map[string]*structs.ClientHostVolumeConfig{} - nodes[3].HostVolumes = map[string]*structs.ClientHostVolumeConfig{ - "foo": {ID: dhvReadOnly.ID}, - } - nodes[4].HostVolumes = map[string]*structs.ClientHostVolumeConfig{ - "foo": {ID: dhvReadWrite.ID}, - } - - for _, node := range nodes { - must.NoError(t, store.UpsertNode(structs.MsgTypeTestSetup, 1000, node)) - } - - must.NoError(t, store.UpsertHostVolume(1000, 
dhvNotReady)) - must.NoError(t, store.UpsertHostVolume(1000, dhvReadOnly)) - must.NoError(t, store.UpsertHostVolume(1000, dhvReadWrite)) - - // reinsert unavailable node to set the correct state on the unavailable - // volume - must.NoError(t, store.UpsertNode(structs.MsgTypeTestSetup, 1000, nodes[2])) readwriteRequest := map[string]*structs.VolumeRequest{ "foo": { @@ -285,390 +219,43 @@ func TestHostVolumeChecker_Dynamic(t *testing.T) { }, } - dhvReadOnlyRequest := map[string]*structs.VolumeRequest{ - "foo": { - Type: "host", - Source: "foo", - ReadOnly: true, - AccessMode: structs.CSIVolumeAccessModeSingleNodeReader, - AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, - }, - } - dhvReadWriteRequest := map[string]*structs.VolumeRequest{ - "foo": { - Type: "host", - Source: "foo", - AccessMode: structs.CSIVolumeAccessModeSingleNodeWriter, - AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, - }, - } - checker := NewHostVolumeChecker(ctx) cases := []struct { - name string - node *structs.Node - requestedVolumes map[string]*structs.VolumeRequest - expect bool + Node *structs.Node + RequestedVolumes map[string]*structs.VolumeRequest + Result bool }{ - { - name: "read-write request / read-only host", - node: nodes[0], - requestedVolumes: readwriteRequest, - expect: false, - }, - { - name: "read-only request / read-only host", - node: nodes[0], - requestedVolumes: readonlyRequest, - expect: true, - }, - { - name: "read-only request / read-write host", - node: nodes[1], - requestedVolumes: readonlyRequest, - expect: true, - }, - { - name: "read-write request / read-write host", - node: nodes[1], - requestedVolumes: readwriteRequest, - expect: true, - }, - { - name: "dynamic single-reader request / host not ready", - node: nodes[2], - requestedVolumes: dhvReadOnlyRequest, - expect: false, + { // ReadWrite Request, ReadOnly Host + Node: nodes[0], + RequestedVolumes: readwriteRequest, + Result: false, }, - { - name: "dynamic single-reader request / caps match", - node: nodes[3], - requestedVolumes: dhvReadOnlyRequest, - expect: true, + { // ReadOnly Request, ReadOnly Host + Node: nodes[0], + RequestedVolumes: readonlyRequest, + Result: true, }, - { - name: "dynamic single-reader request / no matching cap", - node: nodes[4], - requestedVolumes: dhvReadOnlyRequest, - expect: true, + { // ReadOnly Request, ReadWrite Host + Node: nodes[1], + RequestedVolumes: readonlyRequest, + Result: true, }, - { - name: "dynamic single-writer request / caps match", - node: nodes[4], - requestedVolumes: dhvReadWriteRequest, - expect: true, + { // ReadWrite Request, ReadWrite Host + Node: nodes[1], + RequestedVolumes: readwriteRequest, + Result: true, }, } alloc := mock.Alloc() alloc.NodeID = nodes[1].ID - job := mock.Job() - taskGroupName := job.TaskGroups[0].Name - - for _, tc := range cases { - t.Run(tc.name, func(t *testing.T) { - checker.SetVolumes(alloc.Name, structs.DefaultNamespace, job.ID, taskGroupName, tc.requestedVolumes) - actual := checker.Feasible(tc.node) - must.Eq(t, tc.expect, actual) - }) - } -} - -func TestHostVolumeChecker_Sticky(t *testing.T) { - ci.Parallel(t) - - store, ctx := testContext(t) - - nodes := []*structs.Node{ - mock.Node(), - mock.Node(), - mock.Node(), - } - - hostVolCapsReadWrite := []*structs.HostVolumeCapability{ - { - AttachmentMode: structs.HostVolumeAttachmentModeFilesystem, - AccessMode: structs.HostVolumeAccessModeSingleNodeReader, - }, - { - AttachmentMode: structs.HostVolumeAttachmentModeFilesystem, - AccessMode: 
structs.HostVolumeAccessModeSingleNodeWriter, - }, - } - - dhv1 := &structs.HostVolume{ - Namespace: structs.DefaultNamespace, - ID: uuid.Generate(), - Name: "foo", - NodeID: nodes[1].ID, - RequestedCapabilities: hostVolCapsReadWrite, - State: structs.HostVolumeStateReady, - } - dhv2 := &structs.HostVolume{ - Namespace: structs.DefaultNamespace, - ID: uuid.Generate(), - Name: "foo", - NodeID: nodes[2].ID, - RequestedCapabilities: hostVolCapsReadWrite, - State: structs.HostVolumeStateReady, - } - - // node0 doesn't have the desired volume, but both node2 and node2 do - nodes[0].HostVolumes = map[string]*structs.ClientHostVolumeConfig{} - nodes[1].HostVolumes = map[string]*structs.ClientHostVolumeConfig{ - "foo": {ID: dhv1.ID}, - } - nodes[2].HostVolumes = map[string]*structs.ClientHostVolumeConfig{ - "foo": {ID: dhv2.ID}, - } - - for _, node := range nodes { - must.NoError(t, store.UpsertNode(structs.MsgTypeTestSetup, 1000, node)) - } - must.NoError(t, store.UpsertHostVolume(1000, dhv1)) - must.NoError(t, store.UpsertHostVolume(1000, dhv2)) - - stickyRequests := map[string]*structs.VolumeRequest{ - "foo": { - Type: "host", - Source: "foo", - Sticky: true, - AccessMode: structs.CSIVolumeAccessModeSingleNodeWriter, - AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, - }, - } - stickyJob := mock.Job() - stickyJob.TaskGroups[0].Volumes = stickyRequests - - // claims are only present for node1 - existingClaims := []*structs.TaskGroupHostVolumeClaim{ - { - Namespace: structs.DefaultNamespace, - JobID: stickyJob.ID, - TaskGroupName: stickyJob.TaskGroups[0].Name, - VolumeID: dhv1.ID, - VolumeName: dhv1.Name, - }, - { - Namespace: "foo", // make sure we filter by ns correctly - JobID: stickyJob.ID, - TaskGroupName: stickyJob.TaskGroups[0].Name, - VolumeID: dhv1.ID, - VolumeName: dhv1.Name, - }, - { - Namespace: structs.DefaultNamespace, - JobID: "fooooo", // make sure we filter by jobID correctly - TaskGroupName: stickyJob.TaskGroups[0].Name, - VolumeID: dhv1.ID, - VolumeName: dhv1.Name, - }, - } - - for _, claim := range existingClaims { - must.NoError(t, store.UpsertTaskGroupHostVolumeClaim(structs.MsgTypeTestSetup, 1000, claim)) - } - - cases := []struct { - name string - node *structs.Node - job *structs.Job - expect bool - expectedClaimsInTheVolumeChecker int - }{ - { - "requesting a sticky volume on an infeasible node", - nodes[0], - stickyJob, - false, - 1, - }, - { - "requesting a sticky volume on a feasible node, existing claim", - nodes[1], - stickyJob, - true, - 0, - }, - { - "requesting a sticky volume on a feasible node, new claim", - nodes[1], - mock.Job(), - true, - 0, - }, - { - "requesting a sticky volume on a feasible node, but there is an existing claim for another vol ID on a different node", - nodes[0], - stickyJob, - false, - 1, - }, - { - "requesting a sticky volume on a node that has it, but it's claimed by a different alloc", - nodes[2], - stickyJob, - false, - 1, - }, - } - - for _, tc := range cases { - t.Run(tc.name, func(t *testing.T) { - checker := NewHostVolumeChecker(ctx) - checker.SetVolumes(mock.Alloc().Name, structs.DefaultNamespace, tc.job.ID, tc.job.TaskGroups[0].Name, stickyRequests) - actual := checker.Feasible(tc.node) - must.Eq(t, tc.expect, actual) - must.Eq(t, tc.expectedClaimsInTheVolumeChecker, len(checker.claims)) - }) - } -} - -// TestDynamicHostVolumeIsAvailable provides fine-grained coverage of the -// hostVolumeIsAvailable method -func TestDynamicHostVolumeIsAvailable(t *testing.T) { - - store, ctx := testContext(t) - - allCaps := 
[]*structs.HostVolumeCapability{} - - for _, accessMode := range []structs.VolumeAccessMode{ - structs.HostVolumeAccessModeSingleNodeReader, - structs.HostVolumeAccessModeSingleNodeWriter, - structs.HostVolumeAccessModeSingleNodeSingleWriter, - structs.HostVolumeAccessModeSingleNodeMultiWriter, - } { - for _, attachMode := range []structs.VolumeAttachmentMode{ - structs.HostVolumeAttachmentModeFilesystem, - structs.HostVolumeAttachmentModeBlockDevice, - } { - allCaps = append(allCaps, &structs.HostVolumeCapability{ - AttachmentMode: attachMode, - AccessMode: accessMode, - }) + for i, c := range cases { + checker.SetVolumes(alloc.Name, c.RequestedVolumes) + if act := checker.Feasible(c.Node); act != c.Result { + t.Fatalf("case(%d) failed: got %v; want %v", i, act, c.Result) } } - - jobReader, jobWriter := mock.Job(), mock.Job() - jobReader.TaskGroups[0].Volumes = map[string]*structs.VolumeRequest{ - "example": { - Type: structs.VolumeTypeHost, - Source: "example", - ReadOnly: true, - }, - } - jobWriter.TaskGroups[0].Volumes = map[string]*structs.VolumeRequest{ - "example": { - Type: structs.VolumeTypeHost, - Source: "example", - }, - } - index, _ := store.LatestIndex() - index++ - must.NoError(t, store.UpsertJob(structs.MsgTypeTestSetup, index, nil, jobReader)) - index++ - must.NoError(t, store.UpsertJob(structs.MsgTypeTestSetup, index, nil, jobWriter)) - - allocReader0, allocReader1 := mock.Alloc(), mock.Alloc() - allocReader0.JobID = jobReader.ID - allocReader1.JobID = jobReader.ID - - allocWriter0, allocWriter1 := mock.Alloc(), mock.Alloc() - allocWriter0.JobID = jobWriter.ID - allocWriter1.JobID = jobWriter.ID - - index++ - must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, index, - []*structs.Allocation{allocReader0, allocReader1, allocWriter0, allocWriter1})) - - testCases := []struct { - name string - hasProposed []*structs.Allocation - hasCaps []*structs.HostVolumeCapability - wantAccess structs.VolumeAccessMode - wantAttach structs.VolumeAttachmentMode - readOnly bool - expect bool - }{ - { - name: "enforce attachment mode", - hasCaps: []*structs.HostVolumeCapability{{ - AttachmentMode: structs.HostVolumeAttachmentModeBlockDevice, - AccessMode: structs.HostVolumeAccessModeSingleNodeSingleWriter, - }}, - wantAttach: structs.HostVolumeAttachmentModeFilesystem, - wantAccess: structs.HostVolumeAccessModeSingleNodeSingleWriter, - expect: false, - }, - { - name: "enforce read only", - hasProposed: []*structs.Allocation{allocReader0, allocReader1}, - wantAttach: structs.HostVolumeAttachmentModeFilesystem, - wantAccess: structs.HostVolumeAccessModeSingleNodeReader, - expect: false, - }, - { - name: "enforce read only ok", - hasProposed: []*structs.Allocation{allocReader0, allocReader1}, - wantAttach: structs.HostVolumeAttachmentModeFilesystem, - wantAccess: structs.HostVolumeAccessModeSingleNodeReader, - readOnly: true, - expect: true, - }, - { - name: "enforce single writer", - hasProposed: []*structs.Allocation{allocReader0, allocReader1, allocWriter0}, - wantAttach: structs.HostVolumeAttachmentModeFilesystem, - wantAccess: structs.HostVolumeAccessModeSingleNodeSingleWriter, - expect: false, - }, - { - name: "enforce single writer ok", - hasProposed: []*structs.Allocation{allocReader0, allocReader1}, - wantAttach: structs.HostVolumeAttachmentModeFilesystem, - wantAccess: structs.HostVolumeAccessModeSingleNodeSingleWriter, - expect: true, - }, - { - name: "multi writer is always ok", - hasProposed: []*structs.Allocation{allocReader0, allocWriter0, allocWriter1}, - wantAttach: 
structs.HostVolumeAttachmentModeFilesystem, - wantAccess: structs.HostVolumeAccessModeSingleNodeMultiWriter, - expect: true, - }, - { - name: "default capabilities ok", - expect: true, - }, - { - name: "default capabilities fail", - readOnly: true, - hasCaps: []*structs.HostVolumeCapability{{ - AttachmentMode: structs.HostVolumeAttachmentModeBlockDevice, - AccessMode: structs.HostVolumeAccessModeSingleNodeSingleWriter, - }}, - expect: false, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - vol := &structs.HostVolume{ - Name: "example", - State: structs.HostVolumeStateReady, - } - if len(tc.hasCaps) > 0 { - vol.RequestedCapabilities = tc.hasCaps - } else { - vol.RequestedCapabilities = allCaps - } - checker := NewHostVolumeChecker(ctx) - must.Eq(t, tc.expect, checker.hostVolumeIsAvailable( - vol, tc.wantAccess, tc.wantAttach, tc.readOnly, tc.hasProposed)) - }) - } - } func TestCSIVolumeChecker(t *testing.T) { @@ -1676,7 +1263,7 @@ func TestCheckVersionConstraint(t *testing.T) { for _, tc := range cases { _, ctx := testContext(t) p := newVersionConstraintParser(ctx) - if res := checkVersionMatch(p, tc.lVal, tc.rVal); res != tc.result { + if res := checkVersionMatch(ctx, p, tc.lVal, tc.rVal); res != tc.result { t.Fatalf("TC: %#v, Result: %v", tc, res) } } @@ -1758,7 +1345,7 @@ func TestCheckSemverConstraint(t *testing.T) { t.Run(tc.name, func(t *testing.T) { _, ctx := testContext(t) p := newSemverConstraintParser(ctx) - actual := checkVersionMatch(p, tc.lVal, tc.rVal) + actual := checkVersionMatch(ctx, p, tc.lVal, tc.rVal) must.Eq(t, tc.result, actual) }) } diff --git a/scheduler/generic_sched.go b/scheduler/generic_sched.go index e341625cd0f..6eb7177cf06 100644 --- a/scheduler/generic_sched.go +++ b/scheduler/generic_sched.go @@ -689,6 +689,10 @@ func (s *GenericScheduler) computePlacements(destructive, place []placementResul if missing.IsRescheduling() { updateRescheduleTracker(alloc, prevAllocation, now) } + + // If the allocation has task handles, + // copy them to the new allocation + propagateTaskState(alloc, prevAllocation, missing.PreviousLost()) } // If we are placing a canary and we found a match, add the canary @@ -778,6 +782,46 @@ func needsToSetNodes(a, b *structs.Job) bool { a.NodePool != b.NodePool } +// propagateTaskState copies task handles from previous allocations to +// replacement allocations when the previous allocation is being drained or was +// lost. Remote task drivers rely on this to reconnect to remote tasks when the +// allocation managing them changes due to a down or draining node. +// +// The previous allocation will be marked as lost after task state has been +// propagated (when the plan is applied), so its ClientStatus is not yet marked +// as lost. Instead, we use the `prevLost` flag to track whether the previous +// allocation will be marked lost. +func propagateTaskState(newAlloc, prev *structs.Allocation, prevLost bool) { + // Don't transfer state from client terminal allocs + if prev.ClientTerminalStatus() { + return + } + + // If previous allocation is not lost and not draining, do not copy + // task handles. 
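Reviewer aside: a compact, hypothetical illustration of the rule spelled out in the comments above. The previous allocation is non-terminal and draining (Migrate set), so `prevLost` is false and the handle is still copied; field names come from the structs used elsewhere in this patch, and the `helper/pointer` import path is assumed:

```go
package scheduler

import (
	"github.com/hashicorp/nomad/helper/pointer"
	"github.com/hashicorp/nomad/nomad/structs"
)

// exampleDrainPropagation is illustrative only, not part of this change.
func exampleDrainPropagation() *structs.Allocation {
	prev := &structs.Allocation{
		ClientStatus:      structs.AllocClientStatusRunning, // not client-terminal
		DesiredTransition: structs.DesiredTransition{Migrate: pointer.Of(true)},
		TaskStates: map[string]*structs.TaskState{
			"web": {TaskHandle: &structs.TaskHandle{Version: 1, DriverState: []byte("remote-task-id")}},
		},
	}

	newAlloc := &structs.Allocation{
		// The replacement must still define the task or its handle is dropped.
		AllocatedResources: &structs.AllocatedResources{
			Tasks: map[string]*structs.AllocatedTaskResources{"web": nil},
		},
	}

	// prevLost=false: the node is draining rather than lost.
	propagateTaskState(newAlloc, prev, false)

	// newAlloc.TaskStates["web"].TaskHandle now holds a copy of prev's handle.
	return newAlloc
}
```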
+ if !prevLost && !prev.DesiredTransition.ShouldMigrate() { + return + } + + newAlloc.TaskStates = make(map[string]*structs.TaskState, len(newAlloc.AllocatedResources.Tasks)) + for taskName, prevState := range prev.TaskStates { + if prevState.TaskHandle == nil { + // No task handle, skip + continue + } + + if _, ok := newAlloc.AllocatedResources.Tasks[taskName]; !ok { + // Task dropped in update, skip + continue + } + + // Copy state + newState := structs.NewTaskState() + newState.TaskHandle = prevState.TaskHandle.Copy() + newAlloc.TaskStates[taskName] = newState + } +} + // getSelectOptions sets up preferred nodes and penalty nodes func getSelectOptions(prevAllocation *structs.Allocation, preferredNode *structs.Node) *SelectOptions { selectOptions := &SelectOptions{} @@ -867,7 +911,6 @@ func (s *GenericScheduler) findPreferredNode(place placementResult) (*structs.No return preferredNode, nil } } - return nil, nil } diff --git a/scheduler/generic_sched_test.go b/scheduler/generic_sched_test.go index 1b9325c88d6..adda5e2cb2a 100644 --- a/scheduler/generic_sched_test.go +++ b/scheduler/generic_sched_test.go @@ -218,115 +218,6 @@ func TestServiceSched_JobRegister_StickyAllocs(t *testing.T) { } } -func TestServiceSched_JobRegister_StickyHostVolumes(t *testing.T) { - ci.Parallel(t) - - h := NewHarness(t) - - nodes := []*structs.Node{ - mock.Node(), - mock.Node(), - } - - hostVolCapsReadWrite := []*structs.HostVolumeCapability{ - { - AttachmentMode: structs.HostVolumeAttachmentModeFilesystem, - AccessMode: structs.HostVolumeAccessModeSingleNodeReader, - }, - { - AttachmentMode: structs.HostVolumeAttachmentModeFilesystem, - AccessMode: structs.HostVolumeAccessModeSingleNodeWriter, - }, - } - - dhv := &structs.HostVolume{ - Namespace: structs.DefaultNamespace, - ID: uuid.Generate(), - Name: "foo", - NodeID: nodes[1].ID, - RequestedCapabilities: hostVolCapsReadWrite, - State: structs.HostVolumeStateReady, - } - - nodes[0].HostVolumes = map[string]*structs.ClientHostVolumeConfig{} - nodes[1].HostVolumes = map[string]*structs.ClientHostVolumeConfig{"foo": {ID: dhv.ID}} - - for _, node := range nodes { - must.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, 1000, node)) - } - must.NoError(t, h.State.UpsertHostVolume(1000, dhv)) - - stickyRequest := map[string]*structs.VolumeRequest{ - "foo": { - Type: "host", - Source: "foo", - Sticky: true, - AccessMode: structs.CSIVolumeAccessModeSingleNodeWriter, - AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, - }, - } - - // Create a job - job := mock.Job() - job.TaskGroups[0].Volumes = stickyRequest - must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) - - // Create a mock evaluation to register the job - eval := &structs.Evaluation{ - Namespace: structs.DefaultNamespace, - ID: uuid.Generate(), - Priority: job.Priority, - TriggeredBy: structs.EvalTriggerJobRegister, - JobID: job.ID, - Status: structs.EvalStatusPending, - } - must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) - - // Process the evaluation - must.NoError(t, h.Process(NewServiceScheduler, eval)) - - // Ensure the plan allocated - plan := h.Plans[0] - planned := make(map[string]*structs.Allocation) - for _, allocList := range plan.NodeAllocation { - for _, alloc := range allocList { - planned[alloc.ID] = alloc - } - } - must.MapLen(t, 10, planned) - - // Ensure that the allocations got the host volume ID added - for _, p := range planned { - must.Eq(t, p.PreviousAllocation, "") - } - - // 
Update the job to force a rolling upgrade - updated := job.Copy() - updated.TaskGroups[0].Tasks[0].Resources.CPU += 10 - must.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, updated)) - - // Create a mock evaluation to handle the update - eval = &structs.Evaluation{ - Namespace: structs.DefaultNamespace, - ID: uuid.Generate(), - Priority: job.Priority, - TriggeredBy: structs.EvalTriggerNodeUpdate, - JobID: job.ID, - Status: structs.EvalStatusPending, - } - must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) - must.NoError(t, h.Process(NewServiceScheduler, eval)) - - // Ensure we have created only one new allocation - must.SliceLen(t, 2, h.Plans) - plan = h.Plans[0] - var newPlanned []*structs.Allocation - for _, allocList := range plan.NodeAllocation { - newPlanned = append(newPlanned, allocList...) - } - must.SliceLen(t, 10, newPlanned) -} - func TestServiceSched_JobRegister_DiskConstraints(t *testing.T) { ci.Parallel(t) @@ -4258,6 +4149,100 @@ func TestServiceSched_NodeDrain_Queued_Allocations(t *testing.T) { } } +// TestServiceSched_NodeDrain_TaskHandle asserts that allocations with task +// handles have them propagated to replacement allocations when drained. +func TestServiceSched_NodeDrain_TaskHandle(t *testing.T) { + ci.Parallel(t) + + h := NewHarness(t) + + node := mock.Node() + require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + + // Create some nodes + for i := 0; i < 10; i++ { + node := mock.Node() + require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + } + + // Generate a fake job with allocations and an update policy. + job := mock.Job() + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + + var allocs []*structs.Allocation + for i := 0; i < 10; i++ { + alloc := mock.Alloc() + alloc.Job = job + alloc.JobID = job.ID + alloc.NodeID = node.ID + alloc.Name = fmt.Sprintf("my-job.web[%d]", i) + alloc.DesiredTransition.Migrate = pointer.Of(true) + alloc.TaskStates = map[string]*structs.TaskState{ + "web": { + TaskHandle: &structs.TaskHandle{ + Version: 1, + DriverState: []byte("test-driver-state"), + }, + }, + } + allocs = append(allocs, alloc) + } + require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) + + node.DrainStrategy = mock.DrainNode().DrainStrategy + require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + + // Create a mock evaluation to deal with drain + eval := &structs.Evaluation{ + Namespace: structs.DefaultNamespace, + ID: uuid.Generate(), + Priority: 50, + TriggeredBy: structs.EvalTriggerNodeUpdate, + JobID: job.ID, + NodeID: node.ID, + Status: structs.EvalStatusPending, + } + require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval})) + + // Process the evaluation + err := h.Process(NewServiceScheduler, eval) + require.NoError(t, err) + + // Ensure a single plan + require.Len(t, h.Plans, 1) + plan := h.Plans[0] + + // Ensure the plan evicted all allocs + require.Len(t, plan.NodeUpdate[node.ID], len(allocs)) + + // Ensure the plan allocated + var planned []*structs.Allocation + for _, allocList := range plan.NodeAllocation { + planned = append(planned, allocList...) 
+ } + require.Len(t, planned, len(allocs)) + + // Lookup the allocations by JobID + ws := memdb.NewWatchSet() + out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) + require.NoError(t, err) + + // Ensure all allocations placed + out, _ = structs.FilterTerminalAllocs(out) + require.Len(t, out, len(allocs)) + + // Ensure task states were propagated + for _, a := range out { + require.NotEmpty(t, a.TaskStates) + require.NotEmpty(t, a.TaskStates["web"]) + require.NotNil(t, a.TaskStates["web"].TaskHandle) + assert.Equal(t, 1, a.TaskStates["web"].TaskHandle.Version) + assert.Equal(t, []byte("test-driver-state"), a.TaskStates["web"].TaskHandle.DriverState) + } + + h.AssertEvalStatus(t, structs.EvalStatusComplete) +} + func TestServiceSched_RetryLimit(t *testing.T) { ci.Parallel(t) @@ -7340,6 +7325,124 @@ func TestServiceSched_CSITopology(t *testing.T) { } +// TestPropagateTaskState asserts that propagateTaskState only copies state +// when the previous allocation is lost or draining. +func TestPropagateTaskState(t *testing.T) { + ci.Parallel(t) + + const taskName = "web" + taskHandle := &structs.TaskHandle{ + Version: 1, + DriverState: []byte("driver-state"), + } + + cases := []struct { + name string + prevAlloc *structs.Allocation + prevLost bool + copied bool + }{ + { + name: "LostWithState", + prevAlloc: &structs.Allocation{ + ClientStatus: structs.AllocClientStatusRunning, + DesiredTransition: structs.DesiredTransition{}, + TaskStates: map[string]*structs.TaskState{ + taskName: { + TaskHandle: taskHandle, + }, + }, + }, + prevLost: true, + copied: true, + }, + { + name: "DrainedWithState", + prevAlloc: &structs.Allocation{ + ClientStatus: structs.AllocClientStatusRunning, + DesiredTransition: structs.DesiredTransition{ + Migrate: pointer.Of(true), + }, + TaskStates: map[string]*structs.TaskState{ + taskName: { + TaskHandle: taskHandle, + }, + }, + }, + prevLost: false, + copied: true, + }, + { + name: "LostWithoutState", + prevAlloc: &structs.Allocation{ + ClientStatus: structs.AllocClientStatusRunning, + DesiredTransition: structs.DesiredTransition{}, + TaskStates: map[string]*structs.TaskState{ + taskName: {}, + }, + }, + prevLost: true, + copied: false, + }, + { + name: "DrainedWithoutState", + prevAlloc: &structs.Allocation{ + ClientStatus: structs.AllocClientStatusRunning, + DesiredTransition: structs.DesiredTransition{ + Migrate: pointer.Of(true), + }, + TaskStates: map[string]*structs.TaskState{ + taskName: {}, + }, + }, + prevLost: false, + copied: false, + }, + { + name: "TerminalWithState", + prevAlloc: &structs.Allocation{ + ClientStatus: structs.AllocClientStatusComplete, + DesiredTransition: structs.DesiredTransition{}, + TaskStates: map[string]*structs.TaskState{ + taskName: { + TaskHandle: taskHandle, + }, + }, + }, + prevLost: false, + copied: false, + }, + } + + for i := range cases { + tc := cases[i] + t.Run(tc.name, func(t *testing.T) { + newAlloc := &structs.Allocation{ + // Required by propagateTaskState and populated + // by the scheduler's node iterator. 
+ AllocatedResources: &structs.AllocatedResources{ + Tasks: map[string]*structs.AllocatedTaskResources{ + taskName: nil, // value isn't used + }, + }, + } + + propagateTaskState(newAlloc, tc.prevAlloc, tc.prevLost) + + if tc.copied { + // Assert state was copied + require.NotNil(t, newAlloc.TaskStates) + require.Contains(t, newAlloc.TaskStates, taskName) + require.Equal(t, taskHandle, newAlloc.TaskStates[taskName].TaskHandle) + } else { + // Assert state was *not* copied + require.Empty(t, newAlloc.TaskStates, + "expected task states not to be copied") + } + }) + } +} + // Tests that a client disconnect generates attribute updates and follow up evals. func TestServiceSched_Client_Disconnect_Creates_Updates_and_Evals(t *testing.T) { diff --git a/scheduler/reconcile.go b/scheduler/reconcile.go index 644e274dd90..bf9241797c2 100644 --- a/scheduler/reconcile.go +++ b/scheduler/reconcile.go @@ -14,8 +14,8 @@ import ( "sort" "time" + "github.com/armon/go-metrics" log "github.com/hashicorp/go-hclog" - metrics "github.com/hashicorp/go-metrics/compat" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/uuid" diff --git a/scheduler/scheduler.go b/scheduler/scheduler.go index d0044d59cc3..7e22070966f 100644 --- a/scheduler/scheduler.go +++ b/scheduler/scheduler.go @@ -118,17 +118,6 @@ type State interface { // CSIVolumeByID fetch CSI volumes, containing controller jobs CSIVolumesByNodeID(memdb.WatchSet, string, string) (memdb.ResultIterator, error) - // HostVolumeByID fetches host volume by its ID - HostVolumeByID(memdb.WatchSet, string, string, bool) (*structs.HostVolume, error) - - // HostVolumesByNodeID gets an iterator with all the volumes attached to a - // given node - HostVolumesByNodeID(memdb.WatchSet, string, state.SortOption) (memdb.ResultIterator, error) - - // GetTaskGroupHostVolumeClaimsForTaskGroup gets all host volume claims for - // a given namespace, job ID and task group name - GetTaskGroupHostVolumeClaimsForTaskGroup(memdb.WatchSet, string, string, string) (memdb.ResultIterator, error) - // LatestIndex returns the greatest index value for all indexes. LatestIndex() (uint64, error) } diff --git a/scheduler/stack.go b/scheduler/stack.go index de8501b873e..5c897ddf2de 100644 --- a/scheduler/stack.go +++ b/scheduler/stack.go @@ -35,11 +35,10 @@ type Stack interface { } type SelectOptions struct { - PenaltyNodeIDs map[string]struct{} - PreferredNodes []*structs.Node - Preempt bool - AllocName string - AllocationHostVolumeIDs []string + PenaltyNodeIDs map[string]struct{} + PreferredNodes []*structs.Node + Preempt bool + AllocName string } // GenericStack is the Stack used for the Generic scheduler. 
It is @@ -52,8 +51,6 @@ type GenericStack struct { wrappedChecks *FeasibilityWrapper quota FeasibleIterator jobVersion *uint64 - jobNamespace string - jobID string jobConstraint *ConstraintChecker taskGroupDrivers *DriverChecker taskGroupConstraint *ConstraintChecker @@ -104,8 +101,6 @@ func (s *GenericStack) SetJob(job *structs.Job) { jobVer := job.Version s.jobVersion = &jobVer - s.jobNamespace = job.Namespace - s.jobID = job.ID s.jobConstraint.SetConstraints(job.Constraints) s.distinctHostsConstraint.SetJob(job) @@ -159,7 +154,7 @@ func (s *GenericStack) Select(tg *structs.TaskGroup, options *SelectOptions) *Ra s.taskGroupDrivers.SetDrivers(tgConstr.drivers) s.taskGroupConstraint.SetConstraints(tgConstr.constraints) s.taskGroupDevices.SetTaskGroup(tg) - s.taskGroupHostVolumes.SetVolumes(options.AllocName, s.jobNamespace, s.jobID, tg.Name, tg.Volumes) + s.taskGroupHostVolumes.SetVolumes(options.AllocName, tg.Volumes) s.taskGroupCSIVolumes.SetVolumes(options.AllocName, tg.Volumes) if len(tg.Networks) > 0 { s.taskGroupNetwork.SetNetwork(tg.Networks[0]) @@ -207,8 +202,6 @@ type SystemStack struct { ctx Context source *StaticIterator - jobNamespace string - jobID string wrappedChecks *FeasibilityWrapper quota FeasibleIterator jobConstraint *ConstraintChecker @@ -320,8 +313,6 @@ func (s *SystemStack) SetNodes(baseNodes []*structs.Node) { } func (s *SystemStack) SetJob(job *structs.Job) { - s.jobNamespace = job.Namespace - s.jobID = job.ID s.jobConstraint.SetConstraints(job.Constraints) s.distinctPropertyConstraint.SetJob(job) s.binPack.SetJob(job) @@ -354,7 +345,7 @@ func (s *SystemStack) Select(tg *structs.TaskGroup, options *SelectOptions) *Ran s.taskGroupDrivers.SetDrivers(tgConstr.drivers) s.taskGroupConstraint.SetConstraints(tgConstr.constraints) s.taskGroupDevices.SetTaskGroup(tg) - s.taskGroupHostVolumes.SetVolumes(options.AllocName, s.jobNamespace, s.jobID, tg.Name, tg.Volumes) + s.taskGroupHostVolumes.SetVolumes(options.AllocName, tg.Volumes) s.taskGroupCSIVolumes.SetVolumes(options.AllocName, tg.Volumes) if len(tg.Networks) > 0 { s.taskGroupNetwork.SetNetwork(tg.Networks[0]) diff --git a/scheduler/util.go b/scheduler/util.go index 9a2bdac9b72..dd050abd5ae 100644 --- a/scheduler/util.go +++ b/scheduler/util.go @@ -233,6 +233,16 @@ func tasksUpdated(jobA, jobB *structs.Job, taskGroup string) comparison { return c } + // Check Affinities + if c := affinitiesUpdated(jobA, jobB, taskGroup); c.modified { + return c + } + + // Check Spreads + if c := spreadsUpdated(jobA, jobB, taskGroup); c.modified { + return c + } + // Check consul updated if c := consulUpdated(a.Consul, b.Consul); c.modified { return c @@ -570,6 +580,67 @@ func networkPortMap(n *structs.NetworkResource) structs.AllocatedPorts { return m } +func affinitiesUpdated(jobA, jobB *structs.Job, taskGroup string) comparison { + var affinitiesA structs.Affinities + var affinitiesB structs.Affinities + + // accumulate job affinities + + affinitiesA = append(affinitiesA, jobA.Affinities...) + affinitiesB = append(affinitiesB, jobB.Affinities...) + + tgA := jobA.LookupTaskGroup(taskGroup) + tgB := jobB.LookupTaskGroup(taskGroup) + + // append group level affinities + + affinitiesA = append(affinitiesA, tgA.Affinities...) + affinitiesB = append(affinitiesB, tgB.Affinities...) + + // append task level affinities for A + + for _, task := range tgA.Tasks { + affinitiesA = append(affinitiesA, task.Affinities...) 
+ } + + // append task level affinities for B + for _, task := range tgB.Tasks { + affinitiesB = append(affinitiesB, task.Affinities...) + } + + // finally check if all the affinities from both jobs match + if !affinitiesA.Equal(&affinitiesB) { + return difference("affinities", affinitiesA, affinitiesB) + } + + return same +} + +func spreadsUpdated(jobA, jobB *structs.Job, taskGroup string) comparison { + var spreadsA []*structs.Spread + var spreadsB []*structs.Spread + + // accumulate job spreads + + spreadsA = append(spreadsA, jobA.Spreads...) + spreadsB = append(spreadsB, jobB.Spreads...) + + tgA := jobA.LookupTaskGroup(taskGroup) + tgB := jobB.LookupTaskGroup(taskGroup) + + // append group spreads + spreadsA = append(spreadsA, tgA.Spreads...) + spreadsB = append(spreadsB, tgB.Spreads...) + + if !slices.EqualFunc(spreadsA, spreadsB, func(a, b *structs.Spread) bool { + return a.Equal(b) + }) { + return difference("spreads", spreadsA, spreadsB) + } + + return same +} + // renderTemplatesUpdated returns the difference in the RestartPolicy's // render_templates field, if set func renderTemplatesUpdated(a, b *structs.RestartPolicy, msg string) comparison { diff --git a/scheduler/util_test.go b/scheduler/util_test.go index 387e0a97e23..b0d17b37aa1 100644 --- a/scheduler/util_test.go +++ b/scheduler/util_test.go @@ -233,6 +233,140 @@ func TestShuffleNodes(t *testing.T) { } +func TestTaskUpdatedAffinity(t *testing.T) { + ci.Parallel(t) + + j1 := mock.Job() + j2 := mock.Job() + name := j1.TaskGroups[0].Name + must.False(t, tasksUpdated(j1, j2, name).modified) + + // TaskGroup Affinity + j2.TaskGroups[0].Affinities = []*structs.Affinity{ + { + LTarget: "node.datacenter", + RTarget: "dc1", + Operand: "=", + Weight: 100, + }, + } + must.True(t, tasksUpdated(j1, j2, name).modified) + + // TaskGroup Task Affinity + j3 := mock.Job() + j3.TaskGroups[0].Tasks[0].Affinities = []*structs.Affinity{ + { + LTarget: "node.datacenter", + RTarget: "dc1", + Operand: "=", + Weight: 100, + }, + } + must.True(t, tasksUpdated(j1, j3, name).modified) + + j4 := mock.Job() + j4.TaskGroups[0].Tasks[0].Affinities = []*structs.Affinity{ + { + LTarget: "node.datacenter", + RTarget: "dc1", + Operand: "=", + Weight: 100, + }, + } + must.True(t, tasksUpdated(j1, j4, name).modified) + + // check different level of same affinity + j5 := mock.Job() + j5.Affinities = []*structs.Affinity{ + { + LTarget: "node.datacenter", + RTarget: "dc1", + Operand: "=", + Weight: 100, + }, + } + + j6 := mock.Job() + j6.Affinities = make([]*structs.Affinity, 0) + j6.TaskGroups[0].Affinities = []*structs.Affinity{ + { + LTarget: "node.datacenter", + RTarget: "dc1", + Operand: "=", + Weight: 100, + }, + } + must.False(t, tasksUpdated(j5, j6, name).modified) +} + +func TestTaskUpdatedSpread(t *testing.T) { + ci.Parallel(t) + + j1 := mock.Job() + j2 := mock.Job() + name := j1.TaskGroups[0].Name + + must.False(t, tasksUpdated(j1, j2, name).modified) + + // TaskGroup Spread + j2.TaskGroups[0].Spreads = []*structs.Spread{ + { + Attribute: "node.datacenter", + Weight: 100, + SpreadTarget: []*structs.SpreadTarget{ + { + Value: "r1", + Percent: 50, + }, + { + Value: "r2", + Percent: 50, + }, + }, + }, + } + must.True(t, tasksUpdated(j1, j2, name).modified) + + // check different level of same constraint + j5 := mock.Job() + j5.Spreads = []*structs.Spread{ + { + Attribute: "node.datacenter", + Weight: 100, + SpreadTarget: []*structs.SpreadTarget{ + { + Value: "r1", + Percent: 50, + }, + { + Value: "r2", + Percent: 50, + }, + }, + }, + } + + j6 := mock.Job() 
+ j6.TaskGroups[0].Spreads = []*structs.Spread{ + { + Attribute: "node.datacenter", + Weight: 100, + SpreadTarget: []*structs.SpreadTarget{ + { + Value: "r1", + Percent: 50, + }, + { + Value: "r2", + Percent: 50, + }, + }, + }, + } + + must.False(t, tasksUpdated(j5, j6, name).modified) +} + func TestTasksUpdated(t *testing.T) { ci.Parallel(t) diff --git a/ui/yarn.lock b/ui/yarn.lock index 60ce4faf221..1544df6ebd1 100644 --- a/ui/yarn.lock +++ b/ui/yarn.lock @@ -11237,9 +11237,9 @@ mute-stream@0.0.8: integrity sha512-nnbWWOkoWyUsTjKrhgD0dcz22mdkSnpYqbEjIm2nhwhuxlSkpywJmBo8h0ZqJdkp73mb90SssHkN4rsRaBAfAA== nanoid@^3.3.7: - version "3.3.8" - resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-3.3.8.tgz#b1be3030bee36aaff18bacb375e5cce521684baf" - integrity sha512-WNLf5Sd8oZxOm+TzppcYk8gVOgP+l58xNy58D0nbUnOxOWRWvlcCV4kUF7ltmI6PsrLl/BgKEyS4mqsGChFN0w== + version "3.3.7" + resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-3.3.7.tgz#d0c301a691bc8d54efa0a2226ccf3fe2fd656bd8" + integrity sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g== nanomatch@^1.2.9: version "1.2.13" diff --git a/website/content/api-docs/events.mdx b/website/content/api-docs/events.mdx index d8c606d22e4..0cc34c76a73 100644 --- a/website/content/api-docs/events.mdx +++ b/website/content/api-docs/events.mdx @@ -28,23 +28,19 @@ the nature of this endpoint individual topics require specific policies. Note that if you do not include a `topic` parameter all topics will be included by default, requiring a management token. - -| Topic | ACL Required | -|--------------|------------------------------| -| `*` | `management` | -| `ACLPolicy` | `management` | -| `ACLRole` | `management` | -| `ACLToken` | `management` | -| `Allocation` | `namespace:read-job` | -| `CSIPlugin` | `namespace:read-job` | -| `CSIVolume` | `namespace:csi-read-volume` | -| `Deployment` | `namespace:read-job` | -| `Evaluation` | `namespace:read-job` | -| `HostVolume` | `namespace:host-volume-read` | -| `Job` | `namespace:read-job` | -| `NodePool` | `management` | -| `Node` | `node:read` | -| `Service` | `namespace:read-job` | +| Topic | ACL Required | +| ------------ | -------------------- | +| `*` | `management` | +| `ACLToken` | `management` | +| `ACLPolicy` | `management` | +| `ACLRole` | `management` | +| `Job` | `namespace:read-job` | +| `Allocation` | `namespace:read-job` | +| `Deployment` | `namespace:read-job` | +| `Evaluation` | `namespace:read-job` | +| `Node` | `node:read` | +| `NodePool` | `management` | +| `Service` | `namespace:read-job` | ### Parameters @@ -69,58 +65,50 @@ by default, requiring a management token. 
### Event Topics -| Topic | Output | -|------------|----------------------------------------| -| ACLPolicy | ACLPolicy | -| ACLRoles | ACLRole | -| ACLToken | ACLToken | -| Allocation | Allocation (no job information) | -| CSIPlugin | CSIPlugin | -| CSIVolume | CSIVolume | -| Deployment | Deployment | -| Evaluation | Evaluation | -| HostVolume | HostVolume (dynamic host volumes only) | -| Job | Job | -| Node | Node | -| NodeDrain | Node | -| NodePool | NodePool | -| Service | Service Registrations | +| Topic | Output | +| ---------- | ------------------------------- | +| ACLToken | ACLToken | +| ACLPolicy | ACLPolicy | +| ACLRoles | ACLRole | +| Allocation | Allocation (no job information) | +| Job | Job | +| Evaluation | Evaluation | +| Deployment | Deployment | +| Node | Node | +| NodeDrain | Node | +| NodePool | NodePool | +| Service | Service Registrations | ### Event Types | Type | -|-------------------------------| -| ACLPolicyDeleted | +| ----------------------------- | +| ACLTokenUpserted | +| ACLTokenDeleted | | ACLPolicyUpserted | -| ACLRoleDeleted | +| ACLPolicyDeleted | | ACLRoleUpserted | -| ACLTokenDeleted | -| ACLTokenUpserted | +| ACLRoleDeleted | | AllocationCreated | -| AllocationUpdateDesiredStatus | | AllocationUpdated | -| CSIVolumeDeregistered | -| CSIVolumeRegistered | -| DeploymentAllocHealth | -| DeploymentPromotion | +| AllocationUpdateDesiredStatus | | DeploymentStatusUpdate | +| DeploymentPromotion | +| DeploymentAllocHealth | | EvaluationUpdated | -| HostVolumeDeleted | -| HostVolumeRegistered | -| JobBatchDeregistered | -| JobDeregistered | | JobRegistered | +| JobDeregistered | +| JobBatchDeregistered | +| NodeRegistration | | NodeDeregistration | -| NodeDrain | | NodeEligibility | +| NodeDrain | | NodeEvent | -| NodePoolDeleted | | NodePoolUpserted | -| NodeRegistration | +| NodePoolDeleted | | PlanResult | -| ServiceDeregistration | | ServiceRegistration | - +| ServiceDeregistration | ### Sample Request diff --git a/website/content/api-docs/volumes.mdx b/website/content/api-docs/volumes.mdx index 5995137c9d9..9b56c16e88b 100644 --- a/website/content/api-docs/volumes.mdx +++ b/website/content/api-docs/volumes.mdx @@ -1,14 +1,13 @@ --- layout: api page_title: Volumes - HTTP API -description: |- - The Nomad `/volume` and `/volumes` endpoints query for and interact with Container Storage Interface (CSI) volumes and dynamic host volumes. +description: The `/volume` endpoints are used to query for and interact with volumes. --- # Volumes HTTP API -The `/volume` and `/volumes` endpoints query for and interact with -Container Storage Interface (CSI) volumes and dynamic host volumes. +The `/volume` and `/volumes` endpoints are used to query for and interact with +volumes. ## List Volumes @@ -18,28 +17,29 @@ This endpoint lists all volumes. | ------ | ------------- | ------------------ | | `GET` | `/v1/volumes` | `application/json` | -The following table shows this endpoint's support for [blocking queries][] and -[required ACLs][]. +The table below shows this endpoint's support for +[blocking queries](/nomad/api-docs#blocking-queries) and +[required ACLs](/nomad/api-docs#acls). -| Blocking Queries | ACL Required | -|------------------|--------------------------------------------------------------| -| `YES` | `namespace:csi-list-volume`
`namespace:host-volume-read` | +| Blocking Queries | ACL Required | +| ---------------- | --------------------------- | +| `YES` | `namespace:csi-list-volume` | ### Parameters -- `type` `(string: )` - Specifies the type of volume to query. One of - `csi` or `host`. The `host` value queries dynamic host volumes. Specify this - as a query string parameter. +- `type` `(string: "")` - Specifies the type of volume to + query. Currently only supports `csi`. This is specified as a query + string parameter. Returns an empty list if omitted. - `node_id` `(string: "")` - Specifies a string to filter volumes based on an Node ID prefix. Because the value is decoded to bytes, the prefix must have an even number of hexadecimal characters - (0-9a-f). Specify this as a query string parameter. + (0-9a-f). This is specified as a query string parameter. - `plugin_id` `(string: "")` - Specifies a string to filter volumes based on a plugin ID prefix. Because the value is decoded to bytes, the prefix must have an even number of hexadecimal characters - (0-9a-f). Specify this as a query string parameter. + (0-9a-f). This is specified as a query string parameter. - `next_token` `(string: "")` - This endpoint supports paging. The `next_token` parameter accepts a string which identifies the next expected volume. This @@ -62,7 +62,7 @@ $ curl \ https://localhost:4646/v1/volumes?type=csi&node_id=foo&plugin_id=plugin-id1 ``` -### Sample Response for CSI Volumes +### Sample Response ```json [ @@ -95,37 +95,17 @@ $ curl \ ] ``` -### Sample Response for dynamic host volumes - -```json -[ - { - "CapacityBytes": 1048576000, - "CreateIndex": 42, - "CreateTime": 1736191825846395400, - "ID": "3735cc2c-cc64-11ef-89ed-bfb5b3bc38ea", - "ModifyIndex": 64, - "ModifyTime": 1736191825846395400, - "Name": "example", - "Namespace": "default", - "NodeID": "5c5830d0-cc64-11ef-a293-4f03e55969ea", - "NodePool": "default", - "PluginID": "plugin-id1", - "State": "ready" - } -] -``` - -## Read CSI Volume +## Read Volume -This endpoint reads information about a specific CSI volume. +This endpoint reads information about a specific volume. | Method | Path | Produces | | ------ | --------------------------- | ------------------ | | `GET` | `/v1/volume/csi/:volume_id` | `application/json` | -The following table shows this endpoint's support for [blocking queries][] and -[required ACLs][]. +The table below shows this endpoint's support for +[blocking queries](/nomad/api-docs#blocking-queries) and +[required ACLs](/nomad/api-docs#acls). | Blocking Queries | ACL Required | | ---------------- | --------------------------- | @@ -134,7 +114,7 @@ The following table shows this endpoint's support for [blocking queries][] and ### Parameters - `:volume_id` `(string: )` - Specifies the ID of the - volume. This must be the full ID. Specify this as part of the + volume. This must be the full ID. This is specified as part of the path. ### Sample Request @@ -295,11 +275,10 @@ $ curl \ } ``` -## Register CSI Volume +## Register Volume -This endpoint registers an external CSI volume with Nomad. The volume must exist -in the external storage provider. Refer to the [Create CSI Volume][] section for -details. +This endpoint registers an external volume with Nomad. The volume must exist +in the external storage provider (see [Create Volume] below). Making the same request again with a higher `RequestedCapacityMin` value may trigger a [Volume Expansion][]. @@ -308,8 +287,9 @@ may trigger a [Volume Expansion][]. 
| ------ | --------------------------- | ------------------ | | `PUT` | `/v1/volume/csi/:volume_id` | `application/json` | -The following table shows this endpoint's support for [blocking queries][] and -[required ACLs][]. +The table below shows this endpoint's support for +[blocking queries](/nomad/api-docs#blocking-queries) and +[required ACLs](/nomad/api-docs#acls). | Blocking Queries | ACL Required | | ---------------- | ---------------------------- | @@ -318,13 +298,13 @@ The following table shows this endpoint's support for [blocking queries][] and ### Parameters - `:volume_id` `(string: )` - Specifies the ID of the - volume. This must be the full ID. Specify this as part of the + volume. This must be the full ID. This is specified as part of the path. ### Sample Payload The payload must include a JSON document that describes the volume's -parameters. Note that the `NodeID` field is required for the register API. +parameters. ```json { @@ -363,9 +343,9 @@ $ curl \ https://localhost:4646/v1/volume/csi/volume-id1 ``` -## Create CSI Volume +## Create Volume -This endpoint creates a CSI volume in an external storage provider and registers +This endpoint creates a volume in an external storage provider and registers it with Nomad. Only CSI plugins that implement the [Controller][csi_plugins_internals] interface with the `CREATE_DELETE_VOLUME` capability support this endpoint. @@ -377,8 +357,9 @@ may trigger a [Volume Expansion][]. | ------ | ---------------------------------- | ------------------ | | `PUT` | `/v1/volume/csi/:volume_id/create` | `application/json` | -The following table shows this endpoint's support for [blocking queries][] and -[required ACLs][]. +The table below shows this endpoint's support for +[blocking queries](/nomad/api-docs#blocking-queries) and +[required ACLs](/nomad/api-docs#acls). | Blocking Queries | ACL Required | | ---------------- | ---------------------------- | @@ -387,7 +368,7 @@ The following table shows this endpoint's support for [blocking queries][] and ### Parameters - `:volume_id` `(string: )` - Specifies the ID of the - volume. This must be the full ID. Specify this as part of the + volume. This must be the full ID. This is specified as part of the path. ### Sample Payload @@ -439,17 +420,18 @@ $ curl \ ``` -## Deregister CSI Volume +## Deregister Volume -This endpoint deregisters an external CSI volume from Nomad. It is an error to +This endpoint deregisters an external volume with Nomad. It is an error to deregister a volume that is in use. | Method | Path | Produces | | -------- | --------------------------- | ------------------ | | `DELETE` | `/v1/volume/csi/:volume_id` | `application/json` | -The following table shows this endpoint's support for [blocking queries][] and -[required ACLs][]. +The table below shows this endpoint's support for +[blocking queries](/nomad/api-docs#blocking-queries) and +[required ACLs](/nomad/api-docs#acls). | Blocking Queries | ACL Required | | ---------------- | ---------------------------- | @@ -458,13 +440,13 @@ The following table shows this endpoint's support for [blocking queries][] and ### Parameters - `:volume_id` `(string: )` - Specifies the ID of the - volume. This must be the full ID. Specify this as part of the + volume. This must be the full ID. This is specified as part of the path. - `force` `(bool: false)` - Force deregistration of the volume and immediately drop claims for terminal allocations. Returns an error if the volume has running allocations. 
This does not detach the volume from client nodes. - Specify this as a query string parameter. + This is specified as a query string parameter. ### Sample Request @@ -474,9 +456,9 @@ $ curl \ https://localhost:4646/v1/volume/csi/volume-id1?force=false ``` -## Delete CSI Volume +## Delete Volume -This endpoint deletes an external CSI volume from the storage provider, and +This endpoint deletes an external volume from the storage provider, and deregisters it from Nomad. It is an error to delete a volume that is in use. Only CSI plugins that implement the [Controller][csi_plugins_internals] interface with the `CREATE_DELETE_VOLUME` capability support this endpoint. @@ -485,8 +467,9 @@ interface with the `CREATE_DELETE_VOLUME` capability support this endpoint. | -------- | ---------------------------------- | ------------------ | | `DELETE` | `/v1/volume/csi/:volume_id/delete` | `application/json` | -The following table shows this endpoint's support for [blocking queries][] and -[required ACLs][]. +The table below shows this endpoint's support for +[blocking queries](/nomad/api-docs#blocking-queries) and +[required ACLs](/nomad/api-docs#acls). | Blocking Queries | ACL Required | | ---------------- | ---------------------------- | @@ -500,7 +483,7 @@ stored when the CSI volume was created. ### Parameters - `:volume_id` `(string: )` - Specifies the ID of the - volume. This must be the full ID. Specify this as part of the + volume. This must be the full ID. This is specified as part of the path. ### Sample Request @@ -512,17 +495,18 @@ $ curl \ https://localhost:4646/v1/volume/csi/volume-id1/delete ``` -## Detach CSI Volume +## Detach Volume -This endpoint detaches an external CSI volume from a Nomad client node. It is an +This endpoint detaches an external volume from a Nomad client node. It is an error to detach a volume that is in use. | Method | Path | Produces | | -------- | ---------------------------------- | ------------------ | | `DELETE` | `/v1/volume/csi/:volume_id/detach` | `application/json` | -The following table shows this endpoint's support for [blocking queries][] and -[required ACLs][]. +The table below shows this endpoint's support for +[blocking queries](/nomad/api-docs#blocking-queries) and +[required ACLs](/nomad/api-docs#acls). | Blocking Queries | ACL Required | | ---------------- | ---------------------------- | @@ -531,11 +515,11 @@ The following table shows this endpoint's support for [blocking queries][] and ### Parameters - `:volume_id` `(string: )` - Specifies the ID of the - volume. This must be the full ID. Specify this as part of the + volume. This must be the full ID. This is specified as part of the path. - `node` `(string: )` - The node to detach the volume from. - Specify this as a query string parameter. + This is specified as a query string parameter. ### Sample Request @@ -545,19 +529,20 @@ $ curl \ https://localhost:4646/v1/volume/csi/volume-id/detach?node=00000000-0000-0000-0000-000000000000 ``` -## List External CSI Volumes +## List External Volumes -This endpoint lists storage CSI volumes that are known to the external storage -provider but may not be registered with Nomad. Only CSI plugins that implement -the [Controller][csi_plugins_internals] interface with the `LIST_VOLUMES` -capability support this endpoint. +This endpoint lists storage volumes that are known to the external storage +provider but may not be registered with Nomad. 
Only CSI plugins that +implement the [Controller][csi_plugins_internals] interface with the +`LIST_VOLUMES` capability support this endpoint. | Method | Path | Produces | |--------|------------------------|--------------------| | `GET` | `/v1/volumes/external` | `application/json` | -The following table shows this endpoint's support for [blocking queries][] and -[required ACLs][]. +The table below shows this endpoint's support for +[blocking queries](/nomad/api-docs#blocking-queries) and +[required ACLs](/nomad/api-docs#acls). | Blocking Queries | ACL Required | | ---------------- | ---------------------------- | @@ -568,14 +553,14 @@ The following table shows this endpoint's support for [blocking queries][] and - `plugin_id` `(string: "")` - Specifies a string to filter volumes based on a plugin ID prefix. Because the value is decoded to bytes, the prefix must have an even number of hexadecimal characters - (0-9a-f). Specify this as a query string parameter. + (0-9a-f). This is specified as a query string parameter. - `next_token` `(string: "")` - This endpoint supports paging. The `next_token` parameter accepts a string returned in a previous response's `NextToken` field to request the next page of results. - `per_page` `(int: )` - Specifies a maximum number of snapshots to - return for this request. The response includes a `NextToken` field that + return for this request. The response will include a `NextToken` field that can be passed to the next request to fetch additional pages. ### Sample Request @@ -611,19 +596,20 @@ $ curl \ } ``` -## Create CSI Volume Snapshot +## Create Snapshot -This endpoint creates a snapshot of a CSI volume on the external storage +This endpoint creates a snapshot of a volume on the external storage provider. Only CSI plugins that implement the -[Controller][csi_plugins_internals] interface with the `CREATE_DELETE_SNAPSHOT` -capability support this endpoint. +[Controller][csi_plugins_internals] interface with the +`CREATE_DELETE_SNAPSHOT` capability support this endpoint. | Method | Path | Produces | | -------- | --------------------------------- | ------------------ | | `POST` | `/v1/volumes/snapshot` | `application/json` | -The following table shows this endpoint's support for [blocking queries][] and -[required ACLs][]. +The table below shows this endpoint's support for +[blocking queries](/nomad/api-docs#blocking-queries) and +[required ACLs](/nomad/api-docs#acls). | Blocking Queries | ACL Required | | ---------------- | ---------------------------- | @@ -678,19 +664,20 @@ $ curl \ } ``` -## Delete CSI Volume Snapshot +## Delete Snapshot -This endpoint deletes a CSI volume snapshot from the external storage +This endpoint deletes a volume snapshot from the external storage provider. Only CSI plugins that implement the -[Controller][csi_plugins_internals] interface with the `CREATE_DELETE_SNAPSHOT` -capability support this endpoint. +[Controller][csi_plugins_internals] interface with the +`CREATE_DELETE_SNAPSHOT` capability support this endpoint. | Method | Path | Produces | | -------- | ---------------------- | ------------------ | | `DELETE` | `/v1/volumes/snapshot` | `application/json` | -The following table shows this endpoint's support for [blocking queries][] and -[required ACLs][]. +The table below shows this endpoint's support for +[blocking queries](/nomad/api-docs#blocking-queries) and +[required ACLs](/nomad/api-docs#acls). 
| Blocking Queries | ACL Required | | ---------------- | ---------------------------- | @@ -704,12 +691,12 @@ stored when the CSI snapshot was created. ### Parameters - `plugin_id` `(string: )` - Specifies the prefix of a CSI plugin ID - to perform the delete. Because the value is decoded to bytes, the prefix must - have an even number of hexadecimal characters (0-9a-f). Specify this as a - query string parameter. + to perform the delete. Because the value is decoded to bytes, the prefix + must have an even number of hexadecimal characters (0-9a-f). This is + specified as a query string parameter. - `snapshot_id` `(string: )` - Specifies the snapshot ID to - delete. Specify this as a query parameter. + delete. This is specified as a query parameter. ### Sample Request @@ -720,9 +707,9 @@ $ curl \ https://localhost:4646/v1/volumes/snapshot ``` -## List CSI Volume Snapshots +## List Snapshots -This endpoint lists CSI volume snapshots on the external storage provider. Only +This endpoint lists volume snapshots on the external storage provider. Only CSI plugins that implement the [Controller][csi_plugins_internals] interface with the `LIST_SNAPSHOTS` capability support this endpoint. @@ -731,8 +718,9 @@ with the `LIST_SNAPSHOTS` capability support this endpoint. | ------ | ---------------------- | ------------------ | | `GET` | `/v1/volumes/snapshot` | `application/json` | -The following table shows this endpoint's support for [blocking queries][] and -[required ACLs][]. +The table below shows this endpoint's support for +[blocking queries](/nomad/api-docs#blocking-queries) and +[required ACLs](/nomad/api-docs#acls). | Blocking Queries | ACL Required | | ---------------- | --------------------------- | @@ -747,7 +735,7 @@ stored when the CSI snapshot was created. - `plugin_id` `(string: )` - Specifies the prefix of a CSI plugin ID to perform the list. Because the value is decoded to bytes, the prefix must - have an even number of hexadecimal characters (0-9a-f). Specify this as + have an even number of hexadecimal characters (0-9a-f). This is specified as a query string parameter. - `next_token` `(string: "")` - This endpoint supports paging. The @@ -755,7 +743,7 @@ stored when the CSI snapshot was created. `NextToken` field to request the next page of results. - `per_page` `(int: )` - Specifies a maximum number of snapshots to - return for this request. The response includes a `NextToken` field that + return for this request. The response will include a `NextToken` field that can be passed to the next request to fetch additional pages. ### Sample Request @@ -794,335 +782,8 @@ $ curl \ } ``` -## Read Dynamic Host Volume - -This endpoint reads information about a specific dynamic host volume. - -| Method | Path | Produces | -|--------|------------------------------|--------------------| -| `GET` | `/v1/volume/host/:volume_id` | `application/json` | - -The following table shows this endpoint's support for [blocking queries][] and -[required ACLs][]. - -| Blocking Queries | ACL Required | -|------------------|------------------------------| -| `YES` | `namespace:host-volume-read` | - -### Parameters - -- `:volume_id` `(string: )` - Specifies the ID of the - volume. This must be the full ID. Specify this as part of the - path. 
- -### Sample Request - -```shell-session -$ curl \ - https://localhost:4646/v1/volume/host/c0f7ee7d-5cc6-92fd-f2b5-14b79f01979f -``` - -### Sample Response - -```json -{ - "Allocations": [], - "CapacityBytes": 49283072, - "CreateIndex": 11, - "CreateTime": 1736191993011594200, - "HostPath": "/run/nomad/dev/alloc_mounts/c0f7ee7d-5cc6-92fd-f2b5-14b79f01979f", - "ID": "c0f7ee7d-5cc6-92fd-f2b5-14b79f01979f", - "ModifyIndex": 12, - "ModifyTime": 1736191993011594200, - "Name": "external-plugin", - "Namespace": "default", - "NodeID": "670cb259-bc26-653b-e316-655af04ad260", - "NodePool": "default", - "Parameters": { - "hello": "world" - }, - "PluginID": "example-plugin-mkfs", - "RequestedCapabilities": [ - { - "AccessMode": "single-node-writer", - "AttachmentMode": "file-system" - } - ], - "RequestedCapacityMaxBytes": 50000000, - "RequestedCapacityMinBytes": 50000000, - "State": "ready" -} -``` - -## Create Dynamic Host Volume - -This endpoint creates a dynamic host volume and registers it with Nomad. - -The response body includes the volume definition with the `ID` and `NodeID` -fields populated. Making the same request without the ID creates a new volume on -a different node. Making a request with the ID set updates the volume to the new -parameters, if possible. - -| Method | Path | Produces | -|--------|--------------------------|--------------------| -| `PUT` | `/v1/volume/host/create` | `application/json` | - -The following table shows this endpoint's support for [blocking queries][] and -[required ACLs][]. - -| Blocking Queries | ACL Required | -|------------------|--------------------------------------------------------------------------------------------------------------------------------| -| `NO` | `namespace:host-volume-create` or
`namespace:host-volume-write`.
`namespace:sentinel-override` if `PolicyOverride` set | - -### Parameters - -- `Volume` `(Volume: )` - Specifies the JSON definition of the host - volume. You should include the ID field if you are updating an existing - volume. - -- `PolicyOverride` `(bool: false)` - If set, Nomad overrides any soft mandatory - Sentinel policies. This field allows creating a volume when it would be denied - by policy. - -### Sample Payload - -```json -{ - "PolicyOverride": false, - "Volume": { - "Name": "example", - "Namespace": "default", - "NodePool": "prod", - "PluginID": "mkdir", - "RequestedCapacityMinBytes": 10737418240, - "RequestedCapacityMaxBytes": 21474836480, - "RequestedCapabilities": [ - { - "AccessMode": "single-node-writer", - "AttachmentMode": "file-system" - }, - { - "AccessMode": "single-node-writer", - "AttachmentMode": "block-device" - } - ], - "Constraints": [ - { - "LTarget": "${attr.kernel.name}", - "RTarget": "linux", - "Operand": "=" - } - ], - "Parameters": { - "foo": "bar" - } - } -} -``` - -### Sample Request - -```shell-session -$ curl \ - --request PUT \ - --data @payload.json \ - https://localhost:4646/v1/volume/host/create -``` - -### Sample Response - -```json -{ - "Volume": { - "Allocations": [], - "CapacityBytes": 21474836480, - "Constraints": [ - { - "LTarget": "${attr.kernel.name}", - "RTarget": "linux", - "Operand": "=" - } - ], - "CreateIndex": 11, - "CreateTime": 1736191993011594200, - "ID": "c0f7ee7d-5cc6-92fd-f2b5-14b79f01979f", - "ModifyIndex": 11, - "ModifyTime": 1736191993011594200, - "Name": "example", - "Namespace": "default", - "NodeID": "45460554-cc67-11ef-84b7-33d383a55487", - "NodePool": "prod", - "Parameters": { - "foo": "bar" - }, - "PluginID": "mkdir", - "RequestedCapabilities": [ - { - "AccessMode": "single-node-writer", - "AttachmentMode": "file-system" - }, - { - "AccessMode": "single-node-writer", - "AttachmentMode": "block-device" - } - ], - "RequestedCapacityMaxBytes": 21474836480, - "RequestedCapacityMinBytes": 10737418240, - "State": "pending" - }, - "Warnings": null -} -``` - -## Register Dynamic Host Volume - -This endpoint registers an existing dynamic host volume with Nomad. - -| Method | Path | Produces | -|--------|----------------------------|--------------------| -| `PUT` | `/v1/volume/host/register` | `application/json` | - -The following table shows this endpoint's support for [blocking queries][] and -[required ACLs][]. - -| Blocking Queries | ACL Required | -|------------------|----------------------------------------------------------------------------------------------------------------------------------| -| `NO` | `namespace:host-volume-register` or
`namespace:host-volume-write`.
`namespace:sentinel-override` if `PolicyOverride` set | - -### Parameters - -- `Volume` `(Volume: )` - Specifies the JSON definition of the host - volume. You should include the ID field if you are updating an existing - volume. - -- `PolicyOverride` `(bool: false)` - If set, Nomad overrides any soft mandatory - Sentinel policies. This field allows registering a volume when it would be denied - by policy. - -### Sample Payload - -```json -{ - "PolicyOverride": false, - "Volume": { - "Name": "example", - "Namespace": "default", - "NodePool": "prod", - "PluginID": "mkdir", - "RequestedCapacityMinBytes": 10737418240, - "RequestedCapacityMaxBytes": 21474836480, - "RequestedCapabilities": [ - { - "AccessMode": "single-node-writer", - "AttachmentMode": "file-system" - }, - { - "AccessMode": "single-node-writer", - "AttachmentMode": "block-device" - } - ], - "Constraints": [ - { - "LTarget": "${attr.kernel.name}", - "RTarget": "linux", - "Operand": "=" - } - ], - "Parameters": { - "foo": "bar" - } - } -} -``` - -### Sample Request - -```shell-session -$ curl \ - --request PUT \ - --data @payload.json \ - https://localhost:4646/v1/volume/host/register -``` - -### Sample Response - -```json -{ - "Volume": { - "Allocations": [], - "CapacityBytes": 21474836480, - "Constraints": [ - { - "LTarget": "${attr.kernel.name}", - "RTarget": "linux", - "Operand": "=" - } - ], - "CreateIndex": 11, - "CreateTime": 1736191993011594200, - "ID": "c0f7ee7d-5cc6-92fd-f2b5-14b79f01979f", - "ModifyIndex": 31, - "ModifyTime": 1736191993721594200, - "Name": "example", - "Namespace": "default", - "NodeID": "45460554-cc67-11ef-84b7-33d383a55487", - "NodePool": "prod", - "Parameters": { - "foo": "bar" - }, - "PluginID": "mkdir", - "RequestedCapabilities": [ - { - "AccessMode": "single-node-writer", - "AttachmentMode": "file-system" - }, - { - "AccessMode": "single-node-writer", - "AttachmentMode": "block-device" - } - ], - "RequestedCapacityMaxBytes": 21474836480, - "RequestedCapacityMinBytes": 10737418240, - "State": "ready" - }, - "Warnings": null -} -``` - -## Delete Dynamic Host Volume - -This endpoint deletes a dynamic host volume, and deregisters it from Nomad. It -is an error to delete a volume that is in use. - -| Method | Path | Produces | -|----------|-------------------------------------|--------------------| -| `DELETE` | `/v1/volume/host/:volume_id/delete` | `application/json` | - -The following table shows this endpoint's support for [blocking queries][] and -[required ACLs][]. - -| Blocking Queries | ACL Required | -|------------------|---------------------------------------------------------------------| -| `NO` | `namespace:host-volume-write` or
`namespace:host-volume-delete` | - -### Parameters - -- `:volume_id` `(string: )` - Specifies the ID of the - volume. This must be the full ID. Specify this as part of the - path. - -### Sample Request - -```shell-session -$ curl \ - --request DELETE \ - https://localhost:4646/v1/volume/host/ba97ef42-cc68-11ef-a2e7-ffddaecbdb89 -``` - -[blocking queries]: /nomad/api-docs#blocking-queries -[required ACLs]: /nomad/api-docs#acls [csi]: https://github.com/container-storage-interface/spec [csi_plugin]: /nomad/docs/job-specification/csi_plugin [csi_plugins_internals]: /nomad/docs/concepts/plugins/csi#csi-plugins -[Create CSI Volume]: #create-csi-volume -[Volume Expansion]: /nomad/docs/other-specifications/volume/csi#volume-expansion +[Create Volume]: #create-volume +[Volume Expansion]: /nomad/docs/other-specifications/volume#volume-expansion diff --git a/website/content/docs/commands/setup/consul.mdx b/website/content/docs/commands/setup/consul.mdx index 296ba314fd9..1fdd828395a 100644 --- a/website/content/docs/commands/setup/consul.mdx +++ b/website/content/docs/commands/setup/consul.mdx @@ -14,6 +14,13 @@ This command requires `acl:write` permissions for Consul and respects `CONSUL_HTTP_TOKEN`, `CONSUL_HTTP_ADDR`, and other [Consul-related environment variables][consulenv]. + + +This command is an experimental feature and may change its behavior in future +versions of Nomad. + + + ## Usage ```plaintext diff --git a/website/content/docs/commands/setup/vault.mdx b/website/content/docs/commands/setup/vault.mdx index 186262c93b5..835a85dd4f0 100644 --- a/website/content/docs/commands/setup/vault.mdx +++ b/website/content/docs/commands/setup/vault.mdx @@ -21,6 +21,13 @@ migrate to use Workload Identities with Vault. This option requires Refer to [Migrating to Using Workload Identity with Vault][nomad_acl_vault_wid_migrate] for more information. + + +This command is an experimental feature and may change its behavior in future +versions of Nomad. + + + ## Usage ```plaintext diff --git a/website/content/docs/commands/volume/create.mdx b/website/content/docs/commands/volume/create.mdx index 798390783b7..f383469f0a9 100644 --- a/website/content/docs/commands/volume/create.mdx +++ b/website/content/docs/commands/volume/create.mdx @@ -2,19 +2,15 @@ layout: docs page_title: 'Commands: volume create' description: | - The `nomad volume create` command creates storage volumes that are either - Container Storage Interface (CSI) volumes or dynamic host volumes. + Create volumes with CSI plugins. --- # Command: volume create -The `volume create` command creates storage volumes as either [Container Storage -Interface (CSI)][csi] volumes or dynamic host volumes. - -The [`volume create`][] command can create dynamic host volumes if host volume -plugins are installed on the node, or CSI volumes if the CSI plugins implement -the [Controller][csi_plugins_internals] interface. The `volume create` command -automatically [registers][] the volume as well. +The `volume create` command creates external storage volumes with Nomad's +[Container Storage Interface (CSI)][csi] support. Only CSI plugins that +implement the [Controller][csi_plugins_internals] interface support this +command. The volume will also be [registered] when it is successfully created. ## Usage @@ -22,41 +18,21 @@ automatically [registers][] the volume as well. nomad volume create [options] [file] ``` -The `volume create` command requires a single argument, specifying the path to a -file containing a valid [volume specification][volume_specification]. 
Nomad -reads the file and submits the volume to the server for placement. If the -supplied path is "-", the volume file is read from STDIN. Otherwise the file is +The `volume create` command requires a single argument, specifying the path to +a file containing a valid [volume specification][volume_specification]. This +file will be read and the volume will be submitted to Nomad for scheduling. If +the supplied path is "-", the volume file is read from STDIN. Otherwise it is read from the file at the supplied path. -When ACLs are enabled, this command requires a token with the appropriate -capability in the volume's namespace: the `csi-write-volume` capability for CSI -volumes or `host-volume-create` for dynamic host volumes. +When ACLs are enabled, this command requires a token with the +`csi-write-volume` capability for the volume's namespace. ## General Options @include 'general_options.mdx' -## Volume Create Options - -- `-detach`: Return immediately instead of entering monitor mode for dynamic - host volumes. After creating a volume, Nomad prints the volume ID to the - screen, which you can use to examine the volume using the `volume status` - command. If `-detach` is omitted or false, the command monitors the state of - the volume until it has been fingerprinted by the client and is ready to be - scheduled. Not valid for CSI volumes. - -- `-verbose`: Display full information when monitoring volume state. Used for - dynamic host volumes only. Not valid for CSI volumes. - -- `-policy-override`: Sets the flag to force override any soft mandatory - Sentinel policies. Used for dynamic host volumes only. Not valid for CSI - volumes. - ## Volume Specification -Refer to the [CSI Volume Specification][csi_vol_spec] and the [Dynamic Host -Volume Specification][host_vol_spec] for further information. - diff --git a/website/content/docs/other-specifications/namespace.mdx b/website/content/docs/other-specifications/namespace.mdx index 7d091ae2f7e..20e711ea6c8 100644 --- a/website/content/docs/other-specifications/namespace.mdx +++ b/website/content/docs/other-specifications/namespace.mdx @@ -108,23 +108,20 @@ consul { ### `node_pool_config` Parameters -- `default` `(string: "default")` - Specifies the node pool to use for jobs or - dynamic host volumes in this namespace that don't define a node pool in their - specification. - -- `allowed` `(array: nil)` - Specifies the node pools that jobs or - dynamic host volumes in this namespace are allowed to use. By default, all - node pools are allowed. If an empty list is provided only the namespace's - default node pool is allowed. This field supports wildcard globbing through - the use of `*` for multi-character matching. This field cannot be used with - `denied`. - -- `denied` `(array: nil)` - Specifies the node pools that jobs or - dynamic host volumes in this namespace are not allowed to use. This field - supports wildcard globbing through the use of `*` for multi-character - matching. If specified, jobs and dynamic host volumes are allowed to use any - node pool, except for those that match any of these patterns. This field - cannot be used with `allowed`. +- `default` `(string: "default")` - Specifies the node pool to use for jobs in + this namespace that don't define a node pool in their specification. + +- `allowed` `(array: nil)` - Specifies the node pools that are allowed + to be used by jobs in this namespace. By default, all node pools are allowed. + If an empty list is provided only the namespace's default node pool is + allowed. 
This field supports wildcard globbing through the use of `*` for + multi-character matching. This field cannot be used with `denied`. + +- `denied` `(array: nil)` - Specifies the node pools that are not + allowed to be used by jobs in this namespace. This field supports wildcard + globbing through the use of `*` for multi-character matching. If specified, + any node pool is allowed to be used, except for those that match any of these + patterns. This field cannot be used with `allowed`. ### `vault` Parameters diff --git a/website/content/docs/other-specifications/quota.mdx b/website/content/docs/other-specifications/quota.mdx index ebb4d84b99d..3245fb58e57 100644 --- a/website/content/docs/other-specifications/quota.mdx +++ b/website/content/docs/other-specifications/quota.mdx @@ -35,16 +35,11 @@ limit { cpu = 2500 memory = 1000 memory_max = 1000 - device "nvidia/gpu/1080ti" { count = 1 } - - storage { - variables = 1000 # in MB - host_volumes = "1000 GiB" - } } + variables_limit = 1000 } ``` @@ -61,6 +56,9 @@ in a Nomad region. - `region` `(string)` - The Nomad `region` that the limit applies to. - `region_limit` ([RegionLimit](#region_limit-parameters)) - Resources to limit. +- `variables_limit` `(int: 0)` - Maximum total size of all Nomad +[`variables`][] in MiB. The default `0` means unlimited, and `-1` means +variables are fully disabled. ### `region_limit` parameters @@ -77,7 +75,6 @@ all `resources.memory` in the namespace. - `memory_max` `(int: )` - The limit on total mount of hard memory limits in MB from all `resources.memory_max` in the namespace. - `device` ([Device](#device-parameters): nil) -- `storage` ([Storage](#storage-parameters): nil) ### `device` parameters @@ -86,27 +83,12 @@ equivalent to the [`device`][] block in a job specification. - `count` `(int)` - How many of this device may be used. -### `storage` parameters - -The `storage` block defines limits on provisioned storage. - -- `host_volumes` `(int: 0)` - Maximum total size of all [dynamic host volumes][] - in MiB. The default `0` means unlimited, and `-1` means variables are fully - disabled. This field accepts human-friendly string inputs such as "100 - GiB". The quota for host volumes is enforced at the time the volume is created - via [`volume create`][]. - -- `variables` `(int: 0)` - Maximum total size of all Nomad [variables][] in - MiB. The default `0` means unlimited, and `-1` means variables are fully - disabled. This field accepts human-friendly string inputs such as "100 GiB". 
- [Resource Quotas]: /nomad/docs/enterprise#resource-quotas [`quota`]: /nomad/docs/commands/quota [Quota HTTP API]: /nomad/api-docs/quotas [Quotas tutorial]: /nomad/tutorials/governance-and-policy/quotas [`Namespace`]: /nomad/docs/other-specifications/namespace -[variables]: /nomad/docs/concepts/variables +[`variables`]: /nomad/docs/concepts/variables [`resources`]: /nomad/docs/job-specification/resources [CPU concepts]: /nomad/docs/concepts/cpu [`device`]: /nomad/docs/job-specification/device#device-parameters -[dynamic host volumes]: /nomad/docs/other-specifications/volume/host diff --git a/website/content/docs/other-specifications/volume/capability.mdx b/website/content/docs/other-specifications/volume/capability.mdx index 92f4b7364af..c03e20142b5 100644 --- a/website/content/docs/other-specifications/volume/capability.mdx +++ b/website/content/docs/other-specifications/volume/capability.mdx @@ -1,8 +1,7 @@ --- layout: docs page_title: capability Block - Volume Specification -description: |- - Configure Container Storage Interface (CSI) and dynamic host storage volume capability in the "capability" block of the Nomad volume specification. Set single node or multiple node access and file system or block device attachment mode. +description: The "capability" block allows for validating the capability of a volume. --- # `capability` Block @@ -36,43 +35,18 @@ for each capability you intend to use in a job's [`volume`] block. ## `capability` Parameters -- `access_mode` `(string)` - Defines whether a volume should be available - concurrently. The `access_mode` and `attachment_mode` from the volume request - must exactly match one of the volume's `capability` blocks. - - - For CSI volumes the `access_mode` is required. Can be one of the following: - - - `"single-node-reader-only"` - - `"single-node-writer"` - - `"multi-node-reader-only"` - - `"multi-node-single-writer"` - - `"multi-node-multi-writer"` - - Most CSI plugins support only single-node modes. - Consult the documentation of the storage provider and CSI plugin. - - - For dynamic host volumes the `access_mode` is optional. Can be one of the following: - - - `"single-node-writer"` - - `"single-node-reader-only"` - - `"single-node-single-writer"` - - `"single-node-multi-writer"` - - In the job specification, the default is `single-node-writer` unless - `read_only = true`, which translates to `single-node-reader-only`. - -- `attachment_mode` `(string)` - The storage API used by the volume. One of - `"file-system"` or `"block-device"`. The `access_mode` and `attachment_mode` - from the volume request must exactly match one of the volume's `capability` - blocks. - - - For CSI volumes the `attachment_mode` field is required. Most storage - providers support `"file-system"`, to mount volumes using the CSI - filesystem API. Some storage providers support `"block-device"`, which - mounts the volume with the CSI block device API within the container. - - - For dynamic host volumes the `attachment_mode` field is optional and - defaults to `"file-system"`. +- `access_mode` `(string: )` - Defines whether a volume should be +available concurrently. Can be one of `"single-node-reader-only"`, +`"single-node-writer"`, `"multi-node-reader-only"`, +`"multi-node-single-writer"`, or `"multi-node-multi-writer"`. Most CSI plugins +support only single-node modes. Consult the documentation of the storage +provider and CSI plugin. + +- `attachment_mode` `(string: )` - The storage API that will be used +by the volume. 
Most storage providers will support `"file-system"`, to mount +volumes using the CSI filesystem API. Some storage providers will support +`"block-device"`, which will mount the volume with the CSI block device API +within the container. ## `capability` Examples diff --git a/website/content/docs/other-specifications/volume/csi.mdx b/website/content/docs/other-specifications/volume/csi.mdx deleted file mode 100644 index dd38c12fef0..00000000000 --- a/website/content/docs/other-specifications/volume/csi.mdx +++ /dev/null @@ -1,291 +0,0 @@ ---- -layout: docs -page_title: CSI Volume Specification -description: |- - Learn about the Nomad Container Storage Interface (CSI) volume - specification. Create and register CSI volumes using the `volume create` - and `volume register` commands and the - `PUT /v1/volume/csi/:volume_id/create` and - `PUT /v1/volume/csi/:volume_id` API endpoints. Define capacity, capabilities, mount - options, secrets, topology requests, and a context map of values passed - directly to the CSI plugin to validate the volume. Learn how volume - creation and registration are different. Additionally, learn how to - expand the size of a volume and update a volume definition. ---- - -# CSI Volume Specification - -This page provides reference information for the Nomad Container Storage -Interface (CSI) volume specification. Create and register CSI volumes using the -[`volume create`][] and [`volume register`][] commands and the [`PUT -/v1/volume/csi/:volume_id/create`][api_volume_create] and [`PUT -/v1/volume/csi/:volume_id`][api_volume_register] API endpoints. Define capacity, -capabilities, mount options, secrets, topology requests, and a context map of -values passed directly to the CSI plugin to validate the volume. Learn how -volume creation and registration are different. Additionally, learn how to -expand the size of a volume and update a volume definition. - -Some attributes are only supported by a specific operation, while others may -have a different meaning for each action, so read the documentation for each -attribute carefully. The section [Differences Between Create and -Register](#differences-between-create-and-register) provides a summary of the -differences. - -Provide the file as either HCL or JSON to the commands and as JSON to the API. - -## Volume Specification Parameters - -- `capability` ([Capability][capability]: <required>) - - Option for validating the capability of a volume. - -- `capacity_min` `(string: )` - Option for requesting a minimum - capacity, in bytes. The capacity of a volume may be the physical size of a - disk, or a quota, depending on the storage provider. The specific size of the - resulting volume is somewhere between `capacity_min` and `capacity_max`; the - exact behavior is up to the storage provider. If you want to specify an exact - size, you should set `capacity_min` and `capacity_max` to the same - value. Accepts human-friendly suffixes such as `"100GiB"`. This field may not - be supported by all storage providers. Increasing this value and reissuing - `volume create` or `volume register` may expand the volume, if the CSI plugin - supports it. - -- `capacity_max` `(string: )` - Option for requesting a maximum - capacity, in bytes. The capacity of a volume may be the physical size of a - disk, or a quota, depending on the storage provider. The specific size of the - resulting volume is somewhere between `capacity_min` and `capacity_max`; the - exact behavior is up to the storage provider. 
If you want to specify an exact - size, you should set `capacity_min` and `capacity_max` to the same - value. Accepts human-friendly suffixes such as `"100GiB"`. This field may not - be supported by all storage providers. - -- `clone_id` `(string: )` - If the storage provider supports cloning, - the external ID of the volume to clone when creating this volume. If omitted, - the volume is created from scratch. The `clone_id` cannot be set if the - `snapshot_id` field is set. Only allowed on volume creation. - -- `context` (map:nil) - An optional key-value map of - strings passed directly to the CSI plugin to validate the volume. The details - of these parameters are specific to each storage provider, so consult the - specific plugin documentation for more information. Only allowed on volume - registration. Note that, like the rest of the volume specification, this block - is declarative, and an update replaces it in its entirety, therefore all - parameters need to be specified. - -- `external_id` `(string: )` - The ID of the physical volume from the - storage provider. For example, the volume ID of an AWS EBS volume or Digital - Ocean volume. Only allowed on volume registration. - -- `id` `(string: )` - The unique ID of the volume. This is how the - [`volume.source`][csi_volume_source] field in a job specification refers to - the volume. - -- `mount_options` ([MountOptions][mount_options]: - <required>) - Options for mounting `file-system` volumes that - don't already have a pre-formatted file system. - -- `name` `(string: )` - The display name of the volume. On volume - creation, this field may be used by the external storage provider to tag the - volume. - -- `namespace` `(string: )` - The namespace of the volume. This field - overrides the namespace provided by the `-namespace` flag or `NOMAD_NAMESPACE` - environment variable. Defaults to `"default"` if unset. - -- `parameters` (map:nil) - An optional key-value map - of strings passed directly to the CSI plugin to configure the volume. The - details of these parameters are specific to each storage provider, so consult - the specific plugin documentation for more information. - -- `plugin_id` `(string: )` - The ID of the [CSI plugin][csi_plugin] - that manages this volume. - -- `secrets` (map:nil) - An optional key-value map of - strings used as credentials for publishing and unpublishing volumes. - -- `snapshot_id` `(string: )` - If the storage provider supports - snapshots, the external ID of the snapshot to restore when creating this - volume. If omitted, the volume is created from scratch. The `snapshot_id` - cannot be set if the `clone_id` field is set. Only allowed on volume creation. - -- `topology_request` ([TopologyRequest][topology_request]: nil) - - Specify locations such as region, zone, and rack where the provisioned volume - must be accessible from in the case of volume creation, or the locations where - the existing volume is accessible from in the case of volume registration. - -- `type` `(string: )` - The type of volume. Must be `"csi"` for CSI - volumes. - -## Differences Between Create and Register - -Several fields are set automatically by the plugin when `volume create` or -`volume register` commands are successful and you should not set their values if -they are not supported by the operation. - -On **volume registration** you should not set the [`snapshot_id`](#snapshot_id), -or [`clone_id`](#clone_id) fields. 
- -On **volume creation** you should not set the [`external_id`](#external_id) or -[`context`](#context) fields. - -## Updating a Volume Definition - -The `volume register` command allows updating a volume definition. However, -after volume registration, you are only allowed to update the following fields: - -* `capacity_min` and `capacity_max`. You may increase the volume size if the CSI - plugin supports it. Expansion may or may not be possible while the volume is - in use, again depending on the plugin. Reducing volume capacity is not - allowed per the CSI spec. -* `mount_options` block if the volume is not in use. -* `secrets` block -* `context` block. The values for this field are typically provided by the CSI - plugin and should not be updated unless recommended by the CSI plugin's - documentation. - -Additionally, you may add or remove `capability` blocks, but only if the -capability is not currently in use by a mounted volume. - -## Volume Expansion - -You may expand CSI Volumes if the CSI controller plugin and node plugin, if -required, has the `EXPAND_VOLUME` capability. - -To trigger a volume expansion, increase [`capacity_min`](#capacity_min) above -the current real capacity of the volume, as received from the [`volume -status`][] command, and re-issue either [`volume create`][] or [`volume -register`][]. - -Nomad reconciles the requested capacity by issuing expand volume requests to the -controller plugin, and if required by the controller, also to the node plugins -for each allocation that has a claim on the volume. - -## Examples - -### Volume registration - -This is an example file used for the [`volume register`][] command. - -```hcl -id = "ebs_prod_db1" -name = "database" -type = "csi" -external_id = "vol-23452345" -plugin_id = "ebs-prod" - -capability { - access_mode = "single-node-reader-only" - attachment_mode = "file-system" -} - -capability { - access_mode = "single-node-writer" - attachment_mode = "file-system" -} - -mount_options { - fs_type = "ext4" - mount_flags = ["noatime"] -} - -topology_request { - required { - topology { segments { "rack" = "R2" } } - topology { segments { "rack" = "R1", "zone" = "us-east-1a"} } - } -} - -secrets { - example_secret = "xyzzy" -} - -parameters { - skuname = "Premium_LRS" -} - -context { - endpoint = "http://192.168.1.101:9425" -} -``` - - -### Example Volume Expansion - -Either [`volume create`][] or [`volume register`][] commands can trigger -an expand to occur, after the volume has already been created or registered. - -This example shows you how to expand a volume using the `volume create` command. - -1. Create a volume configuration file called `volume.hcl` that defines ID, name, - type, plugin ID, and capacity. - - ```hcl - id = "ebs_prod_db1" - name = "database" - type = "csi" - plugin_id = "ebs-prod" - - capacity_min = "50gib" - capacity_max = "50gib" - - # ... etc ... - ``` - -1. Create the volume using the [`volume create`] command. - - ```shell-session - $ nomad volume create volume.hcl - Created external volume vol-asdf1234 with ID ebs_prod_db1 - ``` - -1. Review the volume's current capacity using the [`volume status`] command. - - ```shell-session - $ nomad volume status ebs_prod_db1 | grep Capacity - Capacity = 50 GiB - ``` - -1. Increase volume capacity in the `volume.hcl` file. - - Update the `capacity_min` and `capacity_max` fields. 
- - ```hcl - id = "ebs_prod_db1" - name = "database" - type = "csi" - plugin_id = "ebs-prod" - # all of the above must remain the same - - capacity_min = "100gib" # double - capacity_max = "100gib" # increased to match - - # ... etc ... - ``` - -1. Expand the volume using the [`volume create`] command. - - ```shell-session - $ nomad volume create volume.hcl - Created external volume vol-asdf1234 with ID ebs_prod_db1 - ``` - -1. Review the new capacity by running the [`volume status`] command. - - ```shell-session - $ nomad volume status ebs_prod_db1 | grep Capacity - Capacity = 100 GiB - ``` - -If you encounter errors that are not clear from the command output, -the CSI plugin allocation logs and Nomad leader server logs may be -helpful. - -[api_volume_create]: /nomad/api-docs/volumes#create-csi-volume -[api_volume_register]: /nomad/api-docs/volumes#register-volume -[capability]: /nomad/docs/other-specifications/volume/capability -[csi_plugin]: /nomad/docs/job-specification/csi_plugin -[csi_volume_source]: /nomad/docs/job-specification/volume#source -[mount_options]: /nomad/docs/other-specifications/volume/mount_options -[topology_request]: /nomad/docs/other-specifications/volume/topology_request -[`volume create`]: /nomad/docs/commands/volume/create -[`volume register`]: /nomad/docs/commands/volume/register -[`volume status`]: /nomad/docs/commands/volume/status diff --git a/website/content/docs/other-specifications/volume/host.mdx b/website/content/docs/other-specifications/volume/host.mdx deleted file mode 100644 index 2247142f95d..00000000000 --- a/website/content/docs/other-specifications/volume/host.mdx +++ /dev/null @@ -1,307 +0,0 @@ ---- -layout: docs -page_title: Dynamic Host Volume Specification -description: |- - Learn about the Nomad dynamic host volume specification. Create and - register dynamic host volumes using the `volume create` and `volume - register` commands and the `PUT /v1/volume/host/create` and - `PUT /v1/volume/host/register` API endpoints. Define capacity, - capability, constraint, node, node pool, and parameters passed directly - to the plugin to configure the volume. Learn how volume creation and - registration are different. Additionally, learn how to place a volume on - specific nodes, update a volume, and expand a volume's capacity. ---- - -# Dynamic Host Volume Specification - -This page provides reference information for the Nomad dynamic host volume -specification. Create and register dynamic host volumes using the [`volume -create`][] and [`volume register`][] commands and the [`PUT -/v1/volume/host/create`][api_volume_create] and [`PUT -/v1/volume/host/register`][api_volume_register] API endpoints. Define capacity, -capability, constraint, node, node pool, and parameters passed directly to the -plugin to configure the volume. Learn how volume creation and registration are -different. Additionally, learn how to place a volume on specific nodes, update a -volume, and expand a volume's capacity. - -Some attributes are only be supported by specific operation, while others may -have a different meaning for each action, so read the documentation for each -attribute carefully. The section [Differences Between Create and -Register](#differences-between-create-and-register) provides a summary of the -differences. - -The file may be provided as either HCL or JSON to the commands and as JSON to -the API. - -## Volume Specification Parameters - -- `capacity` `(string: )` - The size of a volume in bytes. 
Either the - physical size of a disk or a quota, depending on the plugin. This field must - be between the `capacity_min` and `capacity_max` values unless they are - omitted. Accepts human-friendly suffixes such as `"100GiB"`. Only supported - for volume registration. - -- `capacity_min` `(string: )` - Option for requesting a minimum - capacity, in bytes. The capacity of a volume may be the physical size of a - disk, or a quota, depending on the plugin. The specific size of the resulting - volume is somewhere between `capacity_min` and `capacity_max`; the exact - behavior is up to the plugin. If you want to specify an exact size, set - `capacity_min` and `capacity_max` to the same value. Accepts human-friendly - suffixes such as `"100GiB"`. Plugins that cannot restrict the size of volumes - may ignore this field. - -- `capacity_max` `(string: )` - Option for requesting a maximum - capacity, in bytes. The capacity of a volume may be the physical size of a - disk, or a quota, depending on the plugin. The specific size of the resulting - volume is somewhere between `capacity_min` and `capacity_max`; the exact - behavior is up to the plugin. If you want to specify an exact size, set - `capacity_min` and `capacity_max` to the same value. Accepts human-friendly - suffixes such as `"100GiB"`. Plugins that cannot restrict the size of volumes - may ignore this field. - -- `capability` ([Capability][capability]: <required>) - - Option for validating the capability of a volume. - -- `constraint` ([Constraint][constraint]: <optional>) - A - restriction on the eligible nodes where a volume can be created. Refer to the - [volume placement](#volume-placement) section for details. You can provide - multiple `constraint` blocks to add more constraints. Optional for volume - creation and ignored for volume registration. - -- `id` `(string: )` - The ID of a previously created volume to update - via `volume create` or `volume register`. You should never set this field when - initially creating or registering a volume, and you should only use the values - returned from the Nomad API for the ID. - -- `host_path` `(string)` - The path on disk where the volume exists. You should - set this only for volume registration. It is ignored for volume creation. - -- `name` `(string: )` - The name of the volume, which is used as the - [`volume.source`][volume_source] field in job specifications that claim this - volume. Host volume names must be unique per node. Names are visible to any - user with `node:read` ACL, even across namespaces, so they should not be - treated as sensitive values. - -- `namespace` `(string: )` - The namespace of the volume. This field - overrides the namespace provided by the `-namespace` flag or `NOMAD_NAMESPACE` - environment variable. Defaults to `"default"` if unset. - -- `node_id` `(string)` - A specific node where you would like the volume to be - created. Refer to the [volume placement](#volume-placement) section for - details. Optional for volume creation but required for volume registration. - -- `node_pool` `(string: )` - A specific node pool where you would like - the volume to be created. Refer to the [volume placement](#volume-placement) - section for details. Optional for volume creation or volume registration. If - you also provide `node_id`, the node must be in the provided `node_pool`. - -- `parameters` (map:nil) - An optional key-value map - of strings passed directly to the plugin to configure the volume. The details - of these parameters are specific to the plugin. 
- -- `plugin_id` `(string)` - The ID of the [dynamic host volume - plugin][dhv_plugin] that manages this volume. Required for volume creation. - -- `type` `(string: )` - The type of volume. Must be `"host"` for - dynamic host volumes. - -## Differences Between Create and Register - -Several fields are set automatically by Nomad or the plugin when `volume create` -or `volume register` commands, or the equivalent APIs, are successful and you -should not set their values if they are not supported by the operation. - -In **volume creation** you must set the [`plugin_id`](#plugin_id) field. The -[`capacity`](#capacity) and [`host_path`](#host_path) fields are ignored. - -In **volume registration** you must set the [`node_id`](#node_id) and -[`host_path`](#host_path) fields. The [`plugin_id`](#plugin_id) and -[`constraint`](#constraint) fields are ignored. The [`node_pool`](#node_pool), -[`capacity_max`](#capacity_max), and [`capacity_min`](#capacity_min) fields are -ignored but must be consistent if set; the `node_pool` must match the node set -by `node_id`, and the minimum capacity must be less than the `capacity_max` and -`capacity`. - -## Volume Placement - -The `volume create` command creates the volume on a single node. If `node_id` is -set, the volume is created on that node. The `node_pool` must be unset or match -that node. Otherwise, the Nomad server iterates over the available nodes and -place the volume on the first available node based on the following factors: - -* The node cannot already have a host volume with the same name. -* If `node_pool` is set, the selected node must be in that node pool. -* The node must meet any and all constraints defined by the `constraint` fields. - -## Updating a Volume Definition - -The `volume create` and `volume register` commands allow updating a volume -definition. However, after volume registration, you are only allowed to update -the following fields: - -- `plugin_id` -- `capacity_min` and `capacity_max`. You may increase - the volume size if the plugin supports it. Expansion may or may not be - possible while the volume is in use, again depending on the plugin. Reducing - volume capacity is not allowed. Only available for volume creation. -- `capacity`, but only updated for volume registration. -- `constraint` fields. Ignored after the volume is created. - -Additionally, you may add or remove `capability` blocks, but only if the -capability is not currently in use by a mounted volume. - -You cannot update the `name`, `type`, and `node_id` fields. You may only change -the `node_pool` field from empty to the node pool that matches the `node_id` -field. - -## Volume Expansion - -Dynamic host volumes may be expanded if the plugin allows. Reducing the size of -a volume is not permitted. - -To trigger a volume expansion, increase [`capacity_min`](#capacity_min) above -the current real capacity of the volume (as shown with the [`volume status`][] -command), and re-issue [`volume create`][]. - -Nomad reconciles the requested capacity by issuing a create request to the -plugin. - -## Examples - -### Volume creation - -This is an example file used for the [`volume create`][] command. 
- -```hcl -name = "database" -type = "host" -plugin_id = "lvm-thin-provisioner" -node_pool = "prod" - -capacity_min = "80G" -capacity_max = "100G" - -capability { - access_mode = "single-node-reader-only" - attachment_mode = "file-system" -} - -capability { - access_mode = "single-node-writer" - attachment_mode = "file-system" -} - -parameters { - skuname = "Premium_LRS" -} -``` - -### Volume registration - -This is an example file used for the [`volume register`][] command. - -```hcl -name = "database" -type = "host" -node_id = "a7b4c0ca-cc78-11ef-8b5a-cb6ea67b844c" -host_path = "/var/srv/example" -capacity = "80G" - -capability { - access_mode = "single-node-reader-only" - attachment_mode = "file-system" -} - -capability { - access_mode = "single-node-writer" - attachment_mode = "file-system" -} - -parameters { - skuname = "Premium_LRS" -} -``` - -### Example Volume Expansion - -This example shows how to expand a volume after it has been created or -registered. - -1. Create a volume configuration file called `volume.hcl` that defines name, - type, plugin ID, and capacity. - - ```hcl - name = "database" - type = "host" - plugin_id = "external-plugin" - - capacity_min = "30GiB" - capacity_max = "50GiB" - ``` - -1. Create the volume using the [`volume create`] command. - - ```shell-session - $ nomad volume create ./volume.hcl - ==> Created host volume database with ID 0c903229-311d-ba8a-f77e-45c31b83fab3 - ✓ Host volume "0c903229" ready - - 2025-01-06T16:56:09-05:00 - ID = 0c903229-311d-ba8a-f77e-45c31b83fab3 - Name = database - Namespace = default - Plugin ID = external-plugin - Node ID = cfe033a7-50de-2c46-cd18-12be7429eeb3 - Node Pool = default - Capacity = 50 GiB - State = ready - Host Path = /run/nomad/dev/alloc_mounts/0c903229-311d-ba8a-f77e-45c31b83fab3 - ``` - -1. Review the volume's current capacity using the [`volume status`] command. - - ```shell-session - $ nomad volume status -type=host 0c903229 | grep Capacity - Capacity = 50 GiB - ``` - -1. Increase volume capacity in the `volume.hcl` file. - - Update the `capacity_min` and `capacity_max` fields. - - ```hcl - id = "0c903229-311d-ba8a-f77e-45c31b83fab3" - name = "database" - type = "host" - plugin_id = "external-plugin" - - capacity_min = "100GiB" # double - capacity_max = "100GiB" # increased to match - ``` - -1. Expand the volume using the [`volume create`] command. - - The `volume create` command can trigger an expansion to occur, after the - volume has already been created or registered. - - ```shell-session - $ nomad volume create volume.hcl - Created host volume database with ID 0c903229-311d-ba8a-f77e-45c31b83fab3 - ``` - -1. Review the new capacity by running the [`volume status`] command. 
-
- ```shell-session
- $ nomad volume status -type=host 0c903229 | grep Capacity
- Capacity = 100 GiB
- ```
-
-[api_volume_create]: /nomad/api-docs/volumes#create-dynamic-host-volume
-[api_volume_register]: /nomad/api-docs/volumes#register-dynamic-host-volume
-[volume_source]: /nomad/docs/job-specification/volume#source
-[capability]: /nomad/docs/other-specifications/volume/capability
-[constraint]: /nomad/docs/job-specification/constraint
-[`volume create`]: /nomad/docs/commands/volume/create
-[`volume register`]: /nomad/docs/commands/volume/register
-[`volume status`]: /nomad/docs/commands/volume/status
-[dhv_plugin]: /nomad/docs/concepts/plugins/storage/host-volumes
diff --git a/website/content/docs/other-specifications/volume/index.mdx b/website/content/docs/other-specifications/volume/index.mdx
index ee0ba73eea1..9ba9fc3cc17 100644
--- a/website/content/docs/other-specifications/volume/index.mdx
+++ b/website/content/docs/other-specifications/volume/index.mdx
@@ -1,32 +1,24 @@
---
layout: docs
page_title: Volume Specification
-description: |-
-  Learn about the Nomad volume specification. Create and register Container
-  Storage Interface (CSI) and dynamic host volumes using the `volume create`
-  and `volume register` commands and the equivalent API endpoints. Define
-  capability, mount options, topology requests, secrets, and parameters.
+description: Learn about the volume specification used to create and register volumes with Nomad.
---

# Volume Specification

The Nomad volume specification defines the schema for creating and registering
-This page provides reference information for the Nomad volume
-specification. Create and register Container Storage Interface (CSI) and dynamic
-host volumes using the [`volume create`][] and [`volume register`][] commands
-and the equivalent API endpoints. Define capability, mount options, topology
-requests, secrets, and parameters.
+volumes using the [`volume create`] and [`volume register`] commands and the
+[`PUT /v1/volume/csi/:volume_id/create`][api_volume_create] and [`PUT
+/v1/volume/csi/:volume_id`][api_volume_register] API endpoints.

-Some attributes are only be supported by one volume type or the other, or a
-specific operation, while others may have a different meaning for each action,
-so read the documentation for each attribute carefully.
+Some attributes are only supported by specific operations, while others may
+have a different meaning for each action, so read the documentation for each
+attribute carefully. The section [Differences Between Create and
+Register](#differences-between-create-and-register) provides a summary of the
+differences.

-* [CSI volume specification][csi_spec]
-* [Dynamic host volume specification][dhv_spec]
-
-Provide the file as either HCL or JSON to the commands and as JSON to
-the API. An example HCL configuration for a `volume create` command with a CSI
-volume:
+The file may be provided as either HCL or JSON to the commands and as JSON to
+the API. An example HCL configuration for a `volume create` command:

```hcl
id = "ebs_prod_db1"
@@ -72,7 +64,255 @@ parameters {
}
```

+## Volume Specification Parameters
+
+- `id` `(string: <required>)` - The unique ID of the volume. This is how the
+  [`volume.source`][csi_volume_source] field in a job specification will refer
+  to the volume.
+
+- `namespace` `(string: <optional>)` - The namespace of the volume. This field
+  overrides the namespace provided by the `-namespace` flag or `NOMAD_NAMESPACE`
+  environment variable. Defaults to `"default"` if unset.
+
+- `name` `(string: <required>)` - The display name of the volume. On **volume
+  creation**, this field may be used by the external storage provider to tag
+  the volume.
+
+- `type` `(string: <required>)` - The type of volume. Currently only `"csi"`
+  is supported.
+
+- `external_id` `(string: <required>)` - The ID of the physical volume from
+  the storage provider. For example, the volume ID of an AWS EBS volume or
+  Digital Ocean volume. Only allowed on **volume registration**.
+
+- `plugin_id` `(string: <required>)` - The ID of the [CSI plugin][csi_plugin]
+  that manages this volume.
+
+- `snapshot_id` `(string: <optional>)` - If the storage provider supports
+  snapshots, the external ID of the snapshot to restore when creating this
+  volume. If omitted, the volume will be created from scratch. The
+  `snapshot_id` cannot be set if the `clone_id` field is set. Only allowed on
+  **volume creation**.
+
+- `clone_id` `(string: <optional>)` - If the storage provider supports cloning,
+  the external ID of the volume to clone when creating this volume. If omitted,
+  the volume will be created from scratch. The `clone_id` cannot be set if the
+  `snapshot_id` field is set. Only allowed on **volume creation**.
+
+- `capacity_min` `(string: <optional>)` - Option for requesting a minimum
+  capacity, in bytes. The capacity of a volume may be the physical size of a
+  disk, or a quota, depending on the storage provider. The specific size of the
+  resulting volume will be somewhere between `capacity_min` and `capacity_max`;
+  the exact behavior is up to the storage provider. If you want to specify an
+  exact size, you should set `capacity_min` and `capacity_max` to the same
+  value. Accepts human-friendly suffixes such as `"100GiB"`. This field may not
+  be supported by all storage providers. Increasing this value and re-issuing
+  `volume create` or `volume register` may expand the volume, if the CSI plugin
+  supports it.
+
+- `capacity_max` `(string: <optional>)` - Option for requesting a maximum
+  capacity, in bytes. The capacity of a volume may be the physical size of a
+  disk, or a quota, depending on the storage provider. The specific size of the
+  resulting volume will be somewhere between `capacity_min` and `capacity_max`;
+  the exact behavior is up to the storage provider. If you want to specify an
+  exact size, you should set `capacity_min` and `capacity_max` to the same
+  value. Accepts human-friendly suffixes such as `"100GiB"`. This field may not
+  be supported by all storage providers.
+
+- `capability` ([Capability][capability]: <required>) -
+  Option for validating the capability of a volume.
+
+- `mount_options` ([MountOptions][mount_options]: <required>) -
+  Options for mounting `file-system` volumes that don't already have a
+  pre-formatted file system.
+
+- `topology_request` ([TopologyRequest][topology_request]: nil) -
+  Specify locations (region, zone, rack, etc.) where the provisioned volume
+  must be accessible from in the case of **volume creation** or the locations
+  where the existing volume is accessible from in the case of **volume
+  registration**.
+
+- `secrets` (map<string|string>:nil) - An optional key-value map
+  of strings used as credentials for publishing and unpublishing volumes.
+
+- `parameters` (map<string|string>:nil) - An optional key-value
+  map of strings passed directly to the CSI plugin to configure the volume. The
+  details of these parameters are specific to each storage provider, so consult
+  the specific plugin documentation for more information.
+
+- `context` (map<string|string>:nil) - An optional key-value map
+  of strings passed directly to the CSI plugin to validate the volume. The
+  details of these parameters are specific to each storage provider, so consult
+  the specific plugin documentation for more information. Only allowed on
+  **volume registration**. Note that, like the rest of the volume specification,
+  this block is declarative, and an update replaces it in its entirety, so
+  all parameters must be specified.
+
+## Differences Between Create and Register
+
+Several fields are set automatically by the plugin when the `volume create` or
+`volume register` command succeeds, so you should not set their values if they
+are not supported by the operation.
+
+You should not set the [`snapshot_id`](#snapshot_id) or [`clone_id`](#clone_id)
+fields on **volume registration**.
+
+You should not set the [`external_id`](#external_id) or
+[`context`](#context) fields on **volume creation**.
+
+## Updating a Volume Definition
+
+The `volume register` command allows updating a volume definition. However, not
+all fields can be updated after the volume is registered:
+
+* The `capacity_min` and `capacity_max` fields can be updated, and may increase
+  the volume size if the CSI plugin supports it. Expansion may or may not be
+  possible while the volume is in use, again depending on the plugin.
+  Reducing volume capacity is not allowed per the CSI spec.
+* The `capability` blocks can be added or removed, but only if the capability is
+  not currently in use by a mounted volume.
+* The `mount_options` block can be updated if the volume is not in use.
+* The `secrets` block can be updated.
+* The `context` block can be updated. The values for this field are typically
+  provided by the CSI plugin, and should not be updated unless recommended by
+  the CSI plugin's documentation.
+
+## Volume Expansion
+
+CSI volumes may be expanded (increased in size) if the CSI controller plugin
+(and node plugin, if required) has the `EXPAND_VOLUME` capability.
+
+To trigger a volume expansion, increase [`capacity_min`](#capacity_min)
+above the current real capacity of the volume (as seen with the
+[`volume status`][] command), and re-issue either [`volume create`][]
+or [`volume register`][].
+
+Nomad will reconcile the requested capacity by issuing expand volume requests
+to the controller plugin, and if required by the controller, also to the
+node plugins for each allocation that has a claim on the volume.
+
+## Examples
+
+### Volume registration
+
+This is an example file used for the [`volume register`] command.
+
+```hcl
+id = "ebs_prod_db1"
+name = "database"
+type = "csi"
+external_id = "vol-23452345"
+plugin_id = "ebs-prod"
+
+capability {
+  access_mode = "single-node-reader-only"
+  attachment_mode = "file-system"
+}
+
+capability {
+  access_mode = "single-node-writer"
+  attachment_mode = "file-system"
+}
+
+mount_options {
+  fs_type = "ext4"
+  mount_flags = ["noatime"]
+}
+
+topology_request {
+  required {
+    topology { segments { "rack" = "R2" } }
+    topology { segments { "rack" = "R1", "zone" = "us-east-1a" } }
+  }
+}
+
+secrets {
+  example_secret = "xyzzy"
+}
+
+parameters {
+  skuname = "Premium_LRS"
+}
+
+context {
+  endpoint = "http://192.168.1.101:9425"
+}
+```
+
+
+### Example Volume Expansion
+
+Either the [`volume create`][] or [`volume register`][] command can trigger
+an expansion after the volume has already been created or registered.
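+
+The same flow applies to a volume that was originally registered: raise
+`capacity_min` (and `capacity_max`, if needed) in the registration file and
+re-issue [`volume register`][]. A minimal sketch, assuming the registration
+file shown above is saved as `register.hcl` (a hypothetical filename):
+
+```shell-session
+$ nomad volume register register.hcl
+```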
+ +Example using `volume create`, with a `volume.hcl` file: + +```hcl +id = "ebs_prod_db1" +name = "database" +type = "csi" +plugin_id = "ebs-prod" + +capacity_min = "50gib" +capacity_max = "50gib" + +# ... etc ... +``` + +Create the volume: + +```shell-session +$ nomad volume create volume.hcl +Created external volume vol-asdf1234 with ID ebs_prod_db1 +``` + +See its current capacity with [`volume status`][]: + +```shell-session +$ nomad volume status ebs_prod_db1 | grep Capacity +Capacity = 50 GiB +``` + +Update `volume.hcl`: + +```hcl +id = "ebs_prod_db1" +name = "database" +type = "csi" +plugin_id = "ebs-prod" +# all of the above must remain the same + +capacity_min = "100gib" # double +capacity_max = "100gib" # increased to match + +# ... etc ... +``` + +Run `volume create` again: + +```shell-session +$ nomad volume create volume.hcl +Created external volume vol-asdf1234 with ID ebs_prod_db1 +``` + +Check the volume capacity again: + +```shell-session +$ nomad volume status ebs_prod_db1 | grep Capacity +Capacity = 100 GiB +``` + +If you encounter errors that are not clear from the command output, +the CSI plugin allocation logs and/or Nomad leader server logs may be +helpful. + +[api_volume_create]: /nomad/api-docs/volumes#create-volume +[api_volume_register]: /nomad/api-docs/volumes#register-volume +[capability]: /nomad/docs/other-specifications/volume/capability +[csi_plugin]: /nomad/docs/job-specification/csi_plugin +[csi_volume_source]: /nomad/docs/job-specification/volume#source +[mount_options]: /nomad/docs/other-specifications/volume/mount_options +[topology_request]: /nomad/docs/other-specifications/volume/topology_request [`volume create`]: /nomad/docs/commands/volume/create [`volume register`]: /nomad/docs/commands/volume/register -[csi_spec]: /nomad/docs/other-specifications/volume/csi -[dhv_spec]: /nomad/docs/other-specifications/volume/host +[`volume status`]: /nomad/docs/commands/volume/status diff --git a/website/content/docs/other-specifications/volume/mount_options.mdx b/website/content/docs/other-specifications/volume/mount_options.mdx index c963df5be94..6bee03d0136 100644 --- a/website/content/docs/other-specifications/volume/mount_options.mdx +++ b/website/content/docs/other-specifications/volume/mount_options.mdx @@ -1,8 +1,7 @@ --- layout: docs page_title: mount_options Block - Volume Specification -description: |- - Configure Container Storage Interface (CSI) storage volume file system type and mount flags in the "mount_options" block of the Nomad volume specification. +description: The "mount_options" block allows for configuring how a volume is mounted. --- # `mount_options` Block @@ -13,9 +12,8 @@ description: |- ]} /> -Options for mounting `file-system` CSI volumes that don't already have a -pre-formatted file system. The `mount_options` block is not supported for -dynamic host volumes. +Options for mounting `file-system` volumes that don't already have a +pre-formatted file system. 
```hcl
id = "ebs_prod_db1"
diff --git a/website/content/docs/other-specifications/volume/topology_request.mdx b/website/content/docs/other-specifications/volume/topology_request.mdx
index 27cbfda9fcd..0c1c6a8ae93 100644
--- a/website/content/docs/other-specifications/volume/topology_request.mdx
+++ b/website/content/docs/other-specifications/volume/topology_request.mdx
@@ -1,8 +1,7 @@
---
layout: docs
page_title: topology_request Block - Volume Specification
-description: |-
-  Configure Container Storage Interface (CSI) storage volume topology in the "topology_request" block of the Nomad volume specification. Specify region, zone, and rack so that Nomad can access a provisioned CSI volume. Review volume creation examples with preferred and required topologies.
+description: The "topology_request" block specifies locations from which the provisioned volume must be accessible.
---

# `topology_request` Block

@@ -13,10 +12,8 @@ description: |-
  ]}
/>

-Specify locations such as region, zone, and rack, where a provisioned CSI volume must
-be accessible, or where an existing volume is accessible. The `topology_request`
-block is not supported for dynamic host volumes.
-
+Specify locations (region, zone, rack, etc.) from which the provisioned volume
+must be accessible, or from which an existing volume is already accessible.

```hcl
id = "ebs_prod_db1"
diff --git a/website/content/docs/upgrade/upgrade-specific.mdx b/website/content/docs/upgrade/upgrade-specific.mdx
index 3c4c7de9d69..7a78c0bbca5 100644
--- a/website/content/docs/upgrade/upgrade-specific.mdx
+++ b/website/content/docs/upgrade/upgrade-specific.mdx
@@ -12,46 +12,6 @@ upgrade. However, specific versions of Nomad may have more details provided for
their upgrades as a result of new features or changed behavior. This page is
used to document those details separately from the standard upgrade flow.

-## Nomad 1.10.0
-
-#### Quota specification variable_limits deprecated
-
-In Nomad 1.10.0, the quota specification's `variable_limits` field is
-deprecated. It is replaced by a new `storage` block with a `variables` field,
-under the `region_limit` block. Existing quotas will be automatically migrated
-during server upgrade. The `variables_limit` field will be removed from the
-quota specification in Nomad 1.12.0.
-
-#### Go SDK API change for quota limits
-
-In Nomad 1.10.0, the Go API for quotas has a breaking change. The
-`QuotaSpec.RegionLimit` field is now of type `QuotaResources` instead of
-`Resources`. The `QuotaSpec.VariablesLimit` field is deprecated in lieu of
-`QuotaSpec.RegionLimit.Storage.Variables` and will be removed in Nomad 1.12.0.
-
-#### Remote task driver support removed
-
-All support for remote task driver capabilities has been removed in Nomad 1.10.0.
-Drivers with the `RemoteTasks` capability will no longer be detached in the event
-the allocation is lost, nor will remote tasks be detached when a node is drained.
-Workloads running as remote tasks should be migrated prior to upgrading.
-
-#### Loading Binaries from `plugin_dir` Without Configuration
-
-Plugins stored within the [`plugin_dir`](/nomad/docs/configuration#plugin_dir)
-will now only be loaded when they have a corresponding
-[`plugin`](/nomad/docs/configuration/plugin) block in the agent configuration
-file.
-
-#### Affinity and spread updates are non-destructive
-
-In Nomad 1.10.0, a scheduler bug was fixed so that updates to `affinity` and
-`spread` blocks are no longer destructive.
After a job update that changes only
-these blocks, existing allocations remain running with their job version
-incremented. If you were relying on the previous behavior to redistribute
-workloads, you can force a destructive update by changing fields that require
-one, such as the `meta` block.
-
 ## Nomad 1.9.5

 #### CNI plugins
diff --git a/website/data/docs-nav-data.json b/website/data/docs-nav-data.json
index 18df8fbe90e..51529ae7df4 100644
--- a/website/data/docs-nav-data.json
+++ b/website/data/docs-nav-data.json
@@ -185,20 +185,7 @@
     },
     {
       "title": "Storage",
-      "routes": [
-        {
-          "title": "Overview",
-          "path": "concepts/plugins/storage"
-        },
-        {
-          "title": "CSI",
-          "path": "concepts/plugins/storage/csi"
-        },
-        {
-          "title": "Host Volumes",
-          "path": "concepts/plugins/storage/host-volumes"
-        }
-      ]
+      "path": "concepts/plugins/csi"
     },
     {
       "title": "Networking",
@@ -1978,14 +1965,6 @@
           "title": "Overview",
           "path": "other-specifications/volume"
         },
-        {
-          "title": "CSI",
-          "path": "other-specifications/volume/csi"
-        },
-        {
-          "title": "Dynamic Host Volumes",
-          "path": "other-specifications/volume/host"
-        },
         {
           "title": "capability",
           "path": "other-specifications/volume/capability"
diff --git a/website/package-lock.json b/website/package-lock.json
index ae0ce60a2f0..b6d12bfcad5 100644
--- a/website/package-lock.json
+++ b/website/package-lock.json
@@ -9638,7 +9638,6 @@
       "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.5.1.tgz",
       "integrity": "sha512-hPpFQvHwL3Qv5AdRvBFMhnKo4tYxp0ReXiPn2bxkiohEX6mBeBwEpBSQTkD458RaaDKQMYSp4hX4UtfUTA5wDw==",
       "dev": true,
-      "license": "MIT",
       "bin": {
         "prettier": "bin/prettier.cjs"
       },
diff --git a/website/public/img/nomad-ui-block.png b/website/public/img/nomad-ui-block.png
deleted file mode 100644
index f69a21693995cbcc5927b47e58bc0208251575ff..0000000000000000000000000000000000000000
GIT binary patch (binary PNG data omitted)