diff --git a/.bldr.toml b/.bldr.toml index 74ceebd1f6a..53e47b33483 100644 --- a/.bldr.toml +++ b/.bldr.toml @@ -227,6 +227,7 @@ paths = [ "lib/logger/*", "lib/platform/*", "lib/proc/*", + "lib/product/*", "lib/proxy/*", "lib/stringutils/*", "lib/tls/*", @@ -391,6 +392,7 @@ paths = [ "lib/io/*", "lib/platform/*", "lib/proc/*", + "lib/product/*", "lib/proxy/*", "lib/secrets/*", "lib/stringutils/*", @@ -473,6 +475,7 @@ paths = [ "lib/license/*", "lib/platform/*", "lib/proc/*", + "lib/product/*", "lib/proxy/*", "lib/secrets/*", "lib/stringutils/*", diff --git a/.expeditor/create-manifest.rb b/.expeditor/create-manifest.rb index c5cb0ff17de..e99635007fe 100755 --- a/.expeditor/create-manifest.rb +++ b/.expeditor/create-manifest.rb @@ -11,8 +11,7 @@ BLDR_API_HOST="bldr.habitat.sh" BLDR_API_USER_AGENT="Chef Expeditor" -# Packages that are present in -# components/automate-deployment/pkg/assets/data/services.json but we wish to +# Packages that are present in products.meta but we wish to # exclude from the manifest (probably because they are not yet published to the # depot). 
# @@ -164,42 +163,25 @@ def get_hab_deps_latest() manifest["git_sha"] = out.strip -collections = File.open("components/automate-deployment/pkg/assets/data/services.json") do |f| +products_meta = File.open("products.meta") do |f| JSON.parse(f.read) end -pkg_paths_by_collection = {} - -non_package_data_keys = %w{ collection binlinks } - -collections.each do |collection| - paths_for_collection = [] - collection.each do |pkg_type, pkg_list| - next if non_package_data_keys.include?(pkg_type) - paths_for_collection += pkg_list - end - collection_name = collection["collection"] - pkg_paths_by_collection[collection_name] = paths_for_collection -end - manifest["packages"] = [] -pkg_paths_by_collection.each do |name, pkg_paths| - - pkg_paths.each do |pkg_path| - next if SKIP_PACKAGES.include?(pkg_path) +products_meta["packages"].each do |pkg_path| + next if SKIP_PACKAGES.include?(pkg_path) - package_ident = pkg_path.split("/") - pkg_origin = package_ident[0] - pkg_name = package_ident[1] + package_ident = pkg_path.split("/") + pkg_origin = package_ident[0] + pkg_name = package_ident[1] - latest_release = get_latest(channel_for_origin(pkg_origin), pkg_origin, pkg_name) + latest_release = get_latest(channel_for_origin(pkg_origin), pkg_origin, pkg_name) - pkg_version = latest_release["version"] - pkg_release = latest_release["release"] + pkg_version = latest_release["version"] + pkg_release = latest_release["release"] - puts " Adding package #{pkg_origin}/#{pkg_name}/#{pkg_version}/#{pkg_release} from collection #{name}" - manifest["packages"] << "#{pkg_origin}/#{pkg_name}/#{pkg_version}/#{pkg_release}" - end + puts " Adding package #{pkg_origin}/#{pkg_name}/#{pkg_version}/#{pkg_release}" + manifest["packages"] << "#{pkg_origin}/#{pkg_name}/#{pkg_version}/#{pkg_release}" end # Add extra packages to manifest that deployment-service doesn't need to manage diff --git a/.expeditor/verify_private.pipeline.yml b/.expeditor/verify_private.pipeline.yml index 5d83d7d89f0..9665cd928a9 
100644 --- a/.expeditor/verify_private.pipeline.yml +++ b/.expeditor/verify_private.pipeline.yml @@ -514,7 +514,7 @@ steps: - label: "[integration] airgap upgrade" command: - integration/run_test integration/tests/airgap_upgrade.sh - timeout_in_minutes: 20 + timeout_in_minutes: 30 expeditor: executor: linux: @@ -589,7 +589,7 @@ steps: - label: "[integration] upgrade dev -> master" command: - integration/run_test integration/tests/upgrade.sh - timeout_in_minutes: 20 + timeout_in_minutes: 25 expeditor: executor: linux: @@ -598,7 +598,7 @@ steps: - label: "[integration] upgrade acceptance -> master" command: - integration/run_test integration/tests/upgrade_acceptance_master.sh - timeout_in_minutes: 20 + timeout_in_minutes: 25 expeditor: executor: linux: @@ -607,7 +607,7 @@ steps: - label: "[integration] upgrade current -> master" command: - integration/run_test integration/tests/upgrade_current_master.sh - timeout_in_minutes: 20 + timeout_in_minutes: 25 expeditor: executor: linux: @@ -616,7 +616,7 @@ steps: - label: "[integration] manual upgrade current -> master" command: - integration/run_test integration/tests/manual_upgrade.sh - timeout_in_minutes: 20 + timeout_in_minutes: 25 expeditor: executor: linux: @@ -625,7 +625,7 @@ steps: - label: "[integration] deep upgrades" command: - integration/run_test integration/tests/deep_upgrade.sh - timeout_in_minutes: 20 + timeout_in_minutes: 25 expeditor: executor: linux: @@ -634,7 +634,7 @@ steps: - label: "[integration] deep migrate upgrade" command: - integration/run_test integration/tests/migrate_upgrade.sh - timeout_in_minutes: 20 + timeout_in_minutes: 25 expeditor: executor: linux: diff --git a/api/config/deployment/config_request.go b/api/config/deployment/config_request.go index 8cea7faed60..8bdebfa6630 100644 --- a/api/config/deployment/config_request.go +++ b/api/config/deployment/config_request.go @@ -95,11 +95,9 @@ func (c *ConfigRequest) Validate() error { } if desiredProducts := c.V1.Svc.GetProducts(); 
len(desiredProducts) > 0 { - availableProducts := services.ListProducts() - for _, desiredProduct := range desiredProducts { - if !stringutils.SliceContains(availableProducts, desiredProduct) { - err.AddInvalidValue("deployment.v1.svc.products", fmt.Sprintf("Valid products are %s", strings.Join(availableProducts, ", "))) - } + validationErr := services.ValidateProductDeployment(desiredProducts) + if validationErr != nil { + err.AddInvalidValue("deployment.v1.svc.products", validationErr.Error()) } } diff --git a/components/automate-cli/package.meta b/components/automate-cli/package.meta new file mode 100644 index 00000000000..92044d4dc7d --- /dev/null +++ b/components/automate-cli/package.meta @@ -0,0 +1,4 @@ +{ + "name": "chef/automate-cli", + "binlinks": ["chef-automate"] +} diff --git a/components/automate-cs-nginx/package.meta b/components/automate-cs-nginx/package.meta new file mode 100644 index 00000000000..dd2c922afc3 --- /dev/null +++ b/components/automate-cs-nginx/package.meta @@ -0,0 +1,4 @@ +{ + "name": "chef/automate-cs-nginx", + "binlinks": ["knife", "chef-server-ctl"] +} diff --git a/components/automate-deployment/docs/how-to-add-a-service.md b/components/automate-deployment/docs/how-to-add-a-service.md index fb1963fc3a9..4e0491cc76d 100644 --- a/components/automate-deployment/docs/how-to-add-a-service.md +++ b/components/automate-deployment/docs/how-to-add-a-service.md @@ -140,7 +140,7 @@ determining what packages to build. Add your package to the ### Manifest Chicken and Egg Issue -"The manifest" is a JSON file which is created by the script `.expeditor/create-manifest.rb`. The manifest contains exact versions of habitat packages that comprise an Automate release. The package versions are determined by querying the habitat depot/bldr for the versions of every package described in the `services.json` file (more on that below). 
The reason we must query the depot is because we do not build every package on every build, so instead we build the manifest based on what exists in the depot. During tests we rely on the deployment service preferring packages from local disk to upgrade just those packages with changes. +"The manifest" is a JSON file which is created by the script `.expeditor/create-manifest.rb`. The manifest contains exact versions of habitat packages that comprise an Automate release. The package versions are determined by querying the habitat depot/bldr for the versions of every package described in the `products.meta` file (more on that below). The reason we must query the depot is because we do not build every package on every build, so instead we build the manifest based on what exists in the depot. During tests we rely on the deployment service preferring packages from local disk to upgrade just those packages with changes. When introducing a new service, the manifest is a problem because there are initially zero version of your new package in the depot, so a query for the current version of that package will fail and the manifest can't be generated. One way around this is to merge your new service in two pull requests. The first just needs to have a buildable package so that there's something in the depot to query. If you've followed this guide in order, you should be able to submit and merge your first pull request now and then follow up with a second pull request which includes the work described below (along with whatever other test suites, etc. you need). @@ -150,6 +150,6 @@ Note: deployment-team has added some code that makes tests use a locally-generat You need to run `make update-bindings` from `components/automate-deployment` whenever you add/change/remove bindings. You don't necessarily need to change the bindings when first adding your service, but when you start to wire up your new service to other services this will be needed. 
-### The Services File +### The products.meta File -The list of services and other mandatory packages that comprise Automate 2 is maintained in `components/automate-deployment/pkg/assets/data/services.json`. This file is also used to generate the manifest file that represents a Chef Automate 2 release as described above. Edit this file and add your service. In general, you will want to add it to the "automate-full" collection; the other collections are optional additions. +The list of services and other mandatory packages that comprise Automate 2 and other Chef products is maintained in `products.meta`. This file is also used to generate the manifest file that represents a Chef Automate 2 release as described above. Edit this file and add your service. Services may also define extra metadata for their service in a `package.meta` for their component. See https://godoc.org/github.com/chef/automate/lib/product for more information. diff --git a/components/automate-deployment/pkg/assets/assets.bindata.go b/components/automate-deployment/pkg/assets/assets.bindata.go index de57cdd3135..822abc8383e 100644 --- a/components/automate-deployment/pkg/assets/assets.bindata.go +++ b/components/automate-deployment/pkg/assets/assets.bindata.go @@ -8,7 +8,6 @@ // data/a1stub_certs/ChefAutomateUpgradeFromv1SelfTest.key // data/binds.txt // data/init-config.toml -// data/services.json // DO NOT EDIT!
package assets @@ -91,7 +90,7 @@ func dataA1_elasticsearch_mappingsComplianceTemplateJson() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "data/a1_elasticsearch_mappings/compliance-template.json", size: 5111, mode: os.FileMode(420), modTime: time.Unix(1554914860, 0)} + info := bindataFileInfo{name: "data/a1_elasticsearch_mappings/compliance-template.json", size: 5111, mode: os.FileMode(420), modTime: time.Unix(1554910181, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -111,7 +110,7 @@ func dataA1_elasticsearch_mappingsInsightsTemplateJson() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "data/a1_elasticsearch_mappings/insights-template.json", size: 2705, mode: os.FileMode(420), modTime: time.Unix(1554914860, 0)} + info := bindataFileInfo{name: "data/a1_elasticsearch_mappings/insights-template.json", size: 2705, mode: os.FileMode(420), modTime: time.Unix(1554910181, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -131,7 +130,7 @@ func dataA1stub_certsChefautomateupgradefromv1selftestCrl() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "data/a1stub_certs/ChefAutomateUpgradeFromv1SelfTest.crl", size: 958, mode: os.FileMode(420), modTime: time.Unix(1554914860, 0)} + info := bindataFileInfo{name: "data/a1stub_certs/ChefAutomateUpgradeFromv1SelfTest.crl", size: 958, mode: os.FileMode(420), modTime: time.Unix(1554910181, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -151,7 +150,7 @@ func dataA1stub_certsChefautomateupgradefromv1selftestCrt() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "data/a1stub_certs/ChefAutomateUpgradeFromv1SelfTest.crt", size: 1826, mode: os.FileMode(420), modTime: time.Unix(1554914860, 0)} + info := bindataFileInfo{name: "data/a1stub_certs/ChefAutomateUpgradeFromv1SelfTest.crt", size: 1826, mode: os.FileMode(420), modTime: time.Unix(1554910181, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -171,7 +170,7 @@ 
func dataA1stub_certsChefautomateupgradefromv1selftestKey() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "data/a1stub_certs/ChefAutomateUpgradeFromv1SelfTest.key", size: 3243, mode: os.FileMode(420), modTime: time.Unix(1554914860, 0)} + info := bindataFileInfo{name: "data/a1stub_certs/ChefAutomateUpgradeFromv1SelfTest.key", size: 3243, mode: os.FileMode(420), modTime: time.Unix(1554910181, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -191,7 +190,7 @@ func dataBindsTxt() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "data/binds.txt", size: 4254, mode: os.FileMode(420), modTime: time.Unix(1561076241, 0)} + info := bindataFileInfo{name: "data/binds.txt", size: 4254, mode: os.FileMode(420), modTime: time.Unix(1561380282, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -211,27 +210,7 @@ func dataInitConfigToml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "data/init-config.toml", size: 1823, mode: os.FileMode(420), modTime: time.Unix(1554914860, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _dataServicesJson = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xdc\x54\xdd\x6e\xdb\x3c\x0c\xbd\xcf\x53\x08\xbe\xae\xd0\xfb\xbe\x4a\x51\x14\x34\x4d\x2b\x84\x69\xd1\x9f\x28\x37\xcd\x57\xf4\xdd\x07\xe7\x77\xce\xa6\x38\xd9\x56\x60\xd8\x95\x01\xf1\x1c\xfe\x1c\xf3\xf0\x79\xe5\xdc\xc7\xca\x39\xe7\x2a\x54\x11\xc2\xcc\x1a\xab\x27\x57\xc1\x98\xb5\x87\x4c\xbe\x1d\x45\xaa\x87\x3d\xa4\x81\x0c\xaf\x46\xe9\x8d\x91\xac\x7a\x72\xcf\xbb\xe7\x89\xbb\xa6\xf6\xb1\x06\xec\xc6\xc1\x07\xc8\xb4\x81\xed\x81\x73\x0c\x9e\xf2\x0d\x6a\x39\x24\xb2\xff\xa4\x84\x20\x01\xcb\x8c\x46\x90\x70\x5d\xed\x30\x2f\x87\x06\x40\xe4\xeb\xeb\x0f\x61\x29\xc7\xbc\xc3\x12\xc8\x96\xd2\x8c\x7c\x11\x19\x82\x37\x6e\x08\x21\xf9\xc3\x94\x17\x00\x7a\xa3\x98\x0b\x31\x18\xf3\xfa\xff\x12\xcf\x16\x12\x9f\x7a\x6a\xe8\xfd\x22\x94\x09\x7a\xbb\x52\x33\x16\x62\x46\x98\x28\x17\x99\xc3\x20\x8c\x30\xad\x5b\x09\x12\x35\x73\xbb\x88\x69\xa8\x87\x08\x81\x4a\x93\xa1\xf6\x83\x30\x44\xa4\x02\x40\x18\x29\x1a\x79\xd4\x98\x93\x4a\x09\xa5\x08\xe2\x47\x2b\xd6\x31\x32\x63\x2d\x89\xc1\x31\x90\x95\xfe\x1c\x6a\x6c\x39\xf8\x3e\xf4\x25\x44\x43\x83\xe8\xb6\x2f\xff\xfc\xc9\x99\xbe\x25\x6a\xae\xc5\x85\x5b\xc2\x2d\x4a\x49\x89\xfd\x76\x2d\x2c\xed\x42\x58\x14\x1a\x5f\x83\x4c\x82\xa7\xb9\x79\x07\x8e\x91\x9a\xd7\x01\xb0\x83\xf0\x13\xff\x9e\x72\xa0\xf0\x9c\x49\xef\x39\xc1\x0d\xc4\x86\xea\x31\x7c\xd7\x99\x26\x7a\x4c\xb6\x8d\x38\x4f\x57\x73\x14\x8e\xdd\x94\xe8\xe3\x88\x9d\x15\x3f\x17\x38\x94\xf0\xc7\x70\x75\x78\x7f\xd9\x7d\x3f\x57\xce\x7d\x3e\x94\x4e\xe8\x8e\x38\x29\x4d\xe9\x1f\x38\xa0\x8b\x2b\xf8\x17\x5d\xd8\xe5\x3b\x7a\xf3\x3d\x44\xf3\xb5\x6a\x67\x6b\x92\xf6\x0a\x46\xd1\xd7\xdc\x26\xb5\x7c\x1d\x44\x69\x7a\xbd\x82\x89\x81\xe3\xe5\x05\xfe\x45\x73\x95\xdd\xf3\x27\x8d\xf0\xf0\x23\xf1\x38\xc5\x8c\xdd\x45\x6e\xcf\x2a\xbb\x99\x3d\x3c\x66\xb9\xc7\x59\x1b\x4d\x5d\x2b\xba\x29\xda\xea\xa6\x75\x3f\x35\x7c\x4c\x37\x37\x6b\x19\xb6\x9f\xee\x8b\xe5\xbf\x6c\x6a\x26\xe6\x29\x78\xa7\x70\xbd\x46\xce\x9a\x38\x86\xdb\x2e\xd2\xd9\xb1\x49\x7b\xca\x6b\x1a\xed\x8e\x7b\xb2\xc8\xfe\x6d\xd5\x76\xd3\xae
\x5e\x56\xdf\x02\x00\x00\xff\xff\xfd\x46\xa5\xda\xc7\x0a\x00\x00") - -func dataServicesJsonBytes() ([]byte, error) { - return bindataRead( - _dataServicesJson, - "data/services.json", - ) -} - -func dataServicesJson() (*asset, error) { - bytes, err := dataServicesJsonBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "data/services.json", size: 2759, mode: os.FileMode(420), modTime: time.Unix(1561076245, 0)} + info := bindataFileInfo{name: "data/init-config.toml", size: 1823, mode: os.FileMode(420), modTime: time.Unix(1554910181, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -295,7 +274,6 @@ var _bindata = map[string]func() (*asset, error){ "data/a1stub_certs/ChefAutomateUpgradeFromv1SelfTest.key": dataA1stub_certsChefautomateupgradefromv1selftestKey, "data/binds.txt": dataBindsTxt, "data/init-config.toml": dataInitConfigToml, - "data/services.json": dataServicesJson, } // AssetDir returns the file names below a certain @@ -351,7 +329,6 @@ var _bintree = &bintree{nil, map[string]*bintree{ }}, "binds.txt": &bintree{dataBindsTxt, map[string]*bintree{}}, "init-config.toml": &bintree{dataInitConfigToml, map[string]*bintree{}}, - "services.json": &bintree{dataServicesJson, map[string]*bintree{}}, }}, }} diff --git a/components/automate-deployment/pkg/assets/assets.go b/components/automate-deployment/pkg/assets/assets.go index 345b71a9f36..b1f00b7a4b3 100644 --- a/components/automate-deployment/pkg/assets/assets.go +++ b/components/automate-deployment/pkg/assets/assets.go @@ -1,6 +1,5 @@ package assets -//go:generate go run ../../tools/sort-services/sort-services.go data/services.json data/binds.txt //go:generate go-bindata -pkg $GOPACKAGE -o assets.bindata.go data/... 
//go:generate ../../../../scripts/fix_bindata_header assets.bindata.go //go:generate go fmt ./ diff --git a/components/automate-deployment/pkg/assets/data/services.json b/components/automate-deployment/pkg/assets/data/services.json deleted file mode 100644 index 31c53590551..00000000000 --- a/components/automate-deployment/pkg/assets/data/services.json +++ /dev/null @@ -1,114 +0,0 @@ -[ - { - "collection": "automate-full", - "data_services": [ - "chef/backup-gateway", - "chef/automate-postgresql", - "chef/automate-elasticsearch" - ], - "all_services": [ - "chef/backup-gateway", - "chef/automate-postgresql", - "chef/automate-pg-gateway", - "chef/automate-elasticsearch", - "chef/automate-es-gateway", - "chef/automate-ui", - "chef/pg-sidecar-service", - "chef/event-service", - "chef/authz-service", - "chef/es-sidecar-service", - "chef/automate-dex", - "chef/teams-service", - "chef/authn-service", - "chef/secrets-service", - "chef/applications-service", - "chef/notifications-service", - "chef/nodemanager-service", - "chef/compliance-service", - "chef/license-control-service", - "chef/local-user-service", - "chef/session-service", - "chef/ingest-service", - "chef/config-mgmt-service", - "chef/deployment-service", - "chef/data-feed-service", - "chef/data-lifecycle-service", - "chef/event-gateway", - "chef/automate-gateway", - "chef/automate-load-balancer" - ], - "pinned_packages": [ - "chef/automate-cli" - ], - "extra_packages": [ - "chef/automate-debug", - "core/rsync" - ], - "binlinks": { - "automate-cli": [ - "chef-automate" - ] - } - }, - { - "collection": "chef-server", - "data_services": [ - "chef/backup-gateway", - "chef/automate-postgresql", - "chef/automate-elasticsearch" - ], - "all_services": [ - "chef/backup-gateway", - "chef/deployment-service", - "chef/automate-postgresql", - "chef/automate-pg-gateway", - "chef/automate-elasticsearch", - "chef/automate-es-gateway", - "chef/pg-sidecar-service", - "chef/es-sidecar-service", - "chef/automate-cs-bookshelf", - 
"chef/automate-cs-oc-bifrost", - "chef/automate-cs-oc-erchef", - "chef/automate-cs-nginx", - "chef/automate-load-balancer" - ], - "pinned_packages": [], - "extra_packages": [], - "binlinks": { - "automate-cli": [ - "chef-automate" - ], - "automate-cs-nginx": [ - "knife", - "chef-server-ctl" - ] - } - }, - { - "collection": "workflow", - "data_services": [], - "all_services": [ - "chef/automate-workflow-server", - "chef/automate-workflow-nginx" - ], - "pinned_packages": [], - "extra_packages": [], - "binlinks": { - "automate-workflow-server": [ - "workflow-ctl" - ] - } - }, - { - "collection": "monitoring", - "data_services": [ - "chef/automate-prometheus" - ], - "all_services": [ - "chef/automate-prometheus" - ], - "pinned_packages": [], - "extra_packages": [], - "binlinks": {} - } -] diff --git a/components/automate-deployment/pkg/backup/runner.go b/components/automate-deployment/pkg/backup/runner.go index 9ba9ae56660..3329593eb4e 100644 --- a/components/automate-deployment/pkg/backup/runner.go +++ b/components/automate-deployment/pkg/backup/runner.go @@ -634,7 +634,7 @@ func (r *Runner) restoreServices(desiredServices []*deployment.Service, restoreC } // Install any binlinks for this package - binlinks := services.BinlinksForService(svc.Name()) + binlinks := services.BinlinksForPackage(svc.Name()) for _, cmd := range binlinks { cmdOutput, err := r.target.BinlinkPackage(svc, cmd) if err != nil { diff --git a/components/automate-deployment/pkg/converge/compiler.go b/components/automate-deployment/pkg/converge/compiler.go index 7485f7701e2..3d95c9415de 100644 --- a/components/automate-deployment/pkg/converge/compiler.go +++ b/components/automate-deployment/pkg/converge/compiler.go @@ -287,7 +287,7 @@ func (phase *InstallPhase) Run(writer *eventWriter) error { modified = true } - binlinks := services.BinlinksForService(step.pkg.Name()) + binlinks := services.BinlinksForPackage(step.pkg.Name()) for _, cmd := range binlinks { isBinlinked, err := 
step.target.IsBinlinked(step.pkg, cmd) if err != nil { diff --git a/components/automate-deployment/pkg/deployment/deployment.go b/components/automate-deployment/pkg/deployment/deployment.go index 1416debefb9..2fc16d38552 100644 --- a/components/automate-deployment/pkg/deployment/deployment.go +++ b/components/automate-deployment/pkg/deployment/deployment.go @@ -6,8 +6,6 @@ import ( "sync" "time" - "github.com/chef/automate/lib/stringutils" - "github.com/gofrs/uuid" "github.com/golang/protobuf/ptypes" "github.com/pkg/errors" @@ -157,7 +155,7 @@ func (d *Deployment) ReplaceUserOverrideConfig(config *dc.AutomateConfig) error func ContainsAutomateCollection(c *dc.ConfigRequest) bool { products := c.GetV1().GetSvc().GetProducts() if len(products) > 0 { - return stringutils.SliceContains(products, "automate-full") + return services.ContainsCollection("automate", products) } return true } @@ -168,7 +166,7 @@ func ExpectedServiceIDsForConfig(c *dc.ConfigRequest) ([]habpkg.HabPkg, error) { if len(c.GetV1().GetSvc().GetProducts()) > 0 { collections = c.GetV1().GetSvc().GetProducts() } else { - collections = []string{"automate-full"} + collections = []string{"automate"} if c.GetV1().GetSvc().GetEnableChefServer().GetValue() { collections = append(collections, "chef-server") diff --git a/components/automate-deployment/pkg/services/internal/generated/gen.go b/components/automate-deployment/pkg/services/internal/generated/gen.go new file mode 100644 index 00000000000..8f29575ca1b --- /dev/null +++ b/components/automate-deployment/pkg/services/internal/generated/gen.go @@ -0,0 +1,333 @@ +// Code generated by go generate; DO NOT EDIT. 
+package generated + +var ProductMetadataJSON = ` +{ + "packages": [ + { + "name": "chef/backup-gateway", + "metadata": { + "name": "chef/backup-gateway", + "data_service": true, + "binlinks": null + } + }, + { + "name": "chef/automate-postgresql", + "metadata": { + "name": "chef/automate-postgresql", + "data_service": true, + "binlinks": null + } + }, + { + "name": "chef/automate-pg-gateway", + "metadata": null + }, + { + "name": "chef/automate-elasticsearch", + "metadata": { + "name": "chef/automate-elasticsearch", + "data_service": true, + "binlinks": null + } + }, + { + "name": "chef/automate-es-gateway", + "metadata": null + }, + { + "name": "chef/automate-ui", + "metadata": null + }, + { + "name": "chef/pg-sidecar-service", + "metadata": null + }, + { + "name": "chef/event-service", + "metadata": null + }, + { + "name": "chef/authz-service", + "metadata": null + }, + { + "name": "chef/es-sidecar-service", + "metadata": null + }, + { + "name": "chef/automate-dex", + "metadata": null + }, + { + "name": "chef/teams-service", + "metadata": null + }, + { + "name": "chef/authn-service", + "metadata": null + }, + { + "name": "chef/secrets-service", + "metadata": null + }, + { + "name": "chef/applications-service", + "metadata": null + }, + { + "name": "chef/notifications-service", + "metadata": null + }, + { + "name": "chef/nodemanager-service", + "metadata": null + }, + { + "name": "chef/compliance-service", + "metadata": null + }, + { + "name": "chef/license-control-service", + "metadata": null + }, + { + "name": "chef/local-user-service", + "metadata": null + }, + { + "name": "chef/session-service", + "metadata": null + }, + { + "name": "chef/ingest-service", + "metadata": null + }, + { + "name": "chef/config-mgmt-service", + "metadata": null + }, + { + "name": "chef/data-feed-service", + "metadata": null + }, + { + "name": "chef/data-lifecycle-service", + "metadata": null + }, + { + "name": "chef/event-gateway", + "metadata": null + }, + { + "name": 
"chef/automate-gateway", + "metadata": null + }, + { + "name": "chef/automate-cs-bookshelf", + "metadata": null + }, + { + "name": "chef/automate-cs-oc-bifrost", + "metadata": null + }, + { + "name": "chef/automate-cs-oc-erchef", + "metadata": null + }, + { + "name": "chef/automate-cs-nginx", + "metadata": { + "name": "chef/automate-cs-nginx", + "data_service": false, + "binlinks": [ + "knife", + "chef-server-ctl" + ] + } + }, + { + "name": "chef/automate-workflow-server", + "metadata": { + "name": "chef/automate-workflow-server", + "data_service": false, + "binlinks": [ + "workflow-ctl" + ] + } + }, + { + "name": "chef/automate-workflow-nginx", + "metadata": null + }, + { + "name": "chef/automate-load-balancer", + "metadata": null + }, + { + "name": "chef/automate-prometheus", + "metadata": { + "name": "chef/automate-prometheus", + "data_service": true, + "binlinks": null + } + }, + { + "name": "chef/automate-cli", + "metadata": { + "name": "chef/automate-cli", + "data_service": false, + "binlinks": [ + "chef-automate" + ] + } + }, + { + "name": "core/rsync", + "metadata": null + } + ], + "collections": [ + { + "name": "core", + "aliases": null, + "type": "base", + "services": [ + "chef/backup-gateway", + "chef/license-control-service", + "chef/automate-load-balancer" + ], + "packages": [ + "chef/automate-cli", + "core/rsync" + ], + "dependencies": null, + "hidden": false + }, + { + "name": "postgresql", + "aliases": null, + "type": "base", + "services": [ + "chef/automate-postgresql", + "chef/automate-pg-gateway", + "chef/pg-sidecar-service" + ], + "packages": null, + "dependencies": null, + "hidden": false + }, + { + "name": "elasticsearch", + "aliases": null, + "type": "base", + "services": [ + "chef/automate-elasticsearch", + "chef/automate-es-gateway", + "chef/es-sidecar-service" + ], + "packages": null, + "dependencies": null, + "hidden": false + }, + { + "name": "auth", + "aliases": null, + "type": "base", + "services": [ + "chef/authz-service", + 
"chef/authn-service", + "chef/automate-dex", + "chef/local-user-service", + "chef/session-service" + ], + "packages": null, + "dependencies": [ + "core", + "postgresql" + ], + "hidden": false + }, + { + "name": "automate", + "aliases": [ + "automate-full" + ], + "type": "product", + "services": [ + "chef/automate-ui", + "chef/event-service", + "chef/teams-service", + "chef/authn-service", + "chef/secrets-service", + "chef/applications-service", + "chef/notifications-service", + "chef/nodemanager-service", + "chef/compliance-service", + "chef/ingest-service", + "chef/config-mgmt-service", + "chef/data-feed-service", + "chef/data-lifecycle-service", + "chef/event-gateway", + "chef/automate-gateway" + ], + "packages": null, + "dependencies": [ + "core", + "postgresql", + "elasticsearch", + "auth" + ], + "hidden": false + }, + { + "name": "chef-server", + "aliases": [ + "chef-infra-server" + ], + "type": "product", + "services": [ + "chef/automate-cs-bookshelf", + "chef/automate-cs-oc-bifrost", + "chef/automate-cs-oc-erchef", + "chef/automate-cs-nginx" + ], + "packages": null, + "dependencies": [ + "core", + "postgresql", + "elasticsearch" + ], + "hidden": false + }, + { + "name": "workflow", + "aliases": null, + "type": "product", + "services": [ + "chef/automate-workflow-server", + "chef/automate-workflow-nginx" + ], + "packages": null, + "dependencies": [ + "automate" + ], + "hidden": false + }, + { + "name": "monitoring", + "aliases": null, + "type": "product", + "services": [ + "chef/automate-prometheus" + ], + "packages": null, + "dependencies": [ + "automate" + ], + "hidden": true + } + ] +} +` diff --git a/components/automate-deployment/pkg/services/parser/parser.go b/components/automate-deployment/pkg/services/parser/parser.go deleted file mode 100644 index e6b3b26796b..00000000000 --- a/components/automate-deployment/pkg/services/parser/parser.go +++ /dev/null @@ -1,60 +0,0 @@ -package parser - -import ( - "encoding/json" - - "github.com/pkg/errors" - - 
"github.com/chef/automate/components/automate-deployment/pkg/bind" -) - -type ServiceCollection struct { - Name string `json:"collection"` - DataServices []string `json:"data_services"` - AllServices []string `json:"all_services"` - Pins []string `json:"pinned_packages"` - ExtraPkgs []string `json:"extra_packages"` - Binlinks BinlinkMap `json:"binlinks"` -} - -type BinlinkMap map[string][]string - -func ServiceCollectionsFromJSON(serviceData []byte) ([]ServiceCollection, error) { - collections := []ServiceCollection{} - err := json.Unmarshal(serviceData, &collections) - - if err != nil { - return []ServiceCollection{}, errors.Wrap(err, "invalid JSON in services file") - } - return collections, nil -} - -func ServiceCollectionFromJSON(serviceData []byte, collectionName string) (ServiceCollection, error) { - collections, err := ServiceCollectionsFromJSON(serviceData) - if err != nil { - return ServiceCollection{}, err - } - - for _, c := range collections { - if c.Name == collectionName { - return c, nil - } - } - return ServiceCollection{}, errors.Errorf("service collection '%s' not found in services JSON data", collectionName) -} - -func ServiceNamesFromJSON(serviceData []byte, collectionName string) ([]string, error) { - c, err := ServiceCollectionFromJSON(serviceData, collectionName) - if err != nil { - return []string{}, err - } - return c.AllServices, nil -} - -func ParseServiceBinds(data []byte) (bind.Binds, error) { - b, err := bind.ParseData(data) - if err != nil { - return bind.Binds{}, errors.Wrap(err, "service binds data is not parsable") - } - return b, nil -} diff --git a/components/automate-deployment/pkg/services/services.go b/components/automate-deployment/pkg/services/services.go index 7cd664ff723..0a27ec9cddb 100644 --- a/components/automate-deployment/pkg/services/services.go +++ b/components/automate-deployment/pkg/services/services.go @@ -3,212 +3,232 @@ package services import ( + "encoding/json" + "strings" + + 
"github.com/chef/automate/lib/product" "github.com/pkg/errors" "github.com/chef/automate/components/automate-deployment/pkg/assets" "github.com/chef/automate/components/automate-deployment/pkg/bind" "github.com/chef/automate/components/automate-deployment/pkg/habpkg" - "github.com/chef/automate/components/automate-deployment/pkg/services/parser" + "github.com/chef/automate/components/automate-deployment/pkg/services/internal/generated" ) -var servicesByCollection map[string][]habpkg.HabPkg - -var supplementaryPackagesByCollection map[string][]habpkg.HabPkg - -var serviceCollections map[string]parser.ServiceCollection +//go:generate go run ../../tools/services-pkg-gen/main.go ../../../../ internal/generated/gen.go // AllBinds contains all the bindings that the deployment service is aware of var AllBinds bind.Binds -// automateFull is the full ServiceCollection object with all the data on -// packages and services we install/manage -var automateFull parser.ServiceCollection +var serviceList []habpkg.HabPkg +var productList []string +var packageMetadataMap map[string]*product.Package +var collectionMap map[string]*product.Collection -// AllPackages returns a list of all packages (service, binary, or whatever) that -// were specified in the manifest -func AllPackages() []habpkg.HabPkg { - pkgs := []habpkg.HabPkg{} - mark := make(map[string]bool) - for _, serviceCollection := range serviceCollections { - for _, section := range [][]string{ - serviceCollection.AllServices, - serviceCollection.ExtraPkgs, - serviceCollection.Pins, - serviceCollection.DataServices} { - for _, pkgStr := range section { - if !mark[pkgStr] { - mark[pkgStr] = true - pkg, err := habpkg.FromString(pkgStr) - if err != nil { - panic(errors.Wrapf(err, "failed to parse hab pkg name '%s' in services list", pkgStr)) - } - pkgs = append(pkgs, pkg) +func AllServices() ([]habpkg.HabPkg, error) { + return serviceList, nil +} + +func ContainsCollection(needle string, haystack []string) bool { + 
desiredCollection := collectionMap[needle] + if desiredCollection == nil { + return false + } + + visited := map[string]bool{} + + for _, collectionName := range haystack { + collection := collectionMap[collectionName] + if collection != nil { + if collection == desiredCollection { + return true + } + deps := getRequiredCollections(collectionName, visited) + for _, d := range deps { + if d.Type != product.ProductType && desiredCollection == d { + return true } } } } - return pkgs + return false } -func AllServices() ([]habpkg.HabPkg, error) { - names := []string{} - for name := range servicesByCollection { - names = append(names, name) - } - return ServicesInCollections(names) +func ListProducts() []string { + return productList } -func ListProducts() []string { - collections := make([]string, len(serviceCollections)) - i := 0 - for k := range serviceCollections { - collections[i] = k - i++ +func ValidateProductDeployment(products []string) error { + visited := map[string]bool{} + requiredCollectionSet := make(map[string]*product.Collection) + desiredCollectionSet := make(map[string]*product.Collection) + for _, p := range products { + collection := collectionMap[p] + if collection == nil || collection.Type != product.ProductType { + return errors.Errorf("Unknown product %q. 
Must be one of (%s)", p, strings.Join(ListProducts(), ", ")) + } + desiredCollectionSet[collection.Name] = collection + deps := getRequiredCollections(p, visited) + for _, d := range deps { + requiredCollectionSet[d.Name] = d + } } - return collections -} -func ServicesInCollections(collections []string) ([]habpkg.HabPkg, error) { - combined := []habpkg.HabPkg{} - for _, collectionName := range collections { - serviceIDs, err := ServicesInCollection(collectionName) - if err != nil { - return combined, err + for requiredCollectionName, requiredCollection := range requiredCollectionSet { + if desiredCollectionSet[requiredCollectionName] == nil && + requiredCollection.Type == product.ProductType { + return errors.Errorf("You must deploy %q to deploy %s", requiredCollectionName, strings.Join(products, ", ")) } - combined = append(combined, serviceIDs...) } - return combined, nil + + return nil } func ServicesInCollection(collection string) ([]habpkg.HabPkg, error) { - list, ok := servicesByCollection[collection] - if !ok { - return nil, errors.Errorf("services collection '%s' not found", collection) + return ServicesInCollections([]string{collection}) +} + +func ServicesInCollections(collections []string) ([]habpkg.HabPkg, error) { + requiredServices := map[string]bool{} + visited := map[string]bool{} + for _, collection := range collections { + if collectionMap[collection] == nil { + return nil, errors.Errorf("unknown collection %q", collection) + } + deps := getRequiredServices(collection, visited) + for _, d := range deps { + requiredServices[d.Name] = true + } } - return list, nil + return sortServices(requiredServices), nil } func SupplementaryPackagesInCollection(collection string) ([]habpkg.HabPkg, error) { - list, ok := supplementaryPackagesByCollection[collection] - if !ok { - return []habpkg.HabPkg{}, errors.Errorf("services collection '%s' not found", collection) + if collectionMap[collection] == nil { + return nil, errors.Errorf("unknown collection %q", 
collection) } - return list, nil + + visited := map[string]bool{} + deps := getRequiredCollections(collection, visited) + pkgs := []habpkg.HabPkg{} + for _, c := range deps { + for _, p := range c.Packages { + pkgs = append(pkgs, habpkg.New(p.Origin, p.Name)) + } + } + + return pkgs, nil } // IsDataService is used during A1 -> A2 upgrades to start postgres/elasticsearch // separately from the domain services. -func IsDataService(candidate string) bool { - serviceIDs, err := habpkg.FromList(automateFull.DataServices) - - if err != nil { - panic(err.Error()) - } - - for _, iter := range serviceIDs { - if iter.Name() == candidate { - return true - } +func IsDataService(pkgName string) bool { + if packageMetadataMap[pkgName].Metadata != nil { + return packageMetadataMap[pkgName].Metadata.DataService } return false } -var binlinkMap map[string][]string - -func BinlinksForService(serviceName string) []string { - return binlinkMap[serviceName] +func BinlinksForPackage(pkgName string) []string { + if packageMetadataMap[pkgName] == nil { + return nil + } + if packageMetadataMap[pkgName].Metadata != nil { + return packageMetadataMap[pkgName].Metadata.Binlinks + } + return nil } -func loadServiceCollections() map[string]parser.ServiceCollection { - bytes := assets.MustAsset("data/services.json") - collections, err := parser.ServiceCollectionsFromJSON(bytes) +// loadServiceBinds parses the bindings for the services +func loadServiceBinds() bind.Binds { + data := assets.MustAsset("data/binds.txt") + + b, err := bind.ParseData(data) if err != nil { - panic(err.Error()) + panic(errors.Wrap(err, "binds.txt is not parsable")) } + return b +} - collectionMap := make(map[string]parser.ServiceCollection) +func getRequiredServices(collectionName string, visitedCollections map[string]bool) []product.PackageName { + if visitedCollections[collectionName] { + return []product.PackageName{} + } - for _, collection := range collections { - // IsDataService panics on invalid data; force the 
panic to happen in init if - // the data is bad. - for _, s := range collection.AllServices { - pkg, err := habpkg.FromString(s) - if err != nil { - panic(errors.Wrapf(err, "failed to parse hab pkg name '%s' in services list", s)) - } - _ = IsDataService(pkg.Name()) - } - collectionMap[collection.Name] = collection + visitedCollections[collectionName] = true + requiredServices := []product.PackageName{} + collection := collectionMap[collectionName] + for _, c := range collection.Dependencies { + services := getRequiredServices(c, visitedCollections) + requiredServices = append(requiredServices, services...) } - return collectionMap -} + requiredServices = append(requiredServices, collection.Services...) + return requiredServices -func loadAutomateServiceCollection() parser.ServiceCollection { - return serviceCollections["automate-full"] } -func loadServiceIDs() map[string][]habpkg.HabPkg { - serviceMap := make(map[string][]habpkg.HabPkg) - for name, collection := range serviceCollections { - serviceIDs, err := habpkg.FromList(collection.AllServices) - if err != nil { - panic(err.Error()) +func sortServices(requiredServices map[string]bool) []habpkg.HabPkg { + sortedRequiredServices := make([]habpkg.HabPkg, 0, len(requiredServices)) + for _, s := range serviceList { + if requiredServices[s.Name()] { + sortedRequiredServices = append(sortedRequiredServices, s) } - serviceMap[name] = removeDeploymentService(name, serviceIDs) } - return serviceMap + return sortedRequiredServices } -func removeDeploymentService(collectionName string, serviceIDs []habpkg.HabPkg) []habpkg.HabPkg { - // remove deployment-services from Services slice - for i := range serviceIDs { - if serviceIDs[i].Name() == "deployment-service" { - serviceIDs = append(serviceIDs[:i], serviceIDs[i+1:]...) 
- return serviceIDs - } +func getRequiredCollections(collectionName string, visitedCollections map[string]bool) []*product.Collection { + collection := collectionMap[collectionName] + + if visitedCollections[collection.Name] { + return []*product.Collection{} } - return serviceIDs -} -func loadSupplementaryPackages() map[string][]habpkg.HabPkg { - pkgMap := make(map[string][]habpkg.HabPkg) - for name, collection := range serviceCollections { - rawPackageList := collection.ExtraPkgs - pkgIDs, err := habpkg.FromList(rawPackageList) - if err != nil { - panic(err.Error()) - } - pkgMap[name] = pkgIDs + visitedCollections[collection.Name] = true + requiredCollections := []*product.Collection{} + for _, c := range collection.Dependencies { + collections := getRequiredCollections(c, visitedCollections) + requiredCollections = append(requiredCollections, collections...) } - return pkgMap + + requiredCollections = append(requiredCollections, collection) + return requiredCollections + } -// loadServiceBinds parses the bindings for the services -func loadServiceBinds() bind.Binds { - data := assets.MustAsset("data/binds.txt") - b, err := parser.ParseServiceBinds(data) +func init() { + AllBinds = loadServiceBinds() + + packageMetadataMap = make(map[string]*product.Package) + collectionMap = make(map[string]*product.Collection) + + metadata := product.Metadata{} + err := json.Unmarshal([]byte(generated.ProductMetadataJSON), &metadata) if err != nil { - panic(errors.Wrap(err, "binds.txt is not parsable")) + panic(err) } - return b -} -func loadBinlinkMap() map[string][]string { - m := make(map[string][]string) - for _, collection := range serviceCollections { - for pkgName, binlinks := range collection.Binlinks { - m[pkgName] = binlinks + serviceSet := make(map[product.PackageName]bool) + for _, c := range metadata.Collections { + if c.Type == product.ProductType && !c.Hidden { + productList = append(productList, c.Name) + } + + collectionMap[c.Name] = c + for _, alias := range 
c.Aliases { + collectionMap[alias] = c + } + + for _, s := range c.Services { + serviceSet[s] = true } } - return m -} -func init() { - serviceCollections = loadServiceCollections() - automateFull = loadAutomateServiceCollection() - AllBinds = loadServiceBinds() - servicesByCollection = loadServiceIDs() - supplementaryPackagesByCollection = loadSupplementaryPackages() - binlinkMap = loadBinlinkMap() + for _, p := range metadata.Packages { + packageMetadataMap[p.Name.Name] = p + if serviceSet[p.Name] { + serviceList = append(serviceList, habpkg.New(p.Name.Origin, p.Name.Name)) + } + } } diff --git a/components/automate-deployment/pkg/services/services_test.go b/components/automate-deployment/pkg/services/services_test.go index bd2dd1aa88c..a28fd988385 100644 --- a/components/automate-deployment/pkg/services/services_test.go +++ b/components/automate-deployment/pkg/services/services_test.go @@ -3,8 +3,6 @@ package services import ( - "encoding/json" - "io/ioutil" "strings" "testing" @@ -13,9 +11,16 @@ import ( "github.com/chef/automate/components/automate-deployment/pkg/bind" "github.com/chef/automate/components/automate-deployment/pkg/habpkg" - "github.com/chef/automate/components/automate-deployment/pkg/services/parser" ) +func TestServicesSorted(t *testing.T) { + ids, err := ServicesInCollections(productList) + require.NoError(t, err) + + sortedServices, err := bind.TopoSortAll(ids, AllBinds) + require.Nil(t, err, "Services are no longer topologically sortable. 
Please check any newly added binds") + assert.Equal(t, sortedServices, ids) +} func TestServicesSortable(t *testing.T) { ids, err := ServicesInCollection("automate-full") require.NoError(t, err) @@ -40,58 +45,6 @@ func TestChefServices(t *testing.T) { "services.json can only contain service packages in the chef origin: %s", strings.Join(nonChefPackages, ",")) } -func TestLoadServices(t *testing.T) { - ids, err := ServicesInCollection("automate-full") - require.NoError(t, err) - - expectedServices := expectedServices() - assert.Equal(t, expectedServices, ids) -} - -func expectedServices() []habpkg.HabPkg { - raw, err := ioutil.ReadFile("../assets/data/services.json") - if err != nil { - panic("unable to read data/services.json") - } - - var collections []parser.ServiceCollection - - err = json.Unmarshal(raw, &collections) - - if err != nil { - panic(err.Error) - } - - var paths []string - - for _, c := range collections { - if c.Name == "automate-full" { - paths = c.AllServices - } - } - - if paths == nil { - panic("unable to load the services paths from data/services.json") - } - - sPaths := make([]habpkg.HabPkg, 0, len(paths)) - for _, path := range paths { - path = strings.Trim(path, "\t\n ") - if len(path) == 0 { - continue - } - sp, err := habpkg.FromString(path) - if err != nil { - panic("bad path for service") - } - if sp.Name() == "deployment-service" { - continue - } - sPaths = append(sPaths, sp) - } - return sPaths -} - func TestServicePathsByCollection(t *testing.T) { automateServiceIDs, err := ServicesInCollection("automate-full") assert.NoError(t, err) @@ -175,7 +128,6 @@ func TestSupplementaryPackages(t *testing.T) { } mustExist := []habpkg.HabPkg{ - habpkg.New("chef", "automate-debug"), habpkg.New("core", "rsync"), } @@ -216,19 +168,44 @@ func TestDoLoadServiceBinds(t *testing.T) { func TestBinlinksLoad(t *testing.T) { t.Run("a service with no binlinks has an empty list of binlinks", func(t *testing.T) { var nilSlice []string - assert.Equal(t, 
nilSlice, BinlinksForService("compliance-service")) + assert.Equal(t, nilSlice, BinlinksForPackage("compliance-service")) }) t.Run("automate-cli has a binlink entry for chef-automate exe", func(t *testing.T) { - assert.Equal(t, []string{"chef-automate"}, BinlinksForService("automate-cli")) + assert.Equal(t, []string{"chef-automate"}, BinlinksForPackage("automate-cli")) }) t.Run("automate-cs-nginx has a binlink entry for knife", func(t *testing.T) { - assert.Equal(t, []string{"knife", "chef-server-ctl"}, BinlinksForService("automate-cs-nginx")) + assert.Equal(t, []string{"knife", "chef-server-ctl"}, BinlinksForPackage("automate-cs-nginx")) }) } func TestAllPackagesUniq(t *testing.T) { - packageIDs := AllPackages() + packageIDs, err := ServicesInCollections([]string{"automate-full", "chef-server", "workflow"}) + require.NoError(t, err) + assert.True(t, len(packageIDs) > 0) + + pkgSet := make(map[habpkg.HabPkg]struct{}) + for _, p := range packageIDs { + pkgSet[p] = struct{}{} + } + assert.Equal(t, len(packageIDs), len(pkgSet)) +} + +func TestServicesUniq(t *testing.T) { + packageIDs, err := ServicesInCollections([]string{"automate-full", "core"}) + require.NoError(t, err) + assert.True(t, len(packageIDs) > 0) + + pkgSet := make(map[habpkg.HabPkg]struct{}) + for _, p := range packageIDs { + pkgSet[p] = struct{}{} + } + assert.Equal(t, len(packageIDs), len(pkgSet)) +} + +func TestServicesUniqWithAliases(t *testing.T) { + packageIDs, err := ServicesInCollections([]string{"automate-full", "automate"}) + require.NoError(t, err) assert.True(t, len(packageIDs) > 0) pkgSet := make(map[habpkg.HabPkg]struct{}) @@ -237,3 +214,71 @@ func TestAllPackagesUniq(t *testing.T) { } assert.Equal(t, len(packageIDs), len(pkgSet)) } + +func TestAutomateFullAliases(t *testing.T) { + packageIDsAutomateFull, err := ServicesInCollections([]string{"automate-full"}) + require.NoError(t, err) + packageIDsAutomate, err := ServicesInCollections([]string{"automate"}) + require.NoError(t, err) + 
assert.Equal(t, packageIDsAutomate, packageIDsAutomateFull) +} + +func TestListProducts(t *testing.T) { + assert.Subset(t, ListProducts(), []string{"automate", "chef-server", "workflow"}) + assert.NotContains(t, ListProducts(), "monitoring") +} + +func TestValidateProductDeployment(t *testing.T) { + t.Run("error unknown product", func(t *testing.T) { + assert.Error(t, ValidateProductDeployment([]string{"foo"})) + assert.Error(t, ValidateProductDeployment([]string{"automate", "foo", "bar"})) + }) + t.Run("error on base collection", func(t *testing.T) { + assert.Error(t, ValidateProductDeployment([]string{"automate", "core"})) + }) + t.Run("error unspecified product dependencies", func(t *testing.T) { + assert.Error(t, ValidateProductDeployment([]string{"workflow"})) + }) + t.Run("specified product dependencies", func(t *testing.T) { + assert.NoError(t, ValidateProductDeployment([]string{"automate", "workflow"})) + assert.NoError(t, ValidateProductDeployment([]string{"automate-full", "workflow"})) + assert.NoError(t, ValidateProductDeployment([]string{"automate", "workflow"})) + }) + t.Run("we support these for sure", func(t *testing.T) { + assert.NoError(t, ValidateProductDeployment([]string{"automate", "monitoring"})) + assert.NoError(t, ValidateProductDeployment([]string{"automate", "workflow"})) + assert.NoError(t, ValidateProductDeployment([]string{"automate", "workflow", "chef-server"})) + assert.NoError(t, ValidateProductDeployment([]string{"automate", "chef-server"})) + assert.NoError(t, ValidateProductDeployment([]string{"chef-server"})) + assert.NoError(t, ValidateProductDeployment([]string{"automate-full", "workflow"})) + assert.NoError(t, ValidateProductDeployment([]string{"automate-full", "workflow", "chef-server"})) + assert.NoError(t, ValidateProductDeployment([]string{"automate-full", "chef-server"})) + }) +} + +func TestContainsCollection(t *testing.T) { + t.Run("returns false if desired collection is unknown", func(t *testing.T) { + assert.False(t, 
ContainsCollection("asdf", []string{"automate", "chef-server"})) + }) + t.Run("returns false if desired collection is not in the list", func(t *testing.T) { + assert.False(t, ContainsCollection("workflow", []string{"automate", "chef-server"})) + }) + t.Run("returns false if desired collection is not in the list and not a base dependency", func(t *testing.T) { + assert.False(t, ContainsCollection("automate", []string{"workflow"})) + }) + t.Run("returns false list is empty", func(t *testing.T) { + assert.False(t, ContainsCollection("core", []string{})) + assert.False(t, ContainsCollection("core", nil)) + }) + t.Run("returns true when product is in the list", func(t *testing.T) { + assert.True(t, ContainsCollection("workflow", []string{"workflow"})) + assert.True(t, ContainsCollection("automate", []string{"automate"})) + assert.True(t, ContainsCollection("automate-full", []string{"automate"})) + assert.True(t, ContainsCollection("automate-full", []string{"automate-full"})) + assert.True(t, ContainsCollection("automate", []string{"automate-full"})) + }) + t.Run("returns true if a base component is implicitly included", func(t *testing.T) { + assert.True(t, ContainsCollection("core", []string{"automate"})) + assert.True(t, ContainsCollection("core", []string{"automate-full"})) + }) +} diff --git a/components/automate-deployment/pkg/target/local_target.go b/components/automate-deployment/pkg/target/local_target.go index 5a573440f6d..ca8dfdb1a66 100644 --- a/components/automate-deployment/pkg/target/local_target.go +++ b/components/automate-deployment/pkg/target/local_target.go @@ -398,11 +398,11 @@ func (t *LocalTarget) RemoveService(svc habpkg.VersionedPackage) error { } func supplementaryPackages() ([]habpkg.HabPkg, error) { - return services.SupplementaryPackagesInCollection("automate-full") + return services.SupplementaryPackagesInCollection("core") } // InstallSupPackages installs non-service Habitat packages included -// in automate-deployment's data/services.json 
+// in product.meta core func (t *LocalTarget) InstallSupPackages(releaseManifest manifest.ReleaseManifest, writer cli.BodyWriter) error { writer.Body("Installing supplementary Habitat packages") packages, err := supplementaryPackages() diff --git a/components/automate-deployment/tools/services-pkg-gen/main.go b/components/automate-deployment/tools/services-pkg-gen/main.go new file mode 100644 index 00000000000..42c96b1a0eb --- /dev/null +++ b/components/automate-deployment/tools/services-pkg-gen/main.go @@ -0,0 +1,120 @@ +package main + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path" + "text/template" + + "github.com/chef/automate/components/automate-deployment/pkg/bind" + "github.com/chef/automate/components/automate-deployment/pkg/habpkg" + "github.com/chef/automate/lib/product" +) + +var packageTemplate = template.Must(template.New("").Parse(`// Code generated by go generate; DO NOT EDIT. +package generated + +var ProductMetadataJSON = ` + "`" + + ` +{{ .ProductMetadataJSON }} +` + "`\n")) + +const deploymentServiceName = "deployment-service" + +func fatal(msg string, err error) { + fmt.Fprintf(os.Stderr, "%s: %s", msg, err.Error()) + os.Exit(1) +} + +func removeDeploymentServiceFromPackages(packages []*product.Package) []*product.Package { + for i := range packages { + if packages[i].Name.Name == deploymentServiceName { + return append(packages[:i], packages[i+1:]...) + } + } + panic("deployment-service not found") +} + +func removeDeploymentServiceFromCollections(collections []*product.Collection) { + for _, c := range collections { + COL: + for i := range c.Services { + if c.Services[i].Name == deploymentServiceName { + c.Services = append(c.Services[:i], c.Services[i+1:]...) 
+ break COL + } + } + } + +} + +func sortPackages(packages []*product.Package, bindsPath string) []*product.Package { + bindData, err := ioutil.ReadFile(bindsPath) + if err != nil { + fatal("failed to read bind data", err) + } + + binds, err := bind.ParseData(bindData) + if err != nil { + fatal("failed to parse bind data", err) + } + + allPkgs := []habpkg.HabPkg{} + packageSet := make(map[string]*product.Package) + for _, p := range packages { + allPkgs = append(allPkgs, habpkg.New(p.Name.Origin, p.Name.Name)) + packageSet[p.Name.Name] = p + } + + allSortedPkgs, err := bind.TopoSortAll(allPkgs, binds) + if err != nil { + fatal("failed to sort services", err) + } + + sorted := make([]*product.Package, len(allSortedPkgs)) + for i := range allSortedPkgs { + sorted[i] = packageSet[allSortedPkgs[i].Name()] + } + return sorted +} + +func main() { + repoRoot := os.Args[1] + bindsPath := path.Join(repoRoot, "components/automate-deployment/pkg/assets/data/binds.txt") + + metadata, err := product.Parse(repoRoot) + if err != nil { + panic(err) + } + + packages := removeDeploymentServiceFromPackages(metadata.Packages) + metadata.Packages = sortPackages(packages, bindsPath) + removeDeploymentServiceFromCollections(metadata.Collections) + + outStruct := product.Metadata{ + Packages: metadata.Packages, + Collections: metadata.Collections, + } + + encoder := json.NewEncoder(os.Stdout) + encoder.SetIndent("", " ") + data, err := json.MarshalIndent(outStruct, "", " ") + if err != nil { + panic(err) + } + f, err := os.Create(os.Args[2]) + if err != nil { + fatal("failed to write file", err) + } + defer f.Close() + err = packageTemplate.Execute(f, struct { + ProductMetadataJSON string + }{ + ProductMetadataJSON: string(data), + }) + if err != nil { + fatal("failed to write template", err) + } +} diff --git a/components/automate-deployment/tools/sort-services/sort-services.go b/components/automate-deployment/tools/sort-services/sort-services.go deleted file mode 100644 index 
2a69d4d728e..00000000000 --- a/components/automate-deployment/tools/sort-services/sort-services.go +++ /dev/null @@ -1,94 +0,0 @@ -package main - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "os" - "path/filepath" - - "github.com/chef/automate/components/automate-deployment/pkg/bind" - "github.com/chef/automate/components/automate-deployment/pkg/habpkg" - "github.com/chef/automate/components/automate-deployment/pkg/services/parser" - "github.com/chef/automate/lib/io/fileutils" -) - -func fatal(msg string, err error) { - fmt.Fprintf(os.Stderr, "%s: %s", msg, err.Error()) - os.Exit(1) -} - -func main() { - if len(os.Args) < 3 { - fmt.Fprintln(os.Stderr, "usage: sort_services SERVICE_FILE_PATH BIND_FILE_PATH") - os.Exit(1) - } - - servicePath, err := filepath.Abs(os.Args[1]) - if err != nil { - fatal("failed to expand service file path", err) - } - - bindsPath, err := filepath.Abs(os.Args[2]) - if err != nil { - fatal("failed to expand binds file path", err) - } - - serviceData, err := ioutil.ReadFile(servicePath) - if err != nil { - fatal("failed to read service data", err) - } - - collections, err := parser.ServiceCollectionsFromJSON(serviceData) - if err != nil { - fatal("failed to parse service data", err) - } - - bindData, err := ioutil.ReadFile(bindsPath) - if err != nil { - fatal("failed to read bind data", err) - } - - binds, err := bind.ParseData(bindData) - if err != nil { - fatal("failed to parse bind data", err) - } - - for i, c := range collections { - serviceList, err := habpkg.FromList(c.AllServices) - if err != nil { - fatal("failed to parse service data", err) - } - - sorted, err := bind.TopoSortAll(serviceList, binds) - if err != nil { - fatal("failed to sort services", err) - } - - sortedNames := make([]string, len(sorted)) - - for j, pkg := range sorted { - sortedNames[j] = habpkg.Ident(&pkg) - } - - collections[i].AllServices = sortedNames - } - - newContent := new(bytes.Buffer) - encoder := json.NewEncoder(newContent) - 
encoder.SetIndent("", " ") - - err = encoder.Encode(collections) - if err != nil { - fatal("could not encode services list back to JSON", err) - } - - err = fileutils.AtomicWrite(servicePath, newContent, fileutils.WithAtomicWriteNoSync(true)) - if err != nil { - // Not cleaning up tempfile in this case, just in case - fatal("failed to move new service file into place", err) - } - - os.Exit(0) -} diff --git a/components/automate-elasticsearch/package.meta b/components/automate-elasticsearch/package.meta new file mode 100644 index 00000000000..22348e59f76 --- /dev/null +++ b/components/automate-elasticsearch/package.meta @@ -0,0 +1,4 @@ +{ + "name": "chef/automate-elasticsearch", + "data_service": true +} diff --git a/components/automate-postgresql/package.meta b/components/automate-postgresql/package.meta new file mode 100644 index 00000000000..7df2e13cc69 --- /dev/null +++ b/components/automate-postgresql/package.meta @@ -0,0 +1,4 @@ +{ + "name": "chef/automate-postgresql", + "data_service": true +} diff --git a/components/automate-prometheus/package.meta b/components/automate-prometheus/package.meta new file mode 100644 index 00000000000..fc0fab4e8ba --- /dev/null +++ b/components/automate-prometheus/package.meta @@ -0,0 +1,4 @@ +{ + "name": "chef/automate-prometheus", + "data_service": true +} diff --git a/components/automate-workflow-server/package.meta b/components/automate-workflow-server/package.meta new file mode 100644 index 00000000000..bf9e2faea6d --- /dev/null +++ b/components/automate-workflow-server/package.meta @@ -0,0 +1,4 @@ +{ + "name": "chef/automate-workflow-server", + "binlinks": ["workflow-ctl"] +} diff --git a/components/backup-gateway/package.meta b/components/backup-gateway/package.meta new file mode 100644 index 00000000000..7b108446f89 --- /dev/null +++ b/components/backup-gateway/package.meta @@ -0,0 +1,4 @@ +{ + "name": "chef/backup-gateway", + "data_service": true +} diff --git a/integration/helpers/deployment.sh 
b/integration/helpers/deployment.sh index ea80c3b1380..08b94a4b2ee 100644 --- a/integration/helpers/deployment.sh +++ b/integration/helpers/deployment.sh @@ -64,8 +64,8 @@ wait_for_upgrade() { log_info "Waiting for services to upgrade" upgrade_complete="false" - for try in {1..60}; do - log_info "Checking upgrade status (try $try/60)" + for try in {1..90}; do + log_info "Checking upgrade status (try $try/90)" local upgrade_status_output local errcode errcode="0" diff --git a/lib/product/parser.go b/lib/product/parser.go new file mode 100644 index 00000000000..3862dabd606 --- /dev/null +++ b/lib/product/parser.go @@ -0,0 +1,154 @@ +package product + +import ( + "encoding/json" + "os" + "path" + "path/filepath" + + "github.com/pkg/errors" +) + +func parsePackageMetadata(packagePath string) (*PackageMetadata, error) { + f, err := os.Open(packagePath) + if err != nil { + return nil, errors.Wrapf(err, "Couldn't load %s", packagePath) + } + defer f.Close() // nolint: errcheck + decoder := json.NewDecoder(f) + decoder.DisallowUnknownFields() + + metadata := PackageMetadata{} + if err := decoder.Decode(&metadata); err != nil { + return nil, errors.Wrapf(err, "Failed to read metadata for %s", packagePath) + } + + if err := metadata.validate(); err != nil { + return nil, errors.Wrapf(err, "Failed to validate metadata for %s", packagePath) + } + return &metadata, nil +} + +type productsmeta struct { + // Packages is a list of the root level packages/services that will + // make up a release of automate. + Packages []PackageName `json:"packages"` + // Collections is a list of collections that automate is broken down + // into. Some of these are deployable by the user. 
+ Collections []*Collection `json:"collections"` +} + +func (b *productsmeta) validate() error { + if len(b.Collections) <= 0 { + return errors.New("No collections found") + } + + if len(b.Packages) <= 0 { + return errors.New("No packages found") + } + + allowedPackageSet := make(map[PackageName]bool) + for _, p := range b.Packages { + if err := p.validate(); err != nil { + return err + } + allowedPackageSet[p] = true + } + + collectionMap := make(map[string]*Collection) + for _, c := range b.Collections { + if err := c.validate(allowedPackageSet); err != nil { + return err + } + if _, found := collectionMap[c.Name]; found { + return errors.Errorf("collection name/alias %q was repeated", c.Name) + + } + collectionMap[c.Name] = c + for _, alias := range c.Aliases { + if _, found := collectionMap[alias]; found { + return errors.Errorf("collection name/alias %q was repeated", c.Name) + + } + collectionMap[alias] = c + } + } + + for _, c := range b.Collections { + for _, dep := range c.Dependencies { + depCollection, found := collectionMap[dep] + if !found { + return errors.Errorf("%q is listed as a dependency for %q but was not found", dep, c.Name) + } + if c.Type == BaseType && depCollection.Type == ProductType { + return errors.Errorf("base collection %q may not depend on product collection (%q)", + c.Name, depCollection.Name) + } + } + } + + return nil +} + +func parseProductsMeta(filePath string) (*productsmeta, error) { + f, err := os.Open(filePath) + if err != nil { + return nil, errors.Wrapf(err, "Failed to open %s", filePath) + } + defer f.Close() // nolint: errcheck + + decoder := json.NewDecoder(f) + decoder.DisallowUnknownFields() + + val := productsmeta{} + if err := decoder.Decode(&val); err != nil { + return nil, errors.Wrapf(err, "Failed to parse %s", filePath) + } + + if err := val.validate(); err != nil { + return nil, err + } + + return &val, nil +} + +// Parse parses the root level product.meta along with the package.meta files that +// may be 
provided by each component.
+func Parse(repoRootPath string) (*Metadata, error) {
+	componentsPath := path.Join(repoRootPath, "components")
+	packageMetadataMap := make(map[PackageName]*PackageMetadata)
+	packagesWithMetadata, err := filepath.Glob(path.Join(componentsPath, "*", "package.meta"))
+	if err != nil {
+		return nil, errors.Wrap(err, "Failed to glob packages")
+	}
+
+	// len() is never negative; compare against zero directly.
+	if len(packagesWithMetadata) == 0 {
+		return nil, errors.New("Found no package metadata")
+	}
+
+	for _, packagePath := range packagesWithMetadata {
+		metadata, err := parsePackageMetadata(packagePath)
+		if err != nil {
+			return nil, err
+		}
+		packageMetadataMap[metadata.Name] = metadata
+	}
+
+	metadata, err := parseProductsMeta(path.Join(repoRootPath, "products.meta"))
+	if err != nil {
+		return nil, err
+	}
+
+	packages := make([]*Package, len(metadata.Packages))
+	for i := range metadata.Packages {
+		packages[i] = &Package{
+			Name:     metadata.Packages[i],
+			Metadata: packageMetadataMap[metadata.Packages[i]],
+		}
+	}
+
+	return &Metadata{
+		Packages:    packages,
+		Collections: metadata.Collections,
+	}, nil
+}
diff --git a/lib/product/types.go b/lib/product/types.go
new file mode 100644
index 00000000000..6895a6a3f9c
--- /dev/null
+++ b/lib/product/types.go
@@ -0,0 +1,154 @@
+package product
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/pkg/errors"
+)
+
+type CollectionType string
+
+const (
+	// BaseType is a collection that may not be directly
+	// deployed by the user. It provides a way to group
+	// services which may be depended upon by other products.
+	BaseType CollectionType = "base"
+
+	// ProductType is a collection that can be deployed by the user.
+	ProductType CollectionType = "product"
+)
+
+func (c CollectionType) validate() error {
+	switch c {
+	case BaseType, ProductType:
+		return nil
+	default:
+		return errors.Errorf("%s is not a valid collection type", c)
+	}
+}
+
+// PackageName represents a package or service by origin/name
+type PackageName struct {
+	// Origin is the habitat origin this package belongs to
+	Origin string
+	// Name is the habitat name of the package
+	Name string
+}
+
+func (p *PackageName) UnmarshalText(text []byte) error {
+	parts := strings.Split(string(text), "/")
+	if len(parts) == 2 {
+		p.Origin = parts[0]
+		p.Name = parts[1]
+	} else {
+		// Report the original input, not the split slice, so the
+		// error reads "a/b/c is not ..." rather than "[a b c] is not ...".
+		return errors.Errorf("%s is not a valid package. Must be origin/name", string(text))
+	}
+	return nil
+}
+
+func (p *PackageName) MarshalText() ([]byte, error) {
+	return []byte(fmt.Sprintf("%s/%s", p.Origin, p.Name)), nil
+}
+
+func (p *PackageName) validate() error {
+	if p.Origin == "" || p.Name == "" {
+		return errors.New("Invalid package")
+	}
+	return nil
+}
+
+func (p PackageName) String() string {
+	return fmt.Sprintf("%s/%s", p.Origin, p.Name)
+}
+
+// Collection is a group of services and packages that provide some
+// desired functionality. For example, postgres requires 3 services
+// to fully work in Automate: postgresql, a sidecar, and a tcp
+// gateway.
+// Collections can be of type base, meaning they are deployed as
+// part of other collections. The other type is product, which allows
+// them to be deployed by users (automate, chef-server, workflow, etc).
+type Collection struct {
+	Name string `json:"name"`
+
+	// Aliases is a list of alternative names that can be used for the
+	// collection. As an example, we might want to allow both chef-server
+	// and chef-infra-server
+	Aliases []string `json:"aliases"`
+
+	// Type is one of base or product. Products can be deployed, whereas
+	// base collections can be included in other collections as deps
+	Type CollectionType `json:"type"`
+
+	// Services is the list of packages that will be deployed as services
+	// for the collection.
+	Services []PackageName `json:"services"`
+
+	// Packages is a list of packages that are needed for this collection.
+	// They will likely be providing binlinks for the user
+	Packages []PackageName `json:"packages"`
+
+	// Dependencies is a list of collections this collection depends on.
+	Dependencies []string `json:"dependencies"`
+
+	// Hidden allows adding collections that we don't want to present to the
+	// user
+	Hidden bool `json:"hidden"`
+}
+
+func (c *Collection) validate(allowedPackageSet map[PackageName]bool) error {
+	if err := c.Type.validate(); err != nil {
+		return err
+	}
+	for _, p := range c.Services {
+		if err := p.validate(); err != nil {
+			return err
+		}
+		if !allowedPackageSet[p] {
+			return errors.Errorf("Expected %s in packages list", p)
+		}
+	}
+
+	for _, p := range c.Packages {
+		if err := p.validate(); err != nil {
+			return err
+		}
+		if !allowedPackageSet[p] {
+			return errors.Errorf("Expected %s in packages list", p)
+		}
+	}
+	return nil
+}
+
+// PackageMetadata is a set of metadata that components may optionally provide.
+type PackageMetadata struct {
+	Name PackageName `json:"name"`
+
+	// DataService is set to true if this service is a data service.
+	// For example, postgres, elasticsearch, s3
+	DataService bool `json:"data_service"`
+
+	// A list of binaries to be binlinked when the package is deployed
+	Binlinks []string `json:"binlinks"`
+}
+
+func (p *PackageMetadata) validate() error {
+	return nil
+}
+
+// Package represents a package/service along with its metadata
+type Package struct {
+	Name PackageName `json:"name"`
+	// Metadata is optional metadata a package may provide. This
+	// field is nullable
+
+	Metadata *PackageMetadata `json:"metadata"`
+}
+
+// Metadata is the top level metadata that describes the automate
+// collections and its packages.
+type Metadata struct {
+	Packages    []*Package    `json:"packages"`
+	Collections []*Collection `json:"collections"`
+}
diff --git a/products.meta b/products.meta
new file mode 100644
index 00000000000..48fa6f33cfb
--- /dev/null
+++ b/products.meta
@@ -0,0 +1,142 @@
+{
+    "packages": [
+        "chef/backup-gateway",
+        "chef/automate-postgresql",
+        "chef/automate-pg-gateway",
+        "chef/automate-elasticsearch",
+        "chef/automate-es-gateway",
+        "chef/automate-ui",
+        "chef/pg-sidecar-service",
+        "chef/event-service",
+        "chef/authz-service",
+        "chef/es-sidecar-service",
+        "chef/automate-dex",
+        "chef/teams-service",
+        "chef/authn-service",
+        "chef/secrets-service",
+        "chef/applications-service",
+        "chef/notifications-service",
+        "chef/nodemanager-service",
+        "chef/compliance-service",
+        "chef/license-control-service",
+        "chef/local-user-service",
+        "chef/session-service",
+        "chef/ingest-service",
+        "chef/config-mgmt-service",
+        "chef/deployment-service",
+        "chef/data-feed-service",
+        "chef/data-lifecycle-service",
+        "chef/event-gateway",
+        "chef/automate-gateway",
+        "chef/automate-load-balancer",
+        "chef/automate-cs-bookshelf",
+        "chef/automate-cs-oc-bifrost",
+        "chef/automate-cs-oc-erchef",
+        "chef/automate-cs-nginx",
+        "chef/automate-workflow-server",
+        "chef/automate-workflow-nginx",
+        "chef/automate-prometheus",
+        "chef/automate-cli",
+        "core/rsync"
+    ],
+
+    "collections": [
+        {
+            "name": "core",
+            "type": "base",
+            "services": [
+                "chef/backup-gateway",
+                "chef/deployment-service",
+                "chef/license-control-service",
+                "chef/automate-load-balancer"
+            ],
+            "packages": [
+                "chef/automate-cli",
+                "core/rsync"
+            ]
+        },
+        {
+            "name": "postgresql",
+            "type": "base",
+            "services": [
+                "chef/automate-postgresql",
+                "chef/automate-pg-gateway",
+                "chef/pg-sidecar-service"
+            ]
+        },
+        {
+            "name": "elasticsearch",
+            "type": "base",
+            "services": [
+                "chef/automate-elasticsearch",
+                "chef/automate-es-gateway",
+                "chef/es-sidecar-service"
+            ]
+        },
+        {
+            "name": "auth",
+            "type": "base",
+            "dependencies": ["core", "postgresql"],
+            "services": [
+                "chef/authz-service",
+                "chef/authn-service",
+                "chef/automate-dex",
+                "chef/local-user-service",
+                "chef/session-service"
+            ]
+        },
+        {
+            "name": "automate",
+            "type": "product",
+            "dependencies": ["core", "postgresql", "elasticsearch", "auth"],
+            "aliases": ["automate-full"],
+            "services": [
+                "chef/automate-ui",
+                "chef/event-service",
+                "chef/teams-service",
+                "chef/authn-service",
+                "chef/secrets-service",
+                "chef/applications-service",
+                "chef/notifications-service",
+                "chef/nodemanager-service",
+                "chef/compliance-service",
+                "chef/ingest-service",
+                "chef/config-mgmt-service",
+                "chef/data-feed-service",
+                "chef/data-lifecycle-service",
+                "chef/event-gateway",
+                "chef/automate-gateway"
+            ]
+        },
+        {
+            "name": "chef-server",
+            "type": "product",
+            "aliases": ["chef-infra-server"],
+            "dependencies": ["core", "postgresql", "elasticsearch"],
+            "services": [
+                "chef/automate-cs-bookshelf",
+                "chef/automate-cs-oc-bifrost",
+                "chef/automate-cs-oc-erchef",
+                "chef/automate-cs-nginx"
+            ]
+        },
+        {
+            "name": "workflow",
+            "type": "product",
+            "dependencies": ["automate"],
+            "services": [
+                "chef/automate-workflow-server",
+                "chef/automate-workflow-nginx"
+            ]
+        },
+        {
+            "name": "monitoring",
+            "type": "product",
+            "hidden": true,
+            "dependencies": ["automate"],
+            "services": [
+                "chef/automate-prometheus"
+            ]
+        }
+    ]
+}