New stack provider for environment variables (#2298)
Add a new stack provider that can be used when elastic-package is
configured with environment variables. This provider fills the gap of
using elastic-package for testing against clusters that are not managed
by elastic-package itself.

To use it, at least the ELASTIC_PACKAGE_KIBANA_HOST and
ELASTIC_PACKAGE_ELASTICSEARCH_HOST environment variables must be set;
other variables can be used to configure authentication, SSL, and so on.

Then, run `elastic-package stack up --provider environment`.
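
A minimal sketch, assuming a basic-auth cluster (the hosts, credentials, and
authentication variable names here are illustrative; set whatever your
cluster actually requires):

```sh
# Minimum required: where to reach Kibana and Elasticsearch.
export ELASTIC_PACKAGE_KIBANA_HOST="https://kibana.example.com:5601"
export ELASTIC_PACKAGE_ELASTICSEARCH_HOST="https://elasticsearch.example.com:9200"

# Authentication, if the cluster requires it (illustrative values).
export ELASTIC_PACKAGE_ELASTICSEARCH_USERNAME="elastic"
export ELASTIC_PACKAGE_ELASTICSEARCH_PASSWORD="changeme"

elastic-package stack up --provider environment
```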

This provider manages:
- A local Elastic Agent and the initial agent policy.
- An optional local Logstash and the corresponding Logstash output.
- If no Fleet Server is available, the Fleet Server host configuration, a
  Fleet Server policy, and a new service token; it then starts a local
  Fleet Server.

Most commands should work if the provider is able to set everything up.
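
Once the stack is up, the usual workflow runs against the external cluster.
A short sketch (test commands are run from a package directory):

```sh
elastic-package stack status   # show the services started by the provider
elastic-package test system    # system tests against the external stack
elastic-package stack down     # tear the local services down again
```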
jsoriano authored Jan 21, 2025
1 parent c975508 commit b6b74e5
Showing 26 changed files with 1,051 additions and 145 deletions.
7 changes: 7 additions & 0 deletions internal/agentdeployer/_static/docker-agent-base.yml.tmpl
@@ -5,6 +5,7 @@
{{- $dockerfile_hash := fact "dockerfile_hash" -}}
{{- $stack_version := fact "stack_version" }}
{{- $agent_image := fact "agent_image" }}
{{- $enrollment_token := fact "enrollment_token" }}
services:
elastic-agent:
hostname: ${AGENT_HOSTNAME}
@@ -40,9 +41,13 @@ services:
- FLEET_ENROLL=1
- FLEET_URL={{ fact "fleet_url" }}
- KIBANA_HOST={{ fact "kibana_host" }}
{{ if eq $enrollment_token "" }}
- FLEET_TOKEN_POLICY_NAME=${FLEET_TOKEN_POLICY_NAME}
- ELASTICSEARCH_USERNAME={{ fact "elasticsearch_username" }}
- ELASTICSEARCH_PASSWORD={{ fact "elasticsearch_password" }}
{{ else }}
- FLEET_ENROLLMENT_TOKEN={{ $enrollment_token }}
{{ end }}
volumes:
- type: bind
source: ${LOCAL_CA_CERT}
@@ -57,3 +62,5 @@
source: ${SERVICE_LOGS_DIR}
target: /run/service_logs/
read_only: false
extra_hosts:
- "host.docker.internal:host-gateway"
internal/agentdeployer/_static/elastic-agent-managed.yaml.tmpl
@@ -44,15 +44,15 @@ spec:
value: {{ .fleetURL }}
# If left empty KIBANA_HOST, KIBANA_FLEET_USERNAME, KIBANA_FLEET_PASSWORD are needed
- name: FLEET_ENROLLMENT_TOKEN
value: ""
value: "{{ .enrollmentToken }}"
- name: FLEET_TOKEN_POLICY_NAME
value: "{{ .elasticAgentTokenPolicyName }}"
- name: KIBANA_HOST
value: {{ .kibanaURL }}
- name: KIBANA_FLEET_USERNAME
value: "elastic"
value: {{ .username }}
- name: KIBANA_FLEET_PASSWORD
value: "changeme"
value: {{ .password }}
- name: SSL_CERT_DIR
value: "/etc/ssl/certs:/etc/ssl/elastic-package"
- name: NODE_NAME
30 changes: 24 additions & 6 deletions internal/agentdeployer/agent.go
@@ -119,7 +119,7 @@ func (d *DockerComposeAgentDeployer) SetUp(ctx context.Context, agentInfo AgentI
fmt.Sprintf("%s=%s", agentHostnameEnv, d.agentHostname()),
)

configDir, err := d.installDockerCompose(agentInfo)
configDir, err := d.installDockerCompose(ctx, agentInfo)
if err != nil {
return nil, fmt.Errorf("could not create resources for custom agent: %w", err)
}
@@ -233,7 +233,7 @@ func (d *DockerComposeAgentDeployer) agentName() string {

// installDockerCompose creates the files needed to run the custom elastic agent and returns
// the directory with these files.
func (d *DockerComposeAgentDeployer) installDockerCompose(agentInfo AgentInfo) (string, error) {
func (d *DockerComposeAgentDeployer) installDockerCompose(ctx context.Context, agentInfo AgentInfo) (string, error) {
customAgentDir, err := CreateDeployerDir(d.profile, fmt.Sprintf("docker-agent-%s-%s", d.agentName(), d.agentRunID))
if err != nil {
return "", fmt.Errorf("failed to create directory for custom agent files: %w", err)
@@ -254,14 +254,31 @@ func (d *DockerComposeAgentDeployer) installDockerCompose(agentInfo AgentInfo) (
if err != nil {
return "", fmt.Errorf("failed to load config from profile: %w", err)
}
enrollmentToken := ""
if config.ElasticsearchAPIKey != "" {
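		// With API key authentication there are no username/password credentials
		// the agent could enroll with, so an enrollment token for the assigned
		// policy is fetched via Kibana and passed to the compose template instead.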
// TODO: Review if this is the correct place to get the enrollment token.
kibanaClient, err := stack.NewKibanaClientFromProfile(d.profile)
if err != nil {
return "", fmt.Errorf("failed to create kibana client: %w", err)
}
enrollmentToken, err = kibanaClient.GetEnrollmentTokenForPolicyID(ctx, agentInfo.Policy.ID)
if err != nil {
return "", fmt.Errorf("failed to get enrollment token for policy %q: %w", agentInfo.Policy.Name, err)
}
}

// TODO: Include these settings more explicitly in `config`.
fleetURL := "https://fleet-server:8220"
kibanaHost := "https://kibana:5601"
stackVersion := d.stackVersion
if config.Provider == stack.ProviderServerless {
fleetURL = config.Parameters[stack.ParamServerlessFleetURL]
if config.Provider != stack.ProviderCompose {
kibanaHost = config.KibanaHost
stackVersion = config.Parameters[stack.ParamServerlessLocalStackVersion]
}
if url, ok := config.Parameters[stack.ParamServerlessFleetURL]; ok {
fleetURL = url
}
if version, ok := config.Parameters[stack.ParamServerlessLocalStackVersion]; ok {
stackVersion = version
}

agentImage, err := selectElasticAgentImage(stackVersion, agentInfo.Agent.BaseImage)
Expand All @@ -280,9 +297,10 @@ func (d *DockerComposeAgentDeployer) installDockerCompose(agentInfo AgentInfo) (
"dockerfile_hash": hex.EncodeToString(hashDockerfile),
"stack_version": stackVersion,
"fleet_url": fleetURL,
"kibana_host": kibanaHost,
"kibana_host": stack.DockerInternalHost(kibanaHost),
"elasticsearch_username": config.ElasticsearchUsername,
"elasticsearch_password": config.ElasticsearchPassword,
"enrollment_token": enrollmentToken,
})

resourceManager.RegisterProvider("file", &resource.FileProvider{
3 changes: 0 additions & 3 deletions internal/agentdeployer/info.go
@@ -107,7 +107,4 @@ type AgentInfo struct {

AgentSettings
}

// CustomProperties store additional data used to boot up the service, e.g. AWS credentials.
CustomProperties map[string]interface{}
}
47 changes: 39 additions & 8 deletions internal/agentdeployer/kubernetes.go
@@ -57,7 +57,7 @@ type kubernetesDeployedAgent struct {
}

func (s kubernetesDeployedAgent) TearDown(ctx context.Context) error {
elasticAgentManagedYaml, err := getElasticAgentYAML(s.profile, s.stackVersion, s.agentInfo.Policy.Name, s.agentName)
elasticAgentManagedYaml, err := getElasticAgentYAML(ctx, s.profile, s.agentInfo, s.stackVersion, s.agentName)
if err != nil {
return fmt.Errorf("can't retrieve Kubernetes file for Elastic Agent: %w", err)
}
@@ -123,7 +123,7 @@ func (ksd *KubernetesAgentDeployer) SetUp(ctx context.Context, agentInfo AgentIn
if ksd.runTearDown || ksd.runTestsOnly {
logger.Debug("Skip install Elastic Agent in cluster")
} else {
err = installElasticAgentInCluster(ctx, ksd.profile, ksd.stackVersion, agentInfo.Policy.Name, agentName)
err = installElasticAgentInCluster(ctx, ksd.profile, agentInfo, ksd.stackVersion, agentName)
if err != nil {
return nil, fmt.Errorf("can't install Elastic-Agent in the Kubernetes cluster: %w", err)
}
Expand Down Expand Up @@ -155,10 +155,10 @@ func (ksd *KubernetesAgentDeployer) agentName() string {

var _ AgentDeployer = new(KubernetesAgentDeployer)

func installElasticAgentInCluster(ctx context.Context, profile *profile.Profile, stackVersion, policyName, agentName string) error {
func installElasticAgentInCluster(ctx context.Context, profile *profile.Profile, agentInfo AgentInfo, stackVersion, agentName string) error {
logger.Debug("install Elastic Agent in the Kubernetes cluster")

elasticAgentManagedYaml, err := getElasticAgentYAML(profile, stackVersion, policyName, agentName)
elasticAgentManagedYaml, err := getElasticAgentYAML(ctx, profile, agentInfo, stackVersion, agentName)
if err != nil {
return fmt.Errorf("can't retrieve Kubernetes file for Elastic Agent: %w", err)
}
@@ -176,8 +176,36 @@ func installElasticAgentInCluster(ctx context.Context, profile *profile.Profile,
//go:embed _static/elastic-agent-managed.yaml.tmpl
var elasticAgentManagedYamlTmpl string

func getElasticAgentYAML(profile *profile.Profile, stackVersion, policyName, agentName string) ([]byte, error) {
func getElasticAgentYAML(ctx context.Context, profile *profile.Profile, agentInfo AgentInfo, stackVersion, agentName string) ([]byte, error) {
logger.Debugf("Prepare YAML definition for Elastic Agent running in stack v%s", stackVersion)
config, err := stack.LoadConfig(profile)
if err != nil {
return nil, fmt.Errorf("failed to load config from profile: %w", err)
}
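	// The defaults below point at the services of the compose provider; for
	// other providers the Kibana URL comes from the profile configuration, and
	// serverless parameters override the Fleet URL and stack version when set.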
fleetURL := "https://fleet-server:8220"
kibanaURL := "https://kibana:5601"
if config.Provider != stack.ProviderCompose {
kibanaURL = config.KibanaHost
}
if url, ok := config.Parameters[stack.ParamServerlessFleetURL]; ok {
fleetURL = url
}
if version, ok := config.Parameters[stack.ParamServerlessLocalStackVersion]; ok {
stackVersion = version
}

enrollmentToken := ""
if config.ElasticsearchAPIKey != "" {
// TODO: Review if this is the correct place to get the enrollment token.
kibanaClient, err := stack.NewKibanaClientFromProfile(profile)
if err != nil {
return nil, fmt.Errorf("failed to create kibana client: %w", err)
}
enrollmentToken, err = kibanaClient.GetEnrollmentTokenForPolicyID(ctx, agentInfo.Policy.ID)
if err != nil {
return nil, fmt.Errorf("failed to get enrollment token for policy %q: %w", agentInfo.Policy.Name, err)
}
}

appConfig, err := install.Configuration(install.OptionWithStackVersion(stackVersion))
if err != nil {
@@ -193,11 +221,14 @@ func getElasticAgentYAML(profile *profile.Profile, stackVersion, policyName, age

var elasticAgentYaml bytes.Buffer
err = tmpl.Execute(&elasticAgentYaml, map[string]string{
"fleetURL": "https://fleet-server:8220",
"kibanaURL": "https://kibana:5601",
"fleetURL": fleetURL,
"kibanaURL": kibanaURL,
"username": config.ElasticsearchUsername,
"password": config.ElasticsearchPassword,
"enrollmentToken": enrollmentToken,
"caCertPem": caCert,
"elasticAgentImage": appConfig.StackImageRefs().ElasticAgent,
"elasticAgentTokenPolicyName": getTokenPolicyName(stackVersion, policyName),
"elasticAgentTokenPolicyName": getTokenPolicyName(stackVersion, agentInfo.Policy.Name),
"agentName": agentName,
})
if err != nil {
2 changes: 2 additions & 0 deletions internal/kubectl/kubectl_apply.go
@@ -135,6 +135,8 @@ func waitForReadyResources(resources []resource) error {
// be unavailable (DaemonSet.spec.updateStrategy.rollingUpdate.maxUnavailable defaults to 1).
// daemonSetReady will return true regardless of the pod not being ready yet.
// Can be solved with multi-node clusters.
// TODO: Support context cancellation in this wait. We rely on a helm waiter
// that doesn't support it.
err := kubeClient.Wait(resList, readinessTimeout)
if err != nil {
return fmt.Errorf("waiter failed: %w", err)
6 changes: 3 additions & 3 deletions internal/serverless/project.go
@@ -31,8 +31,8 @@ type Project struct {
Region string `json:"region_id"`

Credentials struct {
Username string `json:"username"`
Password string `json:"password"`
Username string `json:"username,omitempty"`
Password string `json:"password,omitempty"`
} `json:"credentials"`

Endpoints struct {
@@ -150,7 +150,7 @@ func (p *Project) getFleetHealth(ctx context.Context) error {

if status.Status != "HEALTHY" {
return fmt.Errorf("fleet status %s", status.Status)

}

return nil
}
9 changes: 7 additions & 2 deletions internal/stack/_static/elastic-agent.env.tmpl
@@ -3,8 +3,13 @@ FLEET_ENROLL=1
FLEET_URL={{ fact "fleet_url" }}
KIBANA_FLEET_HOST={{ fact "kibana_host" }}
KIBANA_HOST={{ fact "kibana_host" }}
{{- $enrollment_token := fact "enrollment_token" }}
{{- if eq $enrollment_token "" }}
ELASTICSEARCH_USERNAME={{ fact "username" }}
ELASTICSEARCH_PASSWORD={{ fact "password" }}
{{ if not (semverLessThan $version "8.0.0") }}
{{- if not (semverLessThan $version "8.0.0") }}
FLEET_TOKEN_POLICY_NAME=Elastic-Agent (elastic-package)
{{ end }}
{{- end }}
{{- else }}
FLEET_ENROLLMENT_TOKEN={{ $enrollment_token }}
{{- end }}
2 changes: 1 addition & 1 deletion internal/stack/_static/fleet-server-healthcheck.sh
@@ -6,7 +6,7 @@ NUMBER_SUCCESSES="$1"
WAITING_TIME="$2"

healthcheck() {
curl -s --cacert /etc/ssl/elastic-agent/ca-cert.pem -f https://localhost:8220/api/status | grep -i healthy 2>&1 >/dev/null
curl -s --cacert /etc/ssl/certs/elastic-package.pem -f https://localhost:8220/api/status | grep -i healthy 2>&1 >/dev/null
}

# Fleet Server can restart after announcing to be healthy, agents connecting during this restart will
116 changes: 116 additions & 0 deletions internal/stack/_static/local-services-docker-compose.yml.tmpl
@@ -0,0 +1,116 @@
services:
{{- $fleet_server_managed := fact "fleet_server_managed" }}
{{- if eq $fleet_server_managed "true" }}
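  # Started when the provider found no usable Fleet Server in the target
  # stack; it runs with the Fleet Server policy and service token that the
  # provider prepared.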
{{- $fleet_healthcheck_success_checks := 3 -}}
{{- $fleet_healthcheck_waiting_time := 1 -}}
{{- $version := fact "agent_version" -}}
{{- if semverLessThan $version "8.0.0" -}}
{{- $fleet_healthcheck_success_checks = 10 -}}
{{- $fleet_healthcheck_waiting_time = 2 -}}
{{- end }}
fleet-server:
image: "{{ fact "agent_image" }}"
healthcheck:
test: "bash /healthcheck.sh {{ $fleet_healthcheck_success_checks }} {{ $fleet_healthcheck_waiting_time }}"
start_period: 60s
interval: 5s
hostname: docker-fleet-server
environment:
- "ELASTICSEARCH_HOST={{ fact "elasticsearch_host" }}"
- "FLEET_SERVER_CERT=/etc/ssl/fleet-server/cert.pem"
- "FLEET_SERVER_CERT_KEY=/etc/ssl/fleet-server/key.pem"
- "FLEET_SERVER_ELASTICSEARCH_HOST={{ fact "elasticsearch_host" }}"
- "FLEET_SERVER_ENABLE=1"
- "FLEET_SERVER_HOST=0.0.0.0"
- "FLEET_SERVER_SERVICE_TOKEN={{ fact "fleet_service_token" }}"
- "FLEET_SERVER_POLICY={{ fact "fleet_server_policy" }}"
- "FLEET_URL={{ fact "fleet_url" }}"
- "KIBANA_FLEET_HOST={{ fact "kibana_host" }}"
- "KIBANA_FLEET_SERVICE_TOKEN={{ fact "fleet_service_token" }}"
- "KIBANA_FLEET_SERVER_POLICY={{ fact "fleet_server_policy" }}"
- "KIBANA_FLEET_SETUP=1"
- "KIBANA_HOST={{ fact "kibana_host" }}"
volumes:
- "../certs/ca-cert.pem:/etc/ssl/certs/elastic-package.pem:ro"
- "../certs/fleet-server:/etc/ssl/fleet-server:ro"
- "./fleet-server-healthcheck.sh:/healthcheck.sh:ro"
ports:
- "127.0.0.1:8220:8220"
extra_hosts:
- "host.docker.internal:host-gateway"

fleet-server_is_ready:
image: tianon/true:multiarch
depends_on:
fleet-server:
condition: service_healthy
{{- end }}

elastic-agent:
image: "{{ fact "agent_image" }}"
{{- if eq $fleet_server_managed "true" }}
depends_on:
fleet-server:
condition: service_healthy
{{- end }}
healthcheck:
test: "elastic-agent status"
timeout: 2s
start_period: 360s
retries: 180
interval: 5s
hostname: docker-fleet-agent
env_file: "./elastic-agent.env"
cap_drop:
- ALL
volumes:
- type: bind
source: ../../../tmp/service_logs/
target: /tmp/service_logs/
# Mount service_logs under /run too as a testing workaround for the journald input (see elastic-package#1235).
- type: bind
source: ../../../tmp/service_logs/
target: /run/service_logs/
- "../certs/ca-cert.pem:/etc/ssl/certs/elastic-package.pem"
extra_hosts:
- "host.docker.internal:host-gateway"

elastic-agent_is_ready:
image: tianon/true:multiarch
depends_on:
elastic-agent:
condition: service_healthy

{{ $logstash_enabled := fact "logstash_enabled" }}
{{ if eq $logstash_enabled "true" }}
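  # Optional local Logstash, rendered only when the logstash_enabled fact is
  # true; the provider also configures a matching Logstash output.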
logstash:
build:
dockerfile: "./Dockerfile.logstash"
args:
IMAGE: "{{ fact "logstash_image" }}"
healthcheck:
test: bin/logstash -t
start_period: 120s
interval: 60s
timeout: 60s
retries: 5
volumes:
- "../certs/logstash:/usr/share/logstash/config/certs"
ports:
- "127.0.0.1:5044:5044"
- "127.0.0.1:9600:9600"
environment:
- XPACK_MONITORING_ENABLED=false
- ELASTIC_API_KEY={{ fact "api_key" }}
- ELASTIC_USER={{ fact "username" }}
- ELASTIC_PASSWORD={{ fact "password" }}
- ELASTIC_HOSTS={{ fact "elasticsearch_host" }}
extra_hosts:
- "host.docker.internal:host-gateway"

logstash_is_ready:
image: tianon/true:multiarch
depends_on:
logstash:
condition: service_healthy
{{ end }}