From 3874725d1e9c645a549b270be800a266bb2b3021 Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Mon, 19 Oct 2020 09:16:52 -0400 Subject: [PATCH 01/27] [Elastic Agent] Fix index for Agent monitoring to to elastic_agent. (#21932) * Change to elastic_agent. * Add changelog. --- x-pack/elastic-agent/CHANGELOG.next.asciidoc | 1 + .../pkg/agent/operation/monitoring.go | 18 +++++++++--------- 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/x-pack/elastic-agent/CHANGELOG.next.asciidoc b/x-pack/elastic-agent/CHANGELOG.next.asciidoc index 64d1a3b589b0..5284da8db2bd 100644 --- a/x-pack/elastic-agent/CHANGELOG.next.asciidoc +++ b/x-pack/elastic-agent/CHANGELOG.next.asciidoc @@ -18,6 +18,7 @@ - Prevent reporting ecs version twice {pull}21616[21616] - Partial extracted beat result in failure to spawn beat {issue}21718[21718] - Use local temp instead of system one {pull}21883[21883] +- Rename monitoring index from `elastic.agent` to `elastic_agent` {pull}21932[21932] ==== New features diff --git a/x-pack/elastic-agent/pkg/agent/operation/monitoring.go b/x-pack/elastic-agent/pkg/agent/operation/monitoring.go index c4d895eb6eee..74d542d58e9d 100644 --- a/x-pack/elastic-agent/pkg/agent/operation/monitoring.go +++ b/x-pack/elastic-agent/pkg/agent/operation/monitoring.go @@ -186,14 +186,14 @@ func (o *Operator) getMonitoringFilebeatConfig(output interface{}) (map[string]i "paths": []string{ filepath.Join(paths.Home(), "logs", "elastic-agent-json.log"), }, - "index": "logs-elastic.agent-default", + "index": "logs-elastic_agent-default", "processors": []map[string]interface{}{ { "add_fields": map[string]interface{}{ "target": "data_stream", "fields": map[string]interface{}{ "type": "logs", - "dataset": "elastic.agent", + "dataset": "elastic_agent", "namespace": "default", }, }, @@ -202,7 +202,7 @@ func (o *Operator) getMonitoringFilebeatConfig(output interface{}) (map[string]i "add_fields": map[string]interface{}{ "target": "event", "fields": map[string]interface{}{ - "dataset": "elastic.agent", + "dataset": "elastic_agent", }, }, }, @@ -220,14 +220,14 @@ func (o *Operator) getMonitoringFilebeatConfig(output interface{}) (map[string]i "message_key": "message", }, "paths": paths, - "index": fmt.Sprintf("logs-elastic.agent.%s-default", name), + "index": fmt.Sprintf("logs-elastic_agent.%s-default", name), "processors": []map[string]interface{}{ { "add_fields": map[string]interface{}{ "target": "data_stream", "fields": map[string]interface{}{ "type": "logs", - "dataset": fmt.Sprintf("elastic.agent.%s", name), + "dataset": fmt.Sprintf("elastic_agent.%s", name), "namespace": "default", }, }, @@ -236,7 +236,7 @@ func (o *Operator) getMonitoringFilebeatConfig(output interface{}) (map[string]i "add_fields": map[string]interface{}{ "target": "event", "fields": map[string]interface{}{ - "dataset": fmt.Sprintf("elastic.agent.%s", name), + "dataset": fmt.Sprintf("elastic_agent.%s", name), }, }, }, @@ -270,14 +270,14 @@ func (o *Operator) getMonitoringMetricbeatConfig(output interface{}) (map[string "metricsets": []string{"stats", "state"}, "period": "10s", "hosts": endpoints, - "index": fmt.Sprintf("metrics-elastic.agent.%s-default", name), + "index": fmt.Sprintf("metrics-elastic_agent.%s-default", name), "processors": []map[string]interface{}{ { "add_fields": map[string]interface{}{ "target": "data_stream", "fields": map[string]interface{}{ "type": "metrics", - "dataset": fmt.Sprintf("elastic.agent.%s", name), + "dataset": fmt.Sprintf("elastic_agent.%s", name), "namespace": "default", }, }, @@ -286,7 +286,7 
@@ func (o *Operator) getMonitoringMetricbeatConfig(output interface{}) (map[string "add_fields": map[string]interface{}{ "target": "event", "fields": map[string]interface{}{ - "dataset": fmt.Sprintf("elastic.agent.%s", name), + "dataset": fmt.Sprintf("elastic_agent.%s", name), }, }, }, From 803ddcada71fcb0ccb398a7f64db38836bb9f472 Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Mon, 19 Oct 2020 10:13:10 -0400 Subject: [PATCH 02/27] [Elastic Agent] Fix named pipe communication on Windows 7 (#21931) * Fix named pipes on Windows 7. * Add changelog fix notice. --- NOTICE.txt | 6 +++--- go.mod | 1 + go.sum | 4 ++-- x-pack/elastic-agent/CHANGELOG.next.asciidoc | 1 + 4 files changed, 7 insertions(+), 5 deletions(-) diff --git a/NOTICE.txt b/NOTICE.txt index 349fe58b3d15..477f0b53201b 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -2183,12 +2183,12 @@ Contents of probable licence file $GOMODCACHE/github.com/!azure/go-autorest/auto -------------------------------------------------------------------------------- -Dependency : github.com/Microsoft/go-winio -Version: v0.4.15-0.20190919025122-fc70bd9a86b5 +Dependency : github.com/bi-zone/go-winio +Version: v0.4.15 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/!microsoft/go-winio@v0.4.15-0.20190919025122-fc70bd9a86b5/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/bi-zone/go-winio@v0.4.15/LICENSE: The MIT License (MIT) diff --git a/go.mod b/go.mod index 720690f1f2fa..2ef656063192 100644 --- a/go.mod +++ b/go.mod @@ -189,6 +189,7 @@ require ( replace ( github.com/Azure/go-autorest => github.com/Azure/go-autorest v12.2.0+incompatible + github.com/Microsoft/go-winio => github.com/bi-zone/go-winio v0.4.15 github.com/Shopify/sarama => github.com/elastic/sarama v1.19.1-0.20200629123429-0e7b69039eec github.com/cucumber/godog => github.com/cucumber/godog v0.8.1 github.com/docker/docker => github.com/docker/engine v0.0.0-20191113042239-ea84732a7725 diff --git a/go.sum b/go.sum index 5c01c612fe32..97f31d792922 100644 --- a/go.sum +++ b/go.sum @@ -80,8 +80,6 @@ github.com/DataDog/zstd v1.4.1 h1:3oxKN3wbHibqx897utPC2LTQU4J+IHWWJO+glkAkpFM= github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/Masterminds/semver v1.4.2 h1:WBLTQ37jOCzSLtXNdoo8bNM8876KhNqOKvrlGITgsTc= github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= -github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5 h1:ygIc8M6trr62pF5DucadTWGdEB4mEyvzi0e2nbcmcyA= -github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= github.com/Microsoft/hcsshim v0.8.7 h1:ptnOoufxGSzauVTsdE+wMYnCWA301PdoN4xg5oRdZpg= github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= @@ -132,6 +130,8 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bi-zone/go-winio v0.4.15 h1:viLHm+U7bzIkfVHuWgc3Wp/sT5zaLoRG7XdOEy1b12w= +github.com/bi-zone/go-winio v0.4.15/go.mod 
h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= github.com/blakerouse/service v1.1.1-0.20200924160513-057808572ffa h1:aXHPZwx8Y5z8r+1WPylnu095usTf6QSshaHs6nVMBc0= github.com/blakerouse/service v1.1.1-0.20200924160513-057808572ffa/go.mod h1:RrJI2xn5vve/r32U5suTbeaSGoMU6GbNPoj36CVYcHc= github.com/blakesmith/ar v0.0.0-20150311145944-8bd4349a67f2 h1:oMCHnXa6CCCafdPDbMh/lWRhRByN0VFLvv+g+ayx1SI= diff --git a/x-pack/elastic-agent/CHANGELOG.next.asciidoc b/x-pack/elastic-agent/CHANGELOG.next.asciidoc index 5284da8db2bd..fa0198a6628e 100644 --- a/x-pack/elastic-agent/CHANGELOG.next.asciidoc +++ b/x-pack/elastic-agent/CHANGELOG.next.asciidoc @@ -19,6 +19,7 @@ - Partial extracted beat result in failure to spawn beat {issue}21718[21718] - Use local temp instead of system one {pull}21883[21883] - Rename monitoring index from `elastic.agent` to `elastic_agent` {pull}21932[21932] +- Fix issue with named pipes on Windows 7 {pull}21931[21931] ==== New features From b2d1929bff02393360cc0292975b82da448151c3 Mon Sep 17 00:00:00 2001 From: Chris Mark Date: Mon, 19 Oct 2020 18:02:06 +0300 Subject: [PATCH 03/27] Stop storing stateless kubernetes keystores (#21880) --- CHANGELOG.next.asciidoc | 1 + .../k8skeystore/kubernetes_keystore.go | 20 +++++-------------- 2 files changed, 6 insertions(+), 15 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 51255305f42c..9f5d45e6a8e6 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -375,6 +375,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Fix retrieving resources by ID for the azure module. {pull}21711[21711] {issue}21707[21707] - Use timestamp from CloudWatch API when creating events. {pull}21498[21498] - Report the correct windows events for system/filesystem {pull}21758[21758] +- Fix panic in kubernetes autodiscover related to keystores {issue}21843[21843] {pull}21880[21880] *Packetbeat* diff --git a/libbeat/common/kubernetes/k8skeystore/kubernetes_keystore.go b/libbeat/common/kubernetes/k8skeystore/kubernetes_keystore.go index 616525b432a6..e17b4258232e 100644 --- a/libbeat/common/kubernetes/k8skeystore/kubernetes_keystore.go +++ b/libbeat/common/kubernetes/k8skeystore/kubernetes_keystore.go @@ -30,14 +30,10 @@ import ( "github.com/elastic/beats/v7/libbeat/logp" ) -type KubernetesKeystores map[string]keystore.Keystore - -// KubernetesKeystoresRegistry holds KubernetesKeystores for known namespaces. Once a Keystore for one k8s namespace -// is initialized it will be reused every time it is needed. +// KubernetesKeystoresRegistry implements a Provider for Keystore. type KubernetesKeystoresRegistry struct { - kubernetesKeystores KubernetesKeystores - logger *logp.Logger - client k8s.Interface + logger *logp.Logger + client k8s.Interface } // KubernetesSecretsKeystore allows to retrieve passwords from Kubernetes secrets for a given namespace @@ -56,9 +52,8 @@ func Factoryk8s(keystoreNamespace string, ks8client k8s.Interface, logger *logp. 
 // NewKubernetesKeystoresRegistry initializes a KubernetesKeystoresRegistry
 func NewKubernetesKeystoresRegistry(logger *logp.Logger, client k8s.Interface) keystore.Provider {
 return &KubernetesKeystoresRegistry{
- kubernetesKeystores: KubernetesKeystores{},
- logger: logger,
- client: client,
+ logger: logger,
+ client: client,
 }
 }
@@ -75,12 +70,7 @@ func (kr *KubernetesKeystoresRegistry) GetKeystore(event bus.Event) keystore.Key
 namespace = ns.(string)
 }
 if namespace != "" {
- // either retrieve already stored keystore or create a new one for the namespace
- if storedKeystore, ok := kr.kubernetesKeystores[namespace]; ok {
- return storedKeystore
- }
 k8sKeystore, _ := Factoryk8s(namespace, kr.client, kr.logger)
- kr.kubernetesKeystores["namespace"] = k8sKeystore
 return k8sKeystore
 }
 kr.logger.Debugf("Cannot retrieve kubernetes namespace from event: %s", event)

From e29c3fae4adbdd066e58ca0df00ed8ca24b74d0d Mon Sep 17 00:00:00 2001
From: Niels Hofmans
Date: Mon, 19 Oct 2020 17:19:31 +0200
Subject: [PATCH 04/27] filebeat: add SSL options to checkpoint module (#19560)

* feat(firewall): add tls config
* feat(firewall): add vars to manifest
* chore(checkpoint): add tls to example
* chore(checkpoint): run mage fmt update
* cleanup(checkpoint): remove obsolete log_level
* refactor(checkpoint): move to .ssl
* chore(x-pack): revert ide fix
* chore(changelog): add f5 asm ref
* revert(changelog): remove f5 asm mod
* chore(changelog): add checkpoint tls
* chore: fix lint warnings
* Undo some changes and move docs to checkpoint
* Move changelog entry

Co-authored-by: Marc Guasch
---
 CHANGELOG.next.asciidoc | 1 +
 filebeat/docs/modules/checkpoint.asciidoc | 12 ++++++++++++
 .../filebeat/module/checkpoint/_meta/docs.asciidoc | 12 ++++++++++++
 .../module/checkpoint/firewall/config/firewall.yml | 8 +++++++-
 .../filebeat/module/checkpoint/firewall/manifest.yml | 1 +
 5 files changed, 33 insertions(+), 1 deletion(-)

diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc
index 9f5d45e6a8e6..fd2970596390 100644
--- a/CHANGELOG.next.asciidoc
+++ b/CHANGELOG.next.asciidoc
@@ -631,6 +631,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d
 - New juniper.srx dataset for Juniper SRX logs. {pull}20017[20017]
 - Adding support for Microsoft 365 Defender (Microsoft Threat Protection) {pull}21446[21446]
 - Adding support for FIPS in s3 input {pull}21446[21446]
+- Add SSL option to checkpoint module {pull}19560[19560]
 *Heartbeat*
diff --git a/filebeat/docs/modules/checkpoint.asciidoc b/filebeat/docs/modules/checkpoint.asciidoc
index c4e453b452df..841e66fdbabc 100644
--- a/filebeat/docs/modules/checkpoint.asciidoc
+++ b/filebeat/docs/modules/checkpoint.asciidoc
@@ -70,6 +70,18 @@ A list of tags to include in events. Including `forwarded` indicates that the
 events did not originate on this host and causes `host.name` to not be added to
 events. Defaults to `[checkpoint-firewall, forwarded]`.
+*`var.ssl`*::
+
+The SSL/TLS configuration for the filebeat instance. This can be used to enforce mutual TLS.
+```yaml
+ssl:
+ enabled: true
+ certificate_authorities: ["my-ca.pem"]
+ certificate: "filebeat-cert.pem"
+ key: "filebeat-key.pem"
+ client_authentication: "required"
+```
+
 [float]
 ==== Check Point devices
diff --git a/x-pack/filebeat/module/checkpoint/_meta/docs.asciidoc b/x-pack/filebeat/module/checkpoint/_meta/docs.asciidoc
index ecd8e0d3e815..385206f03ff0 100644
--- a/x-pack/filebeat/module/checkpoint/_meta/docs.asciidoc
+++ b/x-pack/filebeat/module/checkpoint/_meta/docs.asciidoc
@@ -65,6 +65,18 @@ A list of tags to include in events. Including `forwarded` indicates that the
 events did not originate on this host and causes `host.name` to not be added to
 events. Defaults to `[checkpoint-firewall, forwarded]`.
+*`var.ssl`*::
+
+The SSL/TLS configuration for the filebeat instance. This can be used to enforce mutual TLS.
+```yaml
+ssl:
+ enabled: true
+ certificate_authorities: ["my-ca.pem"]
+ certificate: "filebeat-cert.pem"
+ key: "filebeat-key.pem"
+ client_authentication: "required"
+```
+
 [float]
 ==== Check Point devices
diff --git a/x-pack/filebeat/module/checkpoint/firewall/config/firewall.yml b/x-pack/filebeat/module/checkpoint/firewall/config/firewall.yml
index 4892400a8b97..9ac586c6b5cf 100644
--- a/x-pack/filebeat/module/checkpoint/firewall/config/firewall.yml
+++ b/x-pack/filebeat/module/checkpoint/firewall/config/firewall.yml
@@ -1,4 +1,10 @@
-{{ if eq .input "syslog" }}
+{{ if .ssl }}
+
+type: tcp
+host: "{{.syslog_host}}:{{.syslog_port}}"
+ssl: {{ .ssl | tojson }}
+
+{{ else if eq .input "syslog" }}
 type: udp
 host: "{{.syslog_host}}:{{.syslog_port}}"
diff --git a/x-pack/filebeat/module/checkpoint/firewall/manifest.yml b/x-pack/filebeat/module/checkpoint/firewall/manifest.yml
index 849c20fafe25..69301541669a 100644
--- a/x-pack/filebeat/module/checkpoint/firewall/manifest.yml
+++ b/x-pack/filebeat/module/checkpoint/firewall/manifest.yml
@@ -9,6 +9,7 @@ var:
 default: 9001
 - name: input
 default: syslog
+ - name: ssl
 ingest_pipeline:
 - ingest/pipeline.yml

From a79dddc8f9dcf5bad68f4a26e67840403e9e2cf7 Mon Sep 17 00:00:00 2001
From: Jaime Soriano Pastor
Date: Mon, 19 Oct 2020 17:44:42 +0200
Subject: [PATCH 05/27] Fix TestDockerStart flaky test (#21681)

Some changes are done to give more resilience to the test:
* Wait till image pull is finished, and retry in case of failure.
* Checked events are filtered by container id instead of image name, so tests are not affected by other containers that may be running in the system.
* Check timeout is for all events now, instead of being reset after an event is received.
* Container is removed after test is finished.
--- .../docker/docker_integration_test.go | 20 ++++----- libbeat/tests/docker/docker.go | 41 ++++++++++++++++--- 2 files changed, 46 insertions(+), 15 deletions(-) diff --git a/libbeat/autodiscover/providers/docker/docker_integration_test.go b/libbeat/autodiscover/providers/docker/docker_integration_test.go index bbb2bc979bcc..898f3cd254cb 100644 --- a/libbeat/autodiscover/providers/docker/docker_integration_test.go +++ b/libbeat/autodiscover/providers/docker/docker_integration_test.go @@ -36,8 +36,6 @@ import ( // Test docker start emits an autodiscover event func TestDockerStart(t *testing.T) { - t.Skip("#20360 Flaky TestDockerStart skipped") - log := logp.NewLogger("docker") d, err := dk.NewClient() @@ -70,15 +68,17 @@ func TestDockerStart(t *testing.T) { // Start cmd := []string{"echo", "Hi!"} labels := map[string]string{"label": "foo", "label.child": "bar"} - ID, err := d.ContainerStart("busybox", cmd, labels) + ID, err := d.ContainerStart("busybox:latest", cmd, labels) if err != nil { t.Fatal(err) } - checkEvent(t, listener, true) + defer d.ContainerRemove(ID) + + checkEvent(t, listener, ID, true) // Kill d.ContainerKill(ID) - checkEvent(t, listener, false) + checkEvent(t, listener, ID, false) } func getValue(e bus.Event, key string) interface{} { @@ -89,12 +89,13 @@ func getValue(e bus.Event, key string) interface{} { return val } -func checkEvent(t *testing.T, listener bus.Listener, start bool) { +func checkEvent(t *testing.T, listener bus.Listener, id string, start bool) { + timeout := time.After(60 * time.Second) for { select { case e := <-listener.Events(): // Ignore any other container - if getValue(e, "docker.container.image") != "busybox" { + if getValue(e, "container.id") != id { continue } if start { @@ -104,7 +105,7 @@ func checkEvent(t *testing.T, listener bus.Listener, start bool) { assert.Equal(t, getValue(e, "stop"), true) assert.Nil(t, getValue(e, "start")) } - assert.Equal(t, getValue(e, "container.image.name"), "busybox") + assert.Equal(t, getValue(e, "container.image.name"), "busybox:latest") // labels.dedot=true by default assert.Equal(t, common.MapStr{ @@ -122,8 +123,7 @@ func checkEvent(t *testing.T, listener bus.Listener, start bool) { assert.Equal(t, getValue(e, "docker.container.name"), getValue(e, "meta.container.name")) assert.Equal(t, getValue(e, "docker.container.image"), getValue(e, "meta.container.image.name")) return - - case <-time.After(10 * time.Second): + case <-timeout: t.Fatal("Timeout waiting for provider events") return } diff --git a/libbeat/tests/docker/docker.go b/libbeat/tests/docker/docker.go index 888347c5cc7c..8bb5efadbfa4 100644 --- a/libbeat/tests/docker/docker.go +++ b/libbeat/tests/docker/docker.go @@ -19,6 +19,8 @@ package docker import ( "context" + "io" + "io/ioutil" "github.com/pkg/errors" @@ -42,13 +44,12 @@ func NewClient() (Client, error) { // ContainerStart pulls and starts the given container func (c Client) ContainerStart(image string, cmd []string, labels map[string]string) (string, error) { - ctx := context.Background() - respBody, err := c.cli.ImagePull(ctx, image, types.ImagePullOptions{}) + err := c.imagePull(image) if err != nil { - return "", errors.Wrapf(err, "pullling image %s", image) + return "", err } - defer respBody.Close() + ctx := context.Background() resp, err := c.cli.ContainerCreate(ctx, &container.Config{ Image: image, Cmd: cmd, @@ -65,6 +66,36 @@ func (c Client) ContainerStart(image string, cmd []string, labels map[string]str return resp.ID, nil } +// imagePull pulls an image +func (c Client) 
imagePull(image string) (err error) { + ctx := context.Background() + _, _, err = c.cli.ImageInspectWithRaw(ctx, image) + if err == nil { + // Image already available, do nothing + return nil + } + for retry := 0; retry < 3; retry++ { + err = func() error { + respBody, err := c.cli.ImagePull(ctx, image, types.ImagePullOptions{}) + if err != nil { + return errors.Wrapf(err, "pullling image %s", image) + } + defer respBody.Close() + + // Read all the response, to be sure that the pull has finished before returning. + _, err = io.Copy(ioutil.Discard, respBody) + if err != nil { + return errors.Wrapf(err, "reading response for image %s", image) + } + return nil + }() + if err == nil { + break + } + } + return +} + // ContainerWait waits for a container to finish func (c Client) ContainerWait(ID string) error { ctx := context.Background() @@ -89,7 +120,7 @@ func (c Client) ContainerKill(ID string) error { return c.cli.ContainerKill(ctx, ID, "KILL") } -// ContainerRemove kills and removed the given container +// ContainerRemove kills and removes the given container func (c Client) ContainerRemove(ID string) error { ctx := context.Background() return c.cli.ContainerRemove(ctx, ID, types.ContainerRemoveOptions{ From 6955665227cb13504c52a01d3cbccd0a28c7ed9e Mon Sep 17 00:00:00 2001 From: Alex K <8418476+fearful-symmetry@users.noreply.github.com> Date: Mon, 19 Oct 2020 13:24:56 -0700 Subject: [PATCH 06/27] fix diskio and memory bugs under windows (#21992) --- metricbeat/module/system/diskio/diskio.go | 3 ++- metricbeat/module/system/memory/memory.go | 6 +++--- metricbeat/module/system/process/process.go | 4 +++- 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/metricbeat/module/system/diskio/diskio.go b/metricbeat/module/system/diskio/diskio.go index 1359180cff60..4a7e2e2b5fec 100644 --- a/metricbeat/module/system/diskio/diskio.go +++ b/metricbeat/module/system/diskio/diskio.go @@ -21,6 +21,7 @@ package diskio import ( "fmt" + "runtime" "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/metric/system/diskio" @@ -114,7 +115,7 @@ func (m *MetricSet) Fetch(r mb.ReporterV2) error { diskWriteBytes += counters.WriteBytes //Add linux-only data if agent is off as not to make breaking changes. - if !m.IsAgent { + if !m.IsAgent && runtime.GOOS == "linux" { result, err := m.statistics.CalcIOStatistics(counters) if err != nil { return errors.Wrap(err, "error calculating iostat") diff --git a/metricbeat/module/system/memory/memory.go b/metricbeat/module/system/memory/memory.go index 27e76b854895..26c6bea18678 100644 --- a/metricbeat/module/system/memory/memory.go +++ b/metricbeat/module/system/memory/memory.go @@ -42,7 +42,7 @@ func init() { // MetricSet for fetching system memory metrics. type MetricSet struct { mb.BaseMetricSet - IsFleet bool + IsAgent bool } // New is a mb.MetricSetFactory that returns a memory.MetricSet. @@ -53,7 +53,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { return nil, fmt.Errorf("unexpected module type") } - return &MetricSet{BaseMetricSet: base, IsFleet: systemModule.IsAgent}, nil + return &MetricSet{BaseMetricSet: base, IsAgent: systemModule.IsAgent}, nil } // Fetch fetches memory metrics from the OS. 
@@ -117,7 +117,7 @@ func (m *MetricSet) Fetch(r mb.ReporterV2) error { } // for backwards compatibility, only report if we're not in fleet mode - if !m.IsFleet { + if !m.IsAgent { err := linux.FetchLinuxMemStats(memory) if err != nil { return errors.Wrap(err, "error getting page stats") diff --git a/metricbeat/module/system/process/process.go b/metricbeat/module/system/process/process.go index 804c62d06d62..c99ffaa1123f 100644 --- a/metricbeat/module/system/process/process.go +++ b/metricbeat/module/system/process/process.go @@ -156,10 +156,12 @@ func (m *MetricSet) Fetch(r mb.ReporterV2) error { // There's some more Windows memory quirks we need to deal with. // "rss" is a linux concept, but "wss" is a direct match on Windows. // "share" is also unavailable on Windows. + if runtime.GOOS == "windows" { + proc.Delete("memory.share") + } if m.IsAgent { if runtime.GOOS == "windows" { - proc.Delete("memory.share") if setSize := getAndRemove(proc, "memory.rss"); setSize != nil { proc.Put("memory.wss", setSize) } From fa50a44556a2c7d7f78855f98e12bf13848a0f9a Mon Sep 17 00:00:00 2001 From: Mariana Dima Date: Tue, 20 Oct 2020 09:28:30 +0200 Subject: [PATCH 07/27] Azure storage metricset values not inside the metricset name (#21845) * mofidy doc * fix * changelog --- CHANGELOG.next.asciidoc | 1 + x-pack/metricbeat/module/azure/storage/storage.go | 2 ++ 2 files changed, 3 insertions(+) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index fd2970596390..6ead76346b33 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -375,6 +375,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Fix retrieving resources by ID for the azure module. {pull}21711[21711] {issue}21707[21707] - Use timestamp from CloudWatch API when creating events. {pull}21498[21498] - Report the correct windows events for system/filesystem {pull}21758[21758] +- Fix azure storage event format. 
{pull}21845[21845] - Fix panic in kubernetes autodiscover related to keystores {issue}21843[21843] {pull}21880[21880] *Packetbeat* diff --git a/x-pack/metricbeat/module/azure/storage/storage.go b/x-pack/metricbeat/module/azure/storage/storage.go index 9f54871b3194..4178b911d117 100644 --- a/x-pack/metricbeat/module/azure/storage/storage.go +++ b/x-pack/metricbeat/module/azure/storage/storage.go @@ -41,6 +41,8 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { if err != nil { return nil, err } + // set default resource type to indicate this is not the generic monitor metricset + ms.Client.Config.DefaultResourceType = defaultStorageAccountNamespace // if no options are entered we will retrieve all the vm's from the entire subscription if len(ms.Client.Config.Resources) == 0 { ms.Client.Config.Resources = []azure.ResourceConfig{ From e7fd212d8c4974927a295002b399a09401a629f7 Mon Sep 17 00:00:00 2001 From: Michal Pristas Date: Tue, 20 Oct 2020 11:28:32 +0200 Subject: [PATCH 08/27] [Ingest Manager] Always try snapshot repo for agent upgrade (#21951) [Ingest Manager] Always try snapshot repo for agent upgrade (#21951) --- x-pack/elastic-agent/pkg/agent/application/stream.go | 4 ++-- .../pkg/agent/application/upgrade/step_download.go | 4 ++-- x-pack/elastic-agent/pkg/agent/application/upgrade/upgrade.go | 3 --- .../pkg/artifact/download/localremote/downloader.go | 4 ++-- .../pkg/artifact/download/localremote/verifier.go | 4 ++-- 5 files changed, 8 insertions(+), 11 deletions(-) diff --git a/x-pack/elastic-agent/pkg/agent/application/stream.go b/x-pack/elastic-agent/pkg/agent/application/stream.go index 41999fcb8329..784038e77ab7 100644 --- a/x-pack/elastic-agent/pkg/agent/application/stream.go +++ b/x-pack/elastic-agent/pkg/agent/application/stream.go @@ -56,9 +56,9 @@ func streamFactory(ctx context.Context, cfg *configuration.SettingsConfig, srv * } func newOperator(ctx context.Context, log *logger.Logger, id routingKey, config *configuration.SettingsConfig, srv *server.Server, r state.Reporter, m monitoring.Monitor) (*operation.Operator, error) { - fetcher := downloader.NewDownloader(log, config.DownloadConfig) + fetcher := downloader.NewDownloader(log, config.DownloadConfig, false) allowEmptyPgp, pgp := release.PGP() - verifier, err := downloader.NewVerifier(log, config.DownloadConfig, allowEmptyPgp, pgp) + verifier, err := downloader.NewVerifier(log, config.DownloadConfig, allowEmptyPgp, pgp, false) if err != nil { return nil, errors.New(err, "initiating verifier") } diff --git a/x-pack/elastic-agent/pkg/agent/application/upgrade/step_download.go b/x-pack/elastic-agent/pkg/agent/application/upgrade/step_download.go index 3aea96da0ab0..0294308ff3a4 100644 --- a/x-pack/elastic-agent/pkg/agent/application/upgrade/step_download.go +++ b/x-pack/elastic-agent/pkg/agent/application/upgrade/step_download.go @@ -27,12 +27,12 @@ func (u *Upgrader) downloadArtifact(ctx context.Context, version, sourceURI stri } allowEmptyPgp, pgp := release.PGP() - verifier, err := downloader.NewVerifier(u.log, &settings, allowEmptyPgp, pgp) + verifier, err := downloader.NewVerifier(u.log, &settings, allowEmptyPgp, pgp, true) if err != nil { return "", errors.New(err, "initiating verifier") } - fetcher := downloader.NewDownloader(u.log, &settings) + fetcher := downloader.NewDownloader(u.log, &settings, true) path, err := fetcher.Download(ctx, agentName, agentArtifactName, version) if err != nil { return "", errors.New(err, "failed upgrade of agent binary") diff --git 
a/x-pack/elastic-agent/pkg/agent/application/upgrade/upgrade.go b/x-pack/elastic-agent/pkg/agent/application/upgrade/upgrade.go index 1a21bc154a1f..d7e69fc39723 100644 --- a/x-pack/elastic-agent/pkg/agent/application/upgrade/upgrade.go +++ b/x-pack/elastic-agent/pkg/agent/application/upgrade/upgrade.go @@ -183,9 +183,6 @@ func (u *Upgrader) Ack(ctx context.Context) error { } func (u *Upgrader) sourceURI(version, retrievedURI string) (string, error) { - if strings.HasSuffix(version, "-SNAPSHOT") && retrievedURI == "" { - return "", errors.New("snapshot upgrade requires source uri", errors.TypeConfig) - } if retrievedURI != "" { return retrievedURI, nil } diff --git a/x-pack/elastic-agent/pkg/artifact/download/localremote/downloader.go b/x-pack/elastic-agent/pkg/artifact/download/localremote/downloader.go index 6448af25aca0..ba82195ffbd1 100644 --- a/x-pack/elastic-agent/pkg/artifact/download/localremote/downloader.go +++ b/x-pack/elastic-agent/pkg/artifact/download/localremote/downloader.go @@ -17,12 +17,12 @@ import ( // NewDownloader creates a downloader which first checks local directory // and then fallbacks to remote if configured. -func NewDownloader(log *logger.Logger, config *artifact.Config) download.Downloader { +func NewDownloader(log *logger.Logger, config *artifact.Config, forceSnapshot bool) download.Downloader { downloaders := make([]download.Downloader, 0, 3) downloaders = append(downloaders, fs.NewDownloader(config)) // try snapshot repo before official - if release.Snapshot() { + if release.Snapshot() || forceSnapshot { snapDownloader, err := snapshot.NewDownloader(config) if err != nil { log.Error(err) diff --git a/x-pack/elastic-agent/pkg/artifact/download/localremote/verifier.go b/x-pack/elastic-agent/pkg/artifact/download/localremote/verifier.go index 4f33cbbdb8e2..30517d12d3d2 100644 --- a/x-pack/elastic-agent/pkg/artifact/download/localremote/verifier.go +++ b/x-pack/elastic-agent/pkg/artifact/download/localremote/verifier.go @@ -17,7 +17,7 @@ import ( // NewVerifier creates a downloader which first checks local directory // and then fallbacks to remote if configured. 
-func NewVerifier(log *logger.Logger, config *artifact.Config, allowEmptyPgp bool, pgp []byte) (download.Verifier, error) { +func NewVerifier(log *logger.Logger, config *artifact.Config, allowEmptyPgp bool, pgp []byte, forceSnapshot bool) (download.Verifier, error) { verifiers := make([]download.Verifier, 0, 3) fsVer, err := fs.NewVerifier(config, allowEmptyPgp, pgp) @@ -27,7 +27,7 @@ func NewVerifier(log *logger.Logger, config *artifact.Config, allowEmptyPgp bool verifiers = append(verifiers, fsVer) // try snapshot repo before official - if release.Snapshot() { + if release.Snapshot() || forceSnapshot { snapshotVerifier, err := snapshot.NewVerifier(config, allowEmptyPgp, pgp) if err != nil { log.Error(err) From 0d5ef7b3ccdbd4c83d2a2df018b654164383ecee Mon Sep 17 00:00:00 2001 From: Chris Mark Date: Tue, 20 Oct 2020 12:49:18 +0300 Subject: [PATCH 09/27] [Kubernetes] Remove redundant dockersock volume mount (#22009) --- CHANGELOG.next.asciidoc | 1 + deploy/kubernetes/metricbeat-kubernetes.yaml | 5 ----- deploy/kubernetes/metricbeat/metricbeat-daemonset.yaml | 5 ----- 3 files changed, 1 insertion(+), 10 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 6ead76346b33..fa8d1fc2791e 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -377,6 +377,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Report the correct windows events for system/filesystem {pull}21758[21758] - Fix azure storage event format. {pull}21845[21845] - Fix panic in kubernetes autodiscover related to keystores {issue}21843[21843] {pull}21880[21880] +- [Kubernetes] Remove redundant dockersock volume mount {pull}22009[22009] *Packetbeat* diff --git a/deploy/kubernetes/metricbeat-kubernetes.yaml b/deploy/kubernetes/metricbeat-kubernetes.yaml index 32d1010f4d0f..db1eb25d7a54 100644 --- a/deploy/kubernetes/metricbeat-kubernetes.yaml +++ b/deploy/kubernetes/metricbeat-kubernetes.yaml @@ -189,8 +189,6 @@ spec: - name: modules mountPath: /usr/share/metricbeat/modules.d readOnly: true - - name: dockersock - mountPath: /var/run/docker.sock - name: proc mountPath: /hostfs/proc readOnly: true @@ -204,9 +202,6 @@ spec: - name: cgroup hostPath: path: /sys/fs/cgroup - - name: dockersock - hostPath: - path: /var/run/docker.sock - name: config configMap: defaultMode: 0640 diff --git a/deploy/kubernetes/metricbeat/metricbeat-daemonset.yaml b/deploy/kubernetes/metricbeat/metricbeat-daemonset.yaml index 0197fe136b6d..34bcf5360680 100644 --- a/deploy/kubernetes/metricbeat/metricbeat-daemonset.yaml +++ b/deploy/kubernetes/metricbeat/metricbeat-daemonset.yaml @@ -64,8 +64,6 @@ spec: - name: modules mountPath: /usr/share/metricbeat/modules.d readOnly: true - - name: dockersock - mountPath: /var/run/docker.sock - name: proc mountPath: /hostfs/proc readOnly: true @@ -79,9 +77,6 @@ spec: - name: cgroup hostPath: path: /sys/fs/cgroup - - name: dockersock - hostPath: - path: /var/run/docker.sock - name: config configMap: defaultMode: 0640 From 0bb45f25cc4de6849ec419f2cecfca2aaa193cf7 Mon Sep 17 00:00:00 2001 From: Victor Martinez Date: Tue, 20 Oct 2020 13:57:25 +0100 Subject: [PATCH 10/27] [beats-tester][packaging] store packages in another location (#21903) --- .ci/beats-tester.groovy | 3 +++ .ci/packaging.groovy | 11 ++++++++++- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/.ci/beats-tester.groovy b/.ci/beats-tester.groovy index eb1357700b62..91781a98d31c 100644 --- a/.ci/beats-tester.groovy +++ b/.ci/beats-tester.groovy @@ -54,6 +54,7 @@ pipeline { 
 options { skipDefaultCheckout() }
 when { branch 'master' }
 steps {
+ // TODO: to use the git commit that triggered the upstream build
 runBeatsTesterJob(version: "${env.VERSION}-SNAPSHOT")
 }
 }
@@ -61,6 +62,7 @@ pipeline {
 options { skipDefaultCheckout() }
 when { branch '*.x' }
 steps {
+ // TODO: to use the git commit that triggered the upstream build
 runBeatsTesterJob(version: "${env.VERSION}-SNAPSHOT")
 }
 }
@@ -84,6 +86,7 @@ pipeline {
 }
 }
 steps {
+ // TODO: to use the git commit that triggered the upstream build
 runBeatsTesterJob(version: "${env.VERSION}-SNAPSHOT")
 }
 }
diff --git a/.ci/packaging.groovy b/.ci/packaging.groovy
index 4145ee6bdd13..8936de2fb3e1 100644
--- a/.ci/packaging.groovy
+++ b/.ci/packaging.groovy
@@ -327,7 +327,16 @@ def publishPackages(baseDir){
 bucketUri = "gs://${JOB_GCS_BUCKET}/pull-requests/pr-${env.CHANGE_ID}"
 }
 def beatsFolderName = getBeatsName(baseDir)
- googleStorageUpload(bucket: "${bucketUri}/${beatsFolderName}",
+ uploadPackages("${bucketUri}/${beatsFolderName}", baseDir)
+
+ // Copy those files to another location with the sha commit to test them
+ // aftewords.
+ bucketUri = "gs://${JOB_GCS_BUCKET}/commits/${env.GIT_BASE_COMMIT}"
+ uploadPackages("${bucketUri}/${beatsFolderName}", baseDir)
+}
+
+def uploadPackages(bucketUri, baseDir){
+ googleStorageUpload(bucket: bucketUri,
 credentialsId: "${JOB_GCS_CREDENTIALS}",
 pathPrefix: "${baseDir}/build/distributions/",
 pattern: "${baseDir}/build/distributions/**/*",

From 38add00bffb8565c1b5e0cfe17776934af3ef525 Mon Sep 17 00:00:00 2001
From: Chris Mark
Date: Tue, 20 Oct 2020 16:49:11 +0300
Subject: [PATCH 11/27] Fix Istio docs (#22019)

Signed-off-by: chrismark
---
 metricbeat/docs/modules/istio.asciidoc | 4 ++--
 x-pack/metricbeat/module/istio/_meta/docs.asciidoc | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/metricbeat/docs/modules/istio.asciidoc b/metricbeat/docs/modules/istio.asciidoc
index c80e2d84c098..fee401e19830 100644
--- a/metricbeat/docs/modules/istio.asciidoc
+++ b/metricbeat/docs/modules/istio.asciidoc
@@ -10,8 +10,8 @@ beta[]
 This is the Istio module. This module is compatible with versions
 before `1.5` of Istio where microservices architecture is used. If using
-versions priot to `1.5` then `mesh`, `mixer`, `pilot`, `galley`, `citadel` metricsets should be used.
-where the Istio module collects metrics from the
+versions prior to `1.5` then `mesh`, `mixer`, `pilot`, `galley`, `citadel` metricsets should be used where the Istio
+module collects metrics from the
 Istio https://istio.io/v1.4/docs/tasks/observability/metrics/querying-metrics/#about-the-prometheus-add-on[prometheus exporters endpoints].
 For versions after `1.5`, `istiod` and `proxy` metricsets can be used.
diff --git a/x-pack/metricbeat/module/istio/_meta/docs.asciidoc b/x-pack/metricbeat/module/istio/_meta/docs.asciidoc
index f3b1825a9b1b..39eb93b40955 100644
--- a/x-pack/metricbeat/module/istio/_meta/docs.asciidoc
+++ b/x-pack/metricbeat/module/istio/_meta/docs.asciidoc
@@ -1,7 +1,7 @@
 This is the Istio module. This module is compatible with versions
 before `1.5` of Istio where microservices architecture is used. If using
-versions priot to `1.5` then `mesh`, `mixer`, `pilot`, `galley`, `citadel` metricsets should be used.
-where the Istio module collects metrics from the
+versions prior to `1.5` then `mesh`, `mixer`, `pilot`, `galley`, `citadel` metricsets should be used where the Istio
+module collects metrics from the
 Istio https://istio.io/v1.4/docs/tasks/observability/metrics/querying-metrics/#about-the-prometheus-add-on[prometheus exporters endpoints].
 For versions after `1.5`, `istiod` and `proxy` metricsets can be used.

From 37dc557b2c04ab4c87eceea5271b6733e23d356e Mon Sep 17 00:00:00 2001
From: Lee Hinman <57081003+leehinman@users.noreply.github.com>
Date: Tue, 20 Oct 2020 09:16:44 -0500
Subject: [PATCH 12/27] dynamically find librpm (#21936)

- use elf header of rpm binary to find version of librpm
- use librpm.so as fallback, provided by rpm-devel

Closes #19287
---
 .../module/system/package/rpm_linux.go | 63 ++++++++++++++-----
 1 file changed, 47 insertions(+), 16 deletions(-)

diff --git a/x-pack/auditbeat/module/system/package/rpm_linux.go b/x-pack/auditbeat/module/system/package/rpm_linux.go
index fa6fc66f4cdc..6e5df7e0c6ec 100644
--- a/x-pack/auditbeat/module/system/package/rpm_linux.go
+++ b/x-pack/auditbeat/module/system/package/rpm_linux.go
@@ -10,9 +10,12 @@ import (
 "errors"
 "fmt"
 "runtime"
+ "strings"
 "time"
 "unsafe"
+ "debug/elf"
+
 "github.com/coreos/pkg/dlopen"
 )
@@ -204,29 +207,57 @@ func (lib *librpm) close() error {
 return nil
 }
-func openLibrpm() (*librpm, error) {
- var librpmNames = []string{
- "librpm.so", // with rpm-devel installed
- "librpm.so.9", // Fedora 31/32
- "librpm.so.8", // Fedora 29/30
- "librpm.so.3", // CentOS 7
- "librpm.so.1", // CentOS 6
-
- // Following for completeness, but not explicitly tested
- "librpm.so.10",
- "librpm.so.7",
- "librpm.so.6",
- "librpm.so.5",
- "librpm.so.4",
- "librpm.so.2",
+// getLibrpmNames determines the versions of librpm.so that are
+// installed on a system. rpm-devel rpm installs the librpm.so
+// symbolic link to the correct version of librpm, but that isn't a
+// required package. rpm will install librpm.so.X, where X is the
+// version number. getLibrpmNames looks at the elf header for the rpm
+// binary to determine what version of librpm.so it is linked against.
+func getLibrpmNames() []string {
+ var rpmPaths = []string{
+ "/usr/bin/rpm",
+ "/bin/rpm",
+ }
+ var libNames = []string{
+ "librpm.so",
+ }
+ var rpmElf *elf.File
+ var err error
+
+ for _, path := range rpmPaths {
+ rpmElf, err = elf.Open(path)
+ if err == nil {
+ break
+ }
+ }
+ if err != nil {
+ return libNames
+ }
+
+ impLibs, err := rpmElf.ImportedLibraries()
+ if err != nil {
+ return libNames
+ }
+
+ for _, lib := range impLibs {
+ if strings.Contains(lib, "librpm.so") {
+ libNames = append(libNames, lib)
+ }
+ }
+
+ return libNames
+}
+
+func openLibrpm() (*librpm, error) {
 var librpm librpm
 var err error
+ librpmNames := getLibrpmNames()
+
 librpm.handle, err = dlopen.GetHandle(librpmNames)
 if err != nil {
- return nil, err
+ return nil, fmt.Errorf("Couldn't open %v", librpmNames)
 }
 librpm.rpmtsCreate, err = librpm.handle.GetSymbolPointer("rpmtsCreate")

From 283641ec6ad66e09c2bf04be85b062764c6ce711 Mon Sep 17 00:00:00 2001
From: EamonnTP
Date: Tue, 20 Oct 2020 16:08:23 +0100
Subject: [PATCH 13/27] Update links (#22012)
---
 libbeat/docs/getting-started.asciidoc | 4 ++--
 libbeat/docs/howto/load-dashboards.asciidoc | 4 ++--
 libbeat/docs/overview.asciidoc | 4 ++--
 3 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/libbeat/docs/getting-started.asciidoc b/libbeat/docs/getting-started.asciidoc
index b1a85fddb466..5291f755e5b8 100644
--- a/libbeat/docs/getting-started.asciidoc
+++ b/libbeat/docs/getting-started.asciidoc
@@ -13,5 +13,5 @@ Each Beat is a separately installable product. To learn how to get started, see:
 * {winlogbeat-ref}/winlogbeat-installation-configuration.html[Winlogbeat]
 If you're planning to use the {metrics-app} or the {logs-app} in {kib},
-also see the {metrics-guide}[Metrics Monitoring Guide]
-and the {logs-guide}[Logs Monitoring Guide].
+see {observability-guide}/analyze-metrics.html[Analyze metrics]
+and {observability-guide}/monitor-logs.html[Monitor logs].
\ No newline at end of file
diff --git a/libbeat/docs/howto/load-dashboards.asciidoc b/libbeat/docs/howto/load-dashboards.asciidoc
index 781789d3ae46..c03b512d6362 100644
--- a/libbeat/docs/howto/load-dashboards.asciidoc
+++ b/libbeat/docs/howto/load-dashboards.asciidoc
@@ -15,8 +15,8 @@ ifdef::has_solutions[]
 TIP: For deeper observability into your infrastructure, you can use the
 {metrics-app} and the {logs-app} in {kib}.
-For more details, see the {metrics-guide}[Metrics Monitoring Guide]
-and the {logs-guide}[Logs Monitoring Guide].
+For more details, see {observability-guide}/analyze-metrics.html[Analyze metrics]
+and {observability-guide}/monitor-logs.html[Monitor logs].
 endif::has_solutions[]
 {beatname_uc} comes packaged with example Kibana dashboards, visualizations,
diff --git a/libbeat/docs/overview.asciidoc b/libbeat/docs/overview.asciidoc
index 11dc10f2b8fc..bdc46aaaf280 100644
--- a/libbeat/docs/overview.asciidoc
+++ b/libbeat/docs/overview.asciidoc
@@ -28,8 +28,8 @@ To get started, see <>.
 Want to get up and running quickly with infrastructure metrics monitoring and
 centralized log analytics? Try out the {metrics-app} and the {logs-app} in {kib}.
-For more details, see the {metrics-guide}[Metrics Monitoring Guide]
-and the {logs-guide}[Logs Monitoring Guide].
+For more details, see {observability-guide}/analyze-metrics.html[Analyze metrics]
+and {observability-guide}/monitor-logs.html[Monitor logs].
 [float]
 === Need to capture other kinds of data?
From e0d06541847dd27b04d2ac328dadf73ac7f883d3 Mon Sep 17 00:00:00 2001
From: Andrew Kroh
Date: Tue, 20 Oct 2020 11:52:56 -0400
Subject: [PATCH 14/27] Document auditbeat system process module config (#21766)

The documentation for the system/process dataset was missing information on the configuration options.

Closes #16869
---
 x-pack/auditbeat/docs/modules/system.asciidoc | 2 +-
 .../module/system/_meta/docs.asciidoc | 2 +-
 .../module/system/process/_meta/docs.asciidoc | 22 ++++++++++++++++++-
 3 files changed, 23 insertions(+), 3 deletions(-)

diff --git a/x-pack/auditbeat/docs/modules/system.asciidoc b/x-pack/auditbeat/docs/modules/system.asciidoc
index 15eafc34116b..e850c0651974 100644
--- a/x-pack/auditbeat/docs/modules/system.asciidoc
+++ b/x-pack/auditbeat/docs/modules/system.asciidoc
@@ -97,7 +97,7 @@ This module also supports the <>
 described later.
-*`state.period`*:: The frequency at which the datasets send full state information.
+*`state.period`*:: The interval at which the datasets send full state information.
 This option can be overridden per dataset using `{dataset}.state.period`.
 *`user.detect_password_changes`*:: If the `user` dataset is configured and
diff --git a/x-pack/auditbeat/module/system/_meta/docs.asciidoc b/x-pack/auditbeat/module/system/_meta/docs.asciidoc
index 083435d94aea..a2a36987c513 100644
--- a/x-pack/auditbeat/module/system/_meta/docs.asciidoc
+++ b/x-pack/auditbeat/module/system/_meta/docs.asciidoc
@@ -90,7 +90,7 @@ This module also supports the <>
 described later.
-*`state.period`*:: The frequency at which the datasets send full state information.
+*`state.period`*:: The interval at which the datasets send full state information.
 This option can be overridden per dataset using `{dataset}.state.period`.
 *`user.detect_password_changes`*:: If the `user` dataset is configured and
diff --git a/x-pack/auditbeat/module/system/process/_meta/docs.asciidoc b/x-pack/auditbeat/module/system/process/_meta/docs.asciidoc
index e1d930e1fbf3..e84f72469338 100644
--- a/x-pack/auditbeat/module/system/process/_meta/docs.asciidoc
+++ b/x-pack/auditbeat/module/system/process/_meta/docs.asciidoc
@@ -2,10 +2,30 @@
 beta[]
-This is the `process` dataset of the system module.
+This is the `process` dataset of the system module. It generates an event when
+a process starts and stops.
 It is implemented for Linux, macOS (Darwin), and Windows.
+
+[float]
+=== Configuration options
+
+*`process.state.period`*:: The interval at which the dataset sends full state
+information. If set this will take precedence over `state.period`. The default
+value is `12h`.
+
+*`process.hash.max_file_size`*:: The maximum size of a file in bytes for which
+{beatname_uc} will compute hashes. Files larger than this size will not be
+hashed. The default value is 100 MiB. For convenience units can be specified as
+a suffix to the value. The supported units are `b` (default), `kib`, `kb`,
+`mib`, `mb`, `gib`, `gb`, `tib`, `tb`, `pib`, `pb`, `eib`, and `eb`.
+
+*`process.hash.hash_types`*:: A list of hash types to compute when the file
+changes. The supported hash types are `blake2b_256`, `blake2b_384`,
+`blake2b_512`, `md5`, `sha1`, `sha224`, `sha256`, `sha384`, `sha512`,
+`sha512_224`, `sha512_256`, `sha3_224`, `sha3_256`, `sha3_384`, `sha3_512`, and
+`xxh64`. The default value is `sha1`.
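The following is only an illustrative sketch (not part of the patch above) of how these documented options might be set in an auditbeat `system` module configuration; the surrounding layout is an assumption, while the option names and default values are taken from the documentation text.

```yaml
# Hypothetical excerpt from an auditbeat modules configuration.
- module: system
  datasets:
    - process
  # Resend full process state information at this interval (documented default: 12h).
  process.state.period: 12h
  # Do not hash files larger than this size (documented default: 100 MiB).
  process.hash.max_file_size: 100 MiB
  # Hash types to compute when a file changes (documented default: sha1).
  process.hash.hash_types: [sha1]
```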
+ [float] ==== Example dashboard From 610e998c121e9453363a0f429c5f8d197eb1350d Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Tue, 20 Oct 2020 12:29:00 -0400 Subject: [PATCH 15/27] [Elastic Agent] Fix missing elastic_agent event data (#21994) * Fix fields. * Remove from monitoring decorator. * Add changelog. * Fix tests. * Fix tests. * Fix import. --- x-pack/elastic-agent/CHANGELOG.next.asciidoc | 1 + .../pkg/agent/application/local_mode.go | 2 +- .../pkg/agent/application/managed_mode.go | 2 +- .../agent/application/monitoring_decorator.go | 1 - .../pkg/agent/application/stream.go | 8 +++-- .../pkg/agent/operation/common_test.go | 4 ++- .../pkg/agent/operation/monitoring.go | 30 +++++++++++++++++++ .../pkg/agent/operation/monitoring_test.go | 4 ++- .../pkg/agent/operation/operator.go | 4 +++ .../testdata/enabled_output_true-filebeat.yml | 8 ++--- .../testdata/enabled_true-filebeat.yml | 8 ++--- .../testdata/single_config-filebeat.yml | 16 +++++----- .../testdata/single_config-metricbeat.yml | 24 +++++++-------- .../pkg/agent/transpiler/rules.go | 8 ++--- .../pkg/agent/transpiler/rules_test.go | 16 +++++----- 15 files changed, 88 insertions(+), 48 deletions(-) diff --git a/x-pack/elastic-agent/CHANGELOG.next.asciidoc b/x-pack/elastic-agent/CHANGELOG.next.asciidoc index fa0198a6628e..3882ba197123 100644 --- a/x-pack/elastic-agent/CHANGELOG.next.asciidoc +++ b/x-pack/elastic-agent/CHANGELOG.next.asciidoc @@ -20,6 +20,7 @@ - Use local temp instead of system one {pull}21883[21883] - Rename monitoring index from `elastic.agent` to `elastic_agent` {pull}21932[21932] - Fix issue with named pipes on Windows 7 {pull}21931[21931] +- Fix missing elastic_agent event data {pull}21994[21994] ==== New features diff --git a/x-pack/elastic-agent/pkg/agent/application/local_mode.go b/x-pack/elastic-agent/pkg/agent/application/local_mode.go index b58e260cab66..f0c4153f474e 100644 --- a/x-pack/elastic-agent/pkg/agent/application/local_mode.go +++ b/x-pack/elastic-agent/pkg/agent/application/local_mode.go @@ -100,7 +100,7 @@ func newLocal( return nil, errors.New(err, "failed to initialize monitoring") } - router, err := newRouter(log, streamFactory(localApplication.bgContext, cfg.Settings, localApplication.srv, reporter, monitor)) + router, err := newRouter(log, streamFactory(localApplication.bgContext, agentInfo, cfg.Settings, localApplication.srv, reporter, monitor)) if err != nil { return nil, errors.New(err, "fail to initialize pipeline router") } diff --git a/x-pack/elastic-agent/pkg/agent/application/managed_mode.go b/x-pack/elastic-agent/pkg/agent/application/managed_mode.go index e38685741c3b..fa31215f75d3 100644 --- a/x-pack/elastic-agent/pkg/agent/application/managed_mode.go +++ b/x-pack/elastic-agent/pkg/agent/application/managed_mode.go @@ -154,7 +154,7 @@ func newManaged( return nil, errors.New(err, "failed to initialize monitoring") } - router, err := newRouter(log, streamFactory(managedApplication.bgContext, cfg.Settings, managedApplication.srv, combinedReporter, monitor)) + router, err := newRouter(log, streamFactory(managedApplication.bgContext, agentInfo, cfg.Settings, managedApplication.srv, combinedReporter, monitor)) if err != nil { return nil, errors.New(err, "fail to initialize pipeline router") } diff --git a/x-pack/elastic-agent/pkg/agent/application/monitoring_decorator.go b/x-pack/elastic-agent/pkg/agent/application/monitoring_decorator.go index 3fc49ef17d37..920b1a4b5bff 100644 --- a/x-pack/elastic-agent/pkg/agent/application/monitoring_decorator.go +++ 
b/x-pack/elastic-agent/pkg/agent/application/monitoring_decorator.go @@ -94,7 +94,6 @@ func getMonitoringRule(outputName string) *transpiler.RuleList { return transpiler.NewRuleList( transpiler.Copy(monitoringOutputSelector, outputKey), transpiler.Rename(fmt.Sprintf("%s.%s", outputsKey, outputName), elasticsearchKey), - transpiler.InjectAgentInfo(), transpiler.Filter(monitoringKey, programsKey, outputKey), ) } diff --git a/x-pack/elastic-agent/pkg/agent/application/stream.go b/x-pack/elastic-agent/pkg/agent/application/stream.go index 784038e77ab7..2d372ef43870 100644 --- a/x-pack/elastic-agent/pkg/agent/application/stream.go +++ b/x-pack/elastic-agent/pkg/agent/application/stream.go @@ -7,6 +7,7 @@ package application import ( "context" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/info" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/configrequest" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/configuration" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/errors" @@ -40,10 +41,10 @@ func (b *operatorStream) Shutdown() { b.configHandler.Shutdown() } -func streamFactory(ctx context.Context, cfg *configuration.SettingsConfig, srv *server.Server, r state.Reporter, m monitoring.Monitor) func(*logger.Logger, routingKey) (stream, error) { +func streamFactory(ctx context.Context, agentInfo *info.AgentInfo, cfg *configuration.SettingsConfig, srv *server.Server, r state.Reporter, m monitoring.Monitor) func(*logger.Logger, routingKey) (stream, error) { return func(log *logger.Logger, id routingKey) (stream, error) { // new operator per stream to isolate processes without using tags - operator, err := newOperator(ctx, log, id, cfg, srv, r, m) + operator, err := newOperator(ctx, log, agentInfo, id, cfg, srv, r, m) if err != nil { return nil, err } @@ -55,7 +56,7 @@ func streamFactory(ctx context.Context, cfg *configuration.SettingsConfig, srv * } } -func newOperator(ctx context.Context, log *logger.Logger, id routingKey, config *configuration.SettingsConfig, srv *server.Server, r state.Reporter, m monitoring.Monitor) (*operation.Operator, error) { +func newOperator(ctx context.Context, log *logger.Logger, agentInfo *info.AgentInfo, id routingKey, config *configuration.SettingsConfig, srv *server.Server, r state.Reporter, m monitoring.Monitor) (*operation.Operator, error) { fetcher := downloader.NewDownloader(log, config.DownloadConfig, false) allowEmptyPgp, pgp := release.PGP() verifier, err := downloader.NewVerifier(log, config.DownloadConfig, allowEmptyPgp, pgp, false) @@ -81,6 +82,7 @@ func newOperator(ctx context.Context, log *logger.Logger, id routingKey, config return operation.NewOperator( ctx, log, + agentInfo, id, config, fetcher, diff --git a/x-pack/elastic-agent/pkg/agent/operation/common_test.go b/x-pack/elastic-agent/pkg/agent/operation/common_test.go index e9d40bece876..ea16cfe77b8d 100644 --- a/x-pack/elastic-agent/pkg/agent/operation/common_test.go +++ b/x-pack/elastic-agent/pkg/agent/operation/common_test.go @@ -13,6 +13,7 @@ import ( "time" "github.com/elastic/beats/v7/libbeat/logp" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/info" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/configuration" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/program" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/stateresolver" @@ -48,6 +49,7 @@ func getTestOperator(t *testing.T, downloadPath string, installPath string, p *a } l := getLogger() + agentInfo, _ := 
info.NewAgentInfo() fetcher := &DummyDownloader{} verifier := &DummyVerifier{} @@ -67,7 +69,7 @@ func getTestOperator(t *testing.T, downloadPath string, installPath string, p *a t.Fatal(err) } - operator, err := NewOperator(context.Background(), l, "p1", operatorCfg, fetcher, verifier, installer, uninstaller, stateResolver, srv, nil, noop.NewMonitor()) + operator, err := NewOperator(context.Background(), l, agentInfo, "p1", operatorCfg, fetcher, verifier, installer, uninstaller, stateResolver, srv, nil, noop.NewMonitor()) if err != nil { t.Fatal(err) } diff --git a/x-pack/elastic-agent/pkg/agent/operation/monitoring.go b/x-pack/elastic-agent/pkg/agent/operation/monitoring.go index 74d542d58e9d..1959cd528186 100644 --- a/x-pack/elastic-agent/pkg/agent/operation/monitoring.go +++ b/x-pack/elastic-agent/pkg/agent/operation/monitoring.go @@ -206,6 +206,16 @@ func (o *Operator) getMonitoringFilebeatConfig(output interface{}) (map[string]i }, }, }, + { + "add_fields": map[string]interface{}{ + "target": "elastic_agent", + "fields": map[string]interface{}{ + "id": o.agentInfo.AgentID(), + "version": o.agentInfo.Version(), + "snapshot": o.agentInfo.Snapshot(), + }, + }, + }, }, }, } @@ -240,6 +250,16 @@ func (o *Operator) getMonitoringFilebeatConfig(output interface{}) (map[string]i }, }, }, + { + "add_fields": map[string]interface{}{ + "target": "elastic_agent", + "fields": map[string]interface{}{ + "id": o.agentInfo.AgentID(), + "version": o.agentInfo.Version(), + "snapshot": o.agentInfo.Snapshot(), + }, + }, + }, }, }) } @@ -290,6 +310,16 @@ func (o *Operator) getMonitoringMetricbeatConfig(output interface{}) (map[string }, }, }, + { + "add_fields": map[string]interface{}{ + "target": "elastic_agent", + "fields": map[string]interface{}{ + "id": o.agentInfo.AgentID(), + "version": o.agentInfo.Version(), + "snapshot": o.agentInfo.Snapshot(), + }, + }, + }, }, }) } diff --git a/x-pack/elastic-agent/pkg/agent/operation/monitoring_test.go b/x-pack/elastic-agent/pkg/agent/operation/monitoring_test.go index eef904096f73..3ca6a5f6b14d 100644 --- a/x-pack/elastic-agent/pkg/agent/operation/monitoring_test.go +++ b/x-pack/elastic-agent/pkg/agent/operation/monitoring_test.go @@ -11,6 +11,7 @@ import ( "github.com/elastic/elastic-agent-client/v7/pkg/proto" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/info" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/configrequest" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/configuration" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/stateresolver" @@ -112,6 +113,7 @@ func getMonitorableTestOperator(t *testing.T, installPath string, m monitoring.M } l := getLogger() + agentInfo, _ := info.NewAgentInfo() fetcher := &DummyDownloader{} verifier := &DummyVerifier{} @@ -128,7 +130,7 @@ func getMonitorableTestOperator(t *testing.T, installPath string, m monitoring.M } ctx := context.Background() - operator, err := NewOperator(ctx, l, "p1", cfg, fetcher, verifier, installer, uninstaller, stateResolver, srv, nil, m) + operator, err := NewOperator(ctx, l, agentInfo, "p1", cfg, fetcher, verifier, installer, uninstaller, stateResolver, srv, nil, m) if err != nil { t.Fatal(err) } diff --git a/x-pack/elastic-agent/pkg/agent/operation/operator.go b/x-pack/elastic-agent/pkg/agent/operation/operator.go index b49382788210..1a39e73500e7 100644 --- a/x-pack/elastic-agent/pkg/agent/operation/operator.go +++ b/x-pack/elastic-agent/pkg/agent/operation/operator.go @@ -12,6 +12,7 @@ import ( "sync" "time" + 
"github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/info" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/configrequest" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/configuration" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/errors" @@ -43,6 +44,7 @@ type Operator struct { bgContext context.Context pipelineID string logger *logger.Logger + agentInfo *info.AgentInfo config *configuration.SettingsConfig handlers map[string]handleFunc stateResolver *stateresolver.StateResolver @@ -66,6 +68,7 @@ type Operator struct { func NewOperator( ctx context.Context, logger *logger.Logger, + agentInfo *info.AgentInfo, pipelineID string, config *configuration.SettingsConfig, fetcher download.Downloader, @@ -85,6 +88,7 @@ func NewOperator( config: config, pipelineID: pipelineID, logger: logger, + agentInfo: agentInfo, downloader: fetcher, verifier: verifier, installer: installer, diff --git a/x-pack/elastic-agent/pkg/agent/program/testdata/enabled_output_true-filebeat.yml b/x-pack/elastic-agent/pkg/agent/program/testdata/enabled_output_true-filebeat.yml index 38b251d95dc0..82a47adc999b 100644 --- a/x-pack/elastic-agent/pkg/agent/program/testdata/enabled_output_true-filebeat.yml +++ b/x-pack/elastic-agent/pkg/agent/program/testdata/enabled_output_true-filebeat.yml @@ -17,11 +17,11 @@ filebeat: fields: dataset: generic - add_fields: - target: "elastic" + target: "elastic_agent" fields: - agent.id: agent-id - agent.version: 8.0.0 - agent.snapshot: false + id: agent-id + version: 8.0.0 + snapshot: false output: elasticsearch: enabled: true diff --git a/x-pack/elastic-agent/pkg/agent/program/testdata/enabled_true-filebeat.yml b/x-pack/elastic-agent/pkg/agent/program/testdata/enabled_true-filebeat.yml index 6e768db6aa4d..1406a2dff65b 100644 --- a/x-pack/elastic-agent/pkg/agent/program/testdata/enabled_true-filebeat.yml +++ b/x-pack/elastic-agent/pkg/agent/program/testdata/enabled_true-filebeat.yml @@ -18,11 +18,11 @@ filebeat: fields: dataset: generic - add_fields: - target: "elastic" + target: "elastic_agent" fields: - agent.id: agent-id - agent.version: 8.0.0 - agent.snapshot: false + id: agent-id + version: 8.0.0 + snapshot: false output: elasticsearch: hosts: diff --git a/x-pack/elastic-agent/pkg/agent/program/testdata/single_config-filebeat.yml b/x-pack/elastic-agent/pkg/agent/program/testdata/single_config-filebeat.yml index 01ee955e4ec3..524d6451f281 100644 --- a/x-pack/elastic-agent/pkg/agent/program/testdata/single_config-filebeat.yml +++ b/x-pack/elastic-agent/pkg/agent/program/testdata/single_config-filebeat.yml @@ -19,11 +19,11 @@ filebeat: fields: dataset: generic - add_fields: - target: "elastic" + target: "elastic_agent" fields: - agent.id: agent-id - agent.version: 8.0.0 - agent.snapshot: false + id: agent-id + version: 8.0.0 + snapshot: false - type: log paths: - /var/log/hello3.log @@ -43,11 +43,11 @@ filebeat: fields: dataset: generic - add_fields: - target: "elastic" + target: "elastic_agent" fields: - agent.id: agent-id - agent.version: 8.0.0 - agent.snapshot: false + id: agent-id + version: 8.0.0 + snapshot: false output: elasticsearch: hosts: diff --git a/x-pack/elastic-agent/pkg/agent/program/testdata/single_config-metricbeat.yml b/x-pack/elastic-agent/pkg/agent/program/testdata/single_config-metricbeat.yml index d09e80accf18..2889e7605eb2 100644 --- a/x-pack/elastic-agent/pkg/agent/program/testdata/single_config-metricbeat.yml +++ b/x-pack/elastic-agent/pkg/agent/program/testdata/single_config-metricbeat.yml @@ -16,11 +16,11 
@@ metricbeat: fields: dataset: docker.status - add_fields: - target: "elastic" + target: "elastic_agent" fields: - agent.id: agent-id - agent.version: 8.0.0 - agent.snapshot: false + id: agent-id + version: 8.0.0 + snapshot: false - module: docker metricsets: [info] index: metrics-generic-default @@ -37,11 +37,11 @@ metricbeat: fields: dataset: generic - add_fields: - target: "elastic" + target: "elastic_agent" fields: - agent.id: agent-id - agent.version: 8.0.0 - agent.snapshot: false + id: agent-id + version: 8.0.0 + snapshot: false - module: apache metricsets: [info] index: metrics-generic-testing @@ -61,11 +61,11 @@ metricbeat: fields: dataset: generic - add_fields: - target: "elastic" + target: "elastic_agent" fields: - agent.id: agent-id - agent.version: 8.0.0 - agent.snapshot: false + id: agent-id + version: 8.0.0 + snapshot: false output: elasticsearch: hosts: [127.0.0.1:9200, 127.0.0.1:9300] diff --git a/x-pack/elastic-agent/pkg/agent/transpiler/rules.go b/x-pack/elastic-agent/pkg/agent/transpiler/rules.go index 29ff1786d1ec..42acd53d21a1 100644 --- a/x-pack/elastic-agent/pkg/agent/transpiler/rules.go +++ b/x-pack/elastic-agent/pkg/agent/transpiler/rules.go @@ -715,11 +715,11 @@ func (r *InjectAgentInfoRule) Apply(agentInfo AgentInfo, ast *AST) error { // elastic.agent processorMap := &Dict{value: make([]Node, 0)} - processorMap.value = append(processorMap.value, &Key{name: "target", value: &StrVal{value: "elastic"}}) + processorMap.value = append(processorMap.value, &Key{name: "target", value: &StrVal{value: "elastic_agent"}}) processorMap.value = append(processorMap.value, &Key{name: "fields", value: &Dict{value: []Node{ - &Key{name: "agent.id", value: &StrVal{value: agentInfo.AgentID()}}, - &Key{name: "agent.version", value: &StrVal{value: agentInfo.Version()}}, - &Key{name: "agent.snapshot", value: &BoolVal{value: agentInfo.Snapshot()}}, + &Key{name: "id", value: &StrVal{value: agentInfo.AgentID()}}, + &Key{name: "version", value: &StrVal{value: agentInfo.Version()}}, + &Key{name: "snapshot", value: &BoolVal{value: agentInfo.Snapshot()}}, }}}) addFieldsMap := &Dict{value: []Node{&Key{"add_fields", processorMap}}} processorsList.value = mergeStrategy("").InjectItem(processorsList.value, addFieldsMap) diff --git a/x-pack/elastic-agent/pkg/agent/transpiler/rules_test.go b/x-pack/elastic-agent/pkg/agent/transpiler/rules_test.go index d92ba0de985b..0fb591078442 100644 --- a/x-pack/elastic-agent/pkg/agent/transpiler/rules_test.go +++ b/x-pack/elastic-agent/pkg/agent/transpiler/rules_test.go @@ -184,11 +184,11 @@ inputs: type: file processors: - add_fields: - target: elastic + target: elastic_agent fields: - agent.id: agent-id - agent.snapshot: false - agent.version: 8.0.0 + id: agent-id + snapshot: false + version: 8.0.0 - name: With processors type: file processors: @@ -197,11 +197,11 @@ inputs: fields: data: more - add_fields: - target: elastic + target: elastic_agent fields: - agent.id: agent-id - agent.snapshot: false - agent.version: 8.0.0 + id: agent-id + snapshot: false + version: 8.0.0 `, rule: &RuleList{ Rules: []Rule{ From a10dca7959a5c09391e853d6e8d3e45bbee0b10f Mon Sep 17 00:00:00 2001 From: kaiyan-sheng Date: Tue, 20 Oct 2020 10:32:11 -0600 Subject: [PATCH 16/27] [Filebeat] Add max_number_of_messages config parameter for S3 input (#21993) --- CHANGELOG.next.asciidoc | 1 + .../docs/inputs/input-aws-s3.asciidoc | 89 ++++++++++--------- x-pack/filebeat/input/s3/collector.go | 18 ++-- x-pack/filebeat/input/s3/config.go | 22 +++-- x-pack/filebeat/input/s3/input.go | 2 +- 5 
files changed, 68 insertions(+), 64 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index fa8d1fc2791e..f27501759696 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -634,6 +634,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Adding support for Microsoft 365 Defender (Microsoft Threat Protection) {pull}21446[21446] - Adding support for FIPS in s3 input {pull}21446[21446] - Add SSL option to checkpoint module {pull}19560[19560] +- Add max_number_of_messages config into s3 input. {pull}21993[21993] *Heartbeat* diff --git a/x-pack/filebeat/docs/inputs/input-aws-s3.asciidoc b/x-pack/filebeat/docs/inputs/input-aws-s3.asciidoc index 5cbe4685cb8f..3ea37b3c754b 100644 --- a/x-pack/filebeat/docs/inputs/input-aws-s3.asciidoc +++ b/x-pack/filebeat/docs/inputs/input-aws-s3.asciidoc @@ -38,24 +38,32 @@ The `s3` input supports the following configuration options plus the <<{beatname_lc}-input-{type}-common-options>> described later. [float] -==== `queue_url` - -URL of the AWS SQS queue that messages will be received from. Required. - -[float] -==== `fips_enabled` - -Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. For example: `s3-fips.us-gov-east-1.amazonaws.com`. +==== `api_timeout` -[float] -==== `visibility_timeout` +The maximum duration of the AWS API call. If it exceeds the timeout, the AWS API +call will be interrupted. +The default AWS API call timeout for a message is 120 seconds. The minimum +is 0 seconds. The maximum is half of the visibility timeout value. -The duration that the received messages are hidden from subsequent -retrieve requests after being retrieved by a ReceiveMessage request. -This value needs to be a lot bigger than {beatname_uc} collection frequency so -if it took too long to read the s3 log, this sqs message will not be reprocessed. -The default visibility timeout for a message is 300 seconds. The minimum -is 0 seconds. The maximum is 12 hours. +["source","json"] +---- +{ + "Records": [ + { + "eventVersion": "1.07", + "eventTime": "2019-11-14T00:51:00Z", + "awsRegion": "us-east-1", + "eventID": "EXAMPLE8-9621-4d00-b913-beca2EXAMPLE", + }, + { + "eventVersion": "1.07", + "eventTime": "2019-11-14T00:52:00Z", + "awsRegion": "us-east-1", + "eventID": "EXAMPLEc-28be-486c-8928-49ce6EXAMPLE", + } + ] +} +---- [float] ==== `expand_event_list_from_field` @@ -93,40 +101,33 @@ file_selectors: - regex: '^AWSLogs/\d+/CloudTrail/' expand_event_list_from_field: 'Records' - regex: '^AWSLogs/\d+/CloudTrail-Digest' -``` ---- +[float] +==== `fips_enabled` + +Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. For example: `s3-fips.us-gov-east-1.amazonaws.com`. [float] -==== `api_timeout` +==== `max_number_of_messages` +The maximum number of messages to return. Amazon SQS never returns more messages +than this value (however, fewer messages might be returned). +Valid values: 1 to 10. Default: 5. -The maximum duration of AWS API can take. If it exceeds the timeout, AWS API -will be interrupted. -The default AWS API timeout for a message is 120 seconds. The minimum -is 0 seconds. The maximum is half of the visibility timeout value. +[float] +==== `queue_url` -["source","json"] ----- -{ - "Records": [ - { - "eventVersion": "1.07", - "eventTime": "2019-11-14T00:51:00Z", - "awsRegion": "us-east-1", - "eventID": "EXAMPLE8-9621-4d00-b913-beca2EXAMPLE", - ... 
- }, - { - "eventVersion": "1.07", - "eventTime": "2019-11-14T00:52:00Z", - "awsRegion": "us-east-1", - "eventID": "EXAMPLEc-28be-486c-8928-49ce6EXAMPLE", - ... - } - ] -} -``` ----- +URL of the AWS SQS queue that messages will be received from. Required. + +[float] +==== `visibility_timeout` + +The duration that the received messages are hidden from subsequent +retrieve requests after being retrieved by a ReceiveMessage request. +This value needs to be a lot bigger than {beatname_uc} collection frequency so +if it took too long to read the s3 log, this sqs message will not be reprocessed. +The default visibility timeout for a message is 300 seconds. The minimum +is 0 seconds. The maximum is 12 hours. [float] ==== `aws credentials` diff --git a/x-pack/filebeat/input/s3/collector.go b/x-pack/filebeat/input/s3/collector.go index 1b8905132845..c3d3114c723b 100644 --- a/x-pack/filebeat/input/s3/collector.go +++ b/x-pack/filebeat/input/s3/collector.go @@ -82,17 +82,11 @@ type s3Context struct { errC chan error } -var ( - // The maximum number of messages to return. Amazon SQS never returns more messages - // than this value (however, fewer messages might be returned). - maxNumberOfMessage uint8 = 10 - - // The duration (in seconds) for which the call waits for a message to arrive - // in the queue before returning. If a message is available, the call returns - // sooner than WaitTimeSeconds. If no messages are available and the wait time - // expires, the call returns successfully with an empty list of messages. - waitTimeSecond uint8 = 10 -) +// The duration (in seconds) for which the call waits for a message to arrive +// in the queue before returning. If a message is available, the call returns +// sooner than WaitTimeSeconds. If no messages are available and the wait time +// expires, the call returns successfully with an empty list of messages. 
+var waitTimeSecond uint8 = 10 func (c *s3Collector) run() { defer c.logger.Info("s3 input worker has stopped.") @@ -205,7 +199,7 @@ func (c *s3Collector) receiveMessage(svcSQS sqsiface.ClientAPI, visibilityTimeou &sqs.ReceiveMessageInput{ QueueUrl: &c.config.QueueURL, MessageAttributeNames: []string{"All"}, - MaxNumberOfMessages: awssdk.Int64(int64(maxNumberOfMessage)), + MaxNumberOfMessages: awssdk.Int64(int64(c.config.MaxNumberOfMessages)), VisibilityTimeout: &visibilityTimeout, WaitTimeSeconds: awssdk.Int64(int64(waitTimeSecond)), }) diff --git a/x-pack/filebeat/input/s3/config.go b/x-pack/filebeat/input/s3/config.go index cc3c5318289a..6dc0746ce5ff 100644 --- a/x-pack/filebeat/input/s3/config.go +++ b/x-pack/filebeat/input/s3/config.go @@ -13,13 +13,14 @@ import ( ) type config struct { + APITimeout time.Duration `config:"api_timeout"` + ExpandEventListFromField string `config:"expand_event_list_from_field"` + FileSelectors []FileSelectorCfg `config:"file_selectors"` + FipsEnabled bool `config:"fips_enabled"` + MaxNumberOfMessages int `config:"max_number_of_messages"` QueueURL string `config:"queue_url" validate:"nonzero,required"` VisibilityTimeout time.Duration `config:"visibility_timeout"` - FipsEnabled bool `config:"fips_enabled"` AwsConfig awscommon.ConfigAWS `config:",inline"` - ExpandEventListFromField string `config:"expand_event_list_from_field"` - APITimeout time.Duration `config:"api_timeout"` - FileSelectors []FileSelectorCfg `config:"file_selectors"` } // FileSelectorCfg defines type and configuration of FileSelectors @@ -31,9 +32,10 @@ type FileSelectorCfg struct { func defaultConfig() config { return config{ - VisibilityTimeout: 300 * time.Second, - APITimeout: 120 * time.Second, - FipsEnabled: false, + APITimeout: 120 * time.Second, + FipsEnabled: false, + MaxNumberOfMessages: 5, + VisibilityTimeout: 300 * time.Second, } } @@ -42,10 +44,12 @@ func (c *config) Validate() error { return fmt.Errorf("visibility timeout %v is not within the "+ "required range 0s to 12h", c.VisibilityTimeout) } + if c.APITimeout < 0 || c.APITimeout > c.VisibilityTimeout/2 { return fmt.Errorf("api timeout %v needs to be larger than"+ " 0s and smaller than half of the visibility timeout", c.APITimeout) } + for i := range c.FileSelectors { r, err := regexp.Compile(c.FileSelectors[i].RegexString) if err != nil { @@ -53,5 +57,9 @@ func (c *config) Validate() error { } c.FileSelectors[i].Regex = r } + + if c.MaxNumberOfMessages > 10 || c.MaxNumberOfMessages < 1 { + return fmt.Errorf(" max_number_of_messages %v needs to be between 1 and 10", c.MaxNumberOfMessages) + } return nil } diff --git a/x-pack/filebeat/input/s3/input.go b/x-pack/filebeat/input/s3/input.go index d76e5b8b728d..36f160d759e5 100644 --- a/x-pack/filebeat/input/s3/input.go +++ b/x-pack/filebeat/input/s3/input.go @@ -106,7 +106,7 @@ func (in *s3Input) createCollector(ctx v2.Context, pipeline beat.Pipeline) (*s3C } log.Debug("s3 service name = ", s3Servicename) - + log.Debug("s3 input config max_number_of_messages = ", in.config.MaxNumberOfMessages) return &s3Collector{ cancellation: ctxtool.FromCanceller(ctx.Cancelation), logger: log, From 5935293e6efa2bb3900fe31d58111f7e557e795a Mon Sep 17 00:00:00 2001 From: Luca Belluccini Date: Wed, 21 Oct 2020 02:22:16 +0200 Subject: [PATCH 17/27] [DOC] Add firewall as possible troubleshooting issue (#21743) * [DOC] Add firewall as possible troubleshooting issue In case a firewall closes long persistent connections between Beats & Logstash, errors such as `write tcp ... 
write: connection reset by peer` will be reported by a given Beat. This documentation page should be useful to identify this kind of issues. * Update shared-faq.asciidoc Amend * Update libbeat/docs/shared-faq.asciidoc Co-authored-by: DeDe Morton * Update libbeat/docs/shared-faq.asciidoc Co-authored-by: DeDe Morton * Update libbeat/docs/shared-faq.asciidoc Co-authored-by: DeDe Morton * Make title more descriptive Co-authored-by: Luca Belluccini Co-authored-by: DeDe Morton --- libbeat/docs/shared-faq.asciidoc | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/libbeat/docs/shared-faq.asciidoc b/libbeat/docs/shared-faq.asciidoc index 9aa8c3442c1c..d6c48b73aa97 100644 --- a/libbeat/docs/shared-faq.asciidoc +++ b/libbeat/docs/shared-faq.asciidoc @@ -54,6 +54,27 @@ connect to the Lumberjack input plugin. To learn how to install and update plugins, see {logstash-ref}/working-with-plugins.html[Working with plugins]. endif::[] +ifndef::no-output-logstash[] +[[publishing-ls-fails-connection-reset-by-peer]] +=== Publishing to {ls} fails with "connection reset by peer" message + +{beatname_uc} requires a persistent TCP connection to {ls}. If a firewall interferes +with the connection, you might see errors like this: + +[source,shell] +---------------------------------------------------------------------- +Failed to publish events caused by: write tcp ... write: connection reset by peer +---------------------------------------------------------------------- + + +To solve the problem: + +* make sure the firewall is not closing connections between {beatname_uc} and {ls}, or +* set the `ttl` value in the <> to a value that's +lower than the maximum time allowed by the firewall, and set `pipelining` to 0 +(pipelining cannot be enabled when `ttl` is used). +endif::[] + ifndef::no-output-logstash[] [[metadata-missing]] === @metadata is missing in {ls} From 65df4e14ebacfd71fc24564385c5662cd8261786 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Manuel=20de=20la=20Pe=C3=B1a?= Date: Wed, 21 Oct 2020 12:33:23 +0200 Subject: [PATCH 18/27] feat: package aliases for snapshots (#21960) * feat: push aliases for docker images * feat: build alias for snapshots * fix: only update alias on snapshots Co-authored-by: Jaime Soriano Pastor * fix: wrong image name for alias * fix: reuse variable as groovy does not hide variables by scope * chore: extract common logic to a method * Revert "fix: only update alias on snapshots" This reverts commit cff2cef82cb107bfddeca5caf225a9307db72135. * Revert "feat: build alias for snapshots" This reverts commit 707e0d71556553b15388adec0c7118ff89210ac9. 
* chore: do not push aliases for PRs Co-authored-by: Jaime Soriano Pastor --- .ci/packaging.groovy | 56 ++++++++++++++++++++++++++++---------------- 1 file changed, 36 insertions(+), 20 deletions(-) diff --git a/.ci/packaging.groovy b/.ci/packaging.groovy index 8936de2fb3e1..91902595a3cf 100644 --- a/.ci/packaging.groovy +++ b/.ci/packaging.groovy @@ -191,10 +191,14 @@ def pushCIDockerImages(){ } } -def tagAndPush(name){ +def tagAndPush(beatName){ def libbetaVer = sh(label: 'Get libbeat version', script: 'grep defaultBeatVersion ${BASE_DIR}/libbeat/version/version.go|cut -d "=" -f 2|tr -d \\"', returnStdout: true)?.trim() + def aliasVersion = "" if("${env.SNAPSHOT}" == "true"){ + aliasVersion = libbetaVer.substring(0, libbetaVer.lastIndexOf(".")) // remove third number in version + libbetaVer += "-SNAPSHOT" + aliasVersion += "-SNAPSHOT" } def tagName = "${libbetaVer}" @@ -207,25 +211,37 @@ def tagAndPush(name){ // supported image flavours def variants = ["", "-oss", "-ubi8"] variants.each { variant -> - def oldName = "${DOCKER_REGISTRY}/beats/${name}${variant}:${libbetaVer}" - def newName = "${DOCKER_REGISTRY}/observability-ci/${name}${variant}:${tagName}" - def commitName = "${DOCKER_REGISTRY}/observability-ci/${name}${variant}:${env.GIT_BASE_COMMIT}" - - def iterations = 0 - retryWithSleep(retries: 3, seconds: 5, backoff: true) { - iterations++ - def status = sh(label:'Change tag and push', script: """ - docker tag ${oldName} ${newName} - docker push ${newName} - docker tag ${oldName} ${commitName} - docker push ${commitName} - """, returnStatus: true) - - if ( status > 0 && iterations < 3) { - error('tag and push failed, retry') - } else if ( status > 0 ) { - log(level: 'WARN', text: "${name} doesn't have ${variant} docker images. See https://github.com/elastic/beats/pull/21621") - } + doTagAndPush(beatName, variant, libbetaVer, tagName) + doTagAndPush(beatName, variant, libbetaVer, "${env.GIT_BASE_COMMIT}") + + if (!isPR() && aliasVersion != "") { + doTagAndPush(beatName, variant, libbetaVer, aliasVersion) + } + } +} + +/** +* @param beatName name of the Beat +* @param variant name of the variant used to build the docker image name +* @param sourceTag tag to be used as source for the docker tag command, usually under the 'beats' namespace +* @param targetTag tag to be used as target for the docker tag command, usually under the 'observability-ci' namespace +*/ +def doTagAndPush(beatName, variant, sourceTag, targetTag) { + def sourceName = "${DOCKER_REGISTRY}/beats/${beatName}${variant}:${sourceTag}" + def targetName = "${DOCKER_REGISTRY}/observability-ci/${beatName}${variant}:${targetTag}" + + def iterations = 0 + retryWithSleep(retries: 3, seconds: 5, backoff: true) { + iterations++ + def status = sh(label: "Change tag and push ${targetName}", script: """ + docker tag ${sourceName} ${targetName} + docker push ${targetName} + """, returnStatus: true) + + if ( status > 0 && iterations < 3) { + error("tag and push failed for ${beatName}, retry") + } else if ( status > 0 ) { + log(level: 'WARN', text: "${beatName} doesn't have ${variant} docker images. See https://github.com/elastic/beats/pull/21621") } } } From bb50d32ead2945b5c982e7975ab6ef6b3625860a Mon Sep 17 00:00:00 2001 From: William Deurwaarder Date: Wed, 21 Oct 2020 14:45:12 +0200 Subject: [PATCH 19/27] Prevent log input from sending duplicate messages due to file renaming (#21911) Input:Log: Reset TTL of registry state when a file is renamed. 
In some rare cases the registry state is marked for removal (TTL is set to 0) while the file is only renamed. Log detects the renaming of the file and updates the name of the file. As the file still exists it should also update the TTL of the renamed file. --- filebeat/input/log/input.go | 1 + 1 file changed, 1 insertion(+) diff --git a/filebeat/input/log/input.go b/filebeat/input/log/input.go index 365da416ed38..1b203adcf5ea 100644 --- a/filebeat/input/log/input.go +++ b/filebeat/input/log/input.go @@ -566,6 +566,7 @@ func (p *Input) harvestExistingFile(newState file.State, oldState file.State) { logp.Debug("input", "Updating state for renamed file: %s -> %s, Current offset: %v", oldState.Source, newState.Source, oldState.Offset) // Update state because of file rotation oldState.Source = newState.Source + oldState.TTL = newState.TTL err := p.updateState(oldState) if err != nil { logp.Err("File rotation state update error: %s", err) From 374467e49016706dfdd927e04b5ea8a86cebdc66 Mon Sep 17 00:00:00 2001 From: Michal Pristas Date: Wed, 21 Oct 2020 14:56:44 +0200 Subject: [PATCH 20/27] [Ingest Manager] Use ML_SYSTEM to detect if agent is running as a service (#21884) [Ingest Manager] Use ML_SYSTEM to detect if agent is running as a service (#21884) --- x-pack/elastic-agent/CHANGELOG.next.asciidoc | 1 + x-pack/elastic-agent/pkg/agent/install/svc_windows.go | 8 ++++++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/x-pack/elastic-agent/CHANGELOG.next.asciidoc b/x-pack/elastic-agent/CHANGELOG.next.asciidoc index 3882ba197123..7088904a8203 100644 --- a/x-pack/elastic-agent/CHANGELOG.next.asciidoc +++ b/x-pack/elastic-agent/CHANGELOG.next.asciidoc @@ -17,6 +17,7 @@ - Fix issue where inputs without processors defined would panic {pull}21628[21628] - Prevent reporting ecs version twice {pull}21616[21616] - Partial extracted beat result in failure to spawn beat {issue}21718[21718] +- Use ML_SYSTEM to detect if agent is running as a service {pull}21884[21884] - Use local temp instead of system one {pull}21883[21883] - Rename monitoring index from `elastic.agent` to `elastic_agent` {pull}21932[21932] - Fix issue with named pipes on Windows 7 {pull}21931[21931] diff --git a/x-pack/elastic-agent/pkg/agent/install/svc_windows.go b/x-pack/elastic-agent/pkg/agent/install/svc_windows.go index 9084f3b5ea70..a60aadb54944 100644 --- a/x-pack/elastic-agent/pkg/agent/install/svc_windows.go +++ b/x-pack/elastic-agent/pkg/agent/install/svc_windows.go @@ -10,10 +10,14 @@ import ( "golang.org/x/sys/windows" ) +const ( + ML_SYSTEM_RID = 0x4000 +) + // RunningUnderSupervisor returns true when executing Agent is running under // the supervisor processes of the OS. 
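// Background on the constant introduced above: ML_SYSTEM_RID (0x4000) is the RID of the
// System mandatory integrity level, i.e. the SID S-1-16-16384 under
// SECURITY_MANDATORY_LABEL_AUTHORITY. Windows services typically run with this integrity
// label, while interactive processes run at Medium or High, so finding it in the process
// token is what this patch uses to decide that the Agent was started as a service.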
func RunningUnderSupervisor() bool { - serviceSid, err := allocSid(windows.SECURITY_SERVICE_RID) + serviceSid, err := allocSid(ML_SYSTEM_RID) if err != nil { return false } @@ -40,7 +44,7 @@ func RunningUnderSupervisor() bool { func allocSid(subAuth0 uint32) (*windows.SID, error) { var sid *windows.SID - err := windows.AllocateAndInitializeSid(&windows.SECURITY_NT_AUTHORITY, + err := windows.AllocateAndInitializeSid(&windows.SECURITY_MANDATORY_LABEL_AUTHORITY, 1, subAuth0, 0, 0, 0, 0, 0, 0, 0, &sid) if err != nil { return nil, err From fc007701ecc42f7c6dc0e11762029944539fe1b2 Mon Sep 17 00:00:00 2001 From: DeDe Morton Date: Wed, 21 Oct 2020 11:26:19 -0700 Subject: [PATCH 21/27] Fix typo (#19585) (#22061) Co-authored-by: Byungjin Park (BJ) --- heartbeat/docs/monitors/monitor-http.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/heartbeat/docs/monitors/monitor-http.asciidoc b/heartbeat/docs/monitors/monitor-http.asciidoc index ea981ea62b71..33d29dec89a7 100644 --- a/heartbeat/docs/monitors/monitor-http.asciidoc +++ b/heartbeat/docs/monitors/monitor-http.asciidoc @@ -161,7 +161,7 @@ Under `check.response`, specify these options: *`status`*:: A list of expected status codes. 4xx and 5xx codes are considered `down` by default. Other codes are considered `up`. *`headers`*:: The required response headers. -*`body`*:: A list of regular expressions to match the the body output. Only a single expression needs to match. HTTP response +*`body`*:: A list of regular expressions to match the body output. Only a single expression needs to match. HTTP response bodies of up to 100MiB are supported. Example configuration: From ba2b2f935f1c6badc316f62417d87d630991ad2f Mon Sep 17 00:00:00 2001 From: Alex K <8418476+fearful-symmetry@users.noreply.github.com> Date: Wed, 21 Oct 2020 11:43:43 -0700 Subject: [PATCH 22/27] revert WSS process reporting for windows (#22055) * revert WSS process reporting for windows * add changelog --- CHANGELOG.next.asciidoc | 1 + metricbeat/module/system/process/process.go | 15 +-------------- 2 files changed, 2 insertions(+), 14 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index f27501759696..059bbdb1cf6c 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -378,6 +378,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Fix azure storage event format. {pull}21845[21845] - Fix panic in kubernetes autodiscover related to keystores {issue}21843[21843] {pull}21880[21880] - [Kubernetes] Remove redundant dockersock volume mount {pull}22009[22009] +- Revert change to report `process.memory.rss` as `process.memory.wss` on Windows. {pull}22055[22055] *Packetbeat* diff --git a/metricbeat/module/system/process/process.go b/metricbeat/module/system/process/process.go index c99ffaa1123f..141a4a3a62de 100644 --- a/metricbeat/module/system/process/process.go +++ b/metricbeat/module/system/process/process.go @@ -150,24 +150,11 @@ func (m *MetricSet) Fetch(r mb.ReporterV2) error { rootFields.Put("process.args", args) } - // This is a temporary fix until we make these changes global across libbeat - // This logic should happen in libbeat getProcessEvent() - - // There's some more Windows memory quirks we need to deal with. - // "rss" is a linux concept, but "wss" is a direct match on Windows. - // "share" is also unavailable on Windows. + // "share" is unavailable on Windows. 
if runtime.GOOS == "windows" { proc.Delete("memory.share") } - if m.IsAgent { - if runtime.GOOS == "windows" { - if setSize := getAndRemove(proc, "memory.rss"); setSize != nil { - proc.Put("memory.wss", setSize) - } - } - } - e := mb.Event{ RootFields: rootFields, MetricSetFields: proc, From 215f49cf50a079d5f0963eeab3d1336d897c36f9 Mon Sep 17 00:00:00 2001 From: Ichinose Shogo Date: Thu, 22 Oct 2020 17:03:50 +0900 Subject: [PATCH 23/27] Fix the url of reviewdog (#21981) --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index c21c0a7346ee..d64bb07776b7 100644 --- a/Makefile +++ b/Makefile @@ -12,7 +12,7 @@ GOLINT=golint GOLINT_REPO=golang.org/x/lint/golint REVIEWDOG=reviewdog REVIEWDOG_OPTIONS?=-diff "git diff master" -REVIEWDOG_REPO=github.com/haya14busa/reviewdog/cmd/reviewdog +REVIEWDOG_REPO=github.com/reviewdog/reviewdog/cmd/reviewdog XPACK_SUFFIX=x-pack/ # PROJECTS_XPACK_PKG is a list of Beats that have independent packaging support From 69cddaa1a0979a65c0bd8e3362ad69f5f9125652 Mon Sep 17 00:00:00 2001 From: Victor Martinez Date: Thu, 22 Oct 2020 11:38:09 +0100 Subject: [PATCH 24/27] [build][packaging] Add resilience when docker build (#22050) --- dev-tools/mage/dockerbuilder.go | 9 ++++++++- x-pack/elastic-agent/magefile.go | 13 +++++++++++-- 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/dev-tools/mage/dockerbuilder.go b/dev-tools/mage/dockerbuilder.go index 90a994348846..503fcae9cfc2 100644 --- a/dev-tools/mage/dockerbuilder.go +++ b/dev-tools/mage/dockerbuilder.go @@ -26,6 +26,7 @@ import ( "os/exec" "path/filepath" "strings" + "time" "github.com/magefile/mage/sh" "github.com/pkg/errors" @@ -71,7 +72,13 @@ func (b *dockerBuilder) Build() error { tag, err := b.dockerBuild() if err != nil { - return errors.Wrap(err, "failed to build docker") + fmt.Println(">> Building docker images again (after 10 seconds)") + // This sleep is to avoid hitting the docker build issues when resources are not available. + time.Sleep(10) + tag, err = b.dockerBuild() + if err != nil { + return errors.Wrap(err, "failed to build docker") + } } if err := b.dockerSave(tag); err != nil { diff --git a/x-pack/elastic-agent/magefile.go b/x-pack/elastic-agent/magefile.go index a1aaba840fbe..fad5ef935aac 100644 --- a/x-pack/elastic-agent/magefile.go +++ b/x-pack/elastic-agent/magefile.go @@ -513,8 +513,13 @@ func runAgent(env map[string]string) error { } // build docker image - if err := sh.Run("docker", "build", "-t", tag, "."); err != nil { - return err + if err := dockerBuild(tag); err != nil { + fmt.Println(">> Building docker images again (after 10 seconds)") + // This sleep is to avoid hitting the docker build issues when resources are not available. 
+ time.Sleep(10) + if err := dockerBuild(tag); err != nil { + return err + } } } @@ -625,6 +630,10 @@ func copyAll(from, to string) error { }) } +func dockerBuild(tag string) error { + return sh.Run("docker", "build", "-t", tag, ".") +} + func dockerTag() string { const commitLen = 7 tagBase := "elastic-agent" From 5553dc24d26e0a12119083a39df0a904dbb7e2d9 Mon Sep 17 00:00:00 2001 From: Ivan Fernandez Calvo Date: Thu, 22 Oct 2020 13:23:38 +0200 Subject: [PATCH 25/27] docs: Prepare Changelog for 6.8.13 (#22072) (#22079) * docs: Close changelog for 6.8.13 * Apply suggestions from code review Co-authored-by: kuisathaverat Co-authored-by: Ivan Fernandez Calvo # Conflicts: # CHANGELOG.asciidoc # libbeat/docs/release.asciidoc Co-authored-by: Elastic Machine --- CHANGELOG.asciidoc | 40 +++++++++++++++++++++++++++++++++++ libbeat/docs/release.asciidoc | 4 ++++ 2 files changed, 44 insertions(+) diff --git a/CHANGELOG.asciidoc b/CHANGELOG.asciidoc index 1dfbb2fb8897..5c364aeae64b 100644 --- a/CHANGELOG.asciidoc +++ b/CHANGELOG.asciidoc @@ -2575,6 +2575,46 @@ https://github.com/elastic/beats/compare/v6.5.0...v7.0.0-alpha1[View commits] - Added support to calculate certificates' fingerprints (MD5, SHA-1, SHA-256). {issue}8180[8180] - Support new TLS version negotiation introduced in TLS 1.3. {issue}8647[8647]. +[[release-notes-6.8.13]] +=== Beats version 6.8.13 +https://github.com/elastic/beats/compare/v6.8.12...v6.8.13[View commits] + +==== Added + +*Filebeat* + +- Add container image in Kubernetes metadata {pull}13356[13356] {issue}12688[12688] + +[[release-notes-6.8.12]] +=== Beats version 6.8.12 +https://github.com/elastic/beats/compare/v6.8.11...v6.8.12[View commits] + +==== Bugfixes + +*Filebeat* + +- Fix Filebeat OOMs on very long lines {issue}19500[19500], {pull}19552[19552] + +[[release-notes-6.8.11]] +=== Beats version 6.8.11 +https://github.com/elastic/beats/compare/v6.8.10...v6.8.11[View commits] + +==== Bugfixes + +*Metricbeat* + +- Fix bug incorrect parsing of float numbers as integers in Couchbase module {issue}18949[18949] {pull}19055[19055] + +[[release-notes-6.8.10]] +=== Beats version 6.8.10 +https://github.com/elastic/beats/compare/v6.8.9...v6.8.10[View commits] + +==== Bugfixes + +*Affecting all Beats* + +- Fix `add_cloud_metadata` to better support modifying sub-fields with other processors. {pull}13808[13808] + [[release-notes-6.8.9]] === Beats version 6.8.9 https://github.com/elastic/beats/compare/v6.8.8...v6.8.9[View commits] diff --git a/libbeat/docs/release.asciidoc b/libbeat/docs/release.asciidoc index 90dd214787a1..caf94c3bf2d0 100644 --- a/libbeat/docs/release.asciidoc +++ b/libbeat/docs/release.asciidoc @@ -39,6 +39,10 @@ upgrade. 
* <> * <> * <> +* <> +* <> +* <> +* <> * <> * <> * <> From 82c5855d965722f281ad611d11c01898084512bb Mon Sep 17 00:00:00 2001 From: Ivan Fernandez Calvo Date: Thu, 22 Oct 2020 13:23:51 +0200 Subject: [PATCH 26/27] docs: Prepare Changelog for 7.9.3 (#22073) (#22075) * docs: Close changelog for 7.9.3 * Apply suggestions from code review Co-authored-by: kuisathaverat Co-authored-by: Ivan Fernandez Calvo Co-authored-by: Elastic Machine --- CHANGELOG.asciidoc | 24 ++++++++++++++++++++++++ CHANGELOG.next.asciidoc | 4 ++++ libbeat/docs/release.asciidoc | 1 + 3 files changed, 29 insertions(+) diff --git a/CHANGELOG.asciidoc b/CHANGELOG.asciidoc index 5c364aeae64b..349eb49edb3e 100644 --- a/CHANGELOG.asciidoc +++ b/CHANGELOG.asciidoc @@ -3,6 +3,30 @@ :issue: https://github.com/elastic/beats/issues/ :pull: https://github.com/elastic/beats/pull/ +[[release-notes-7.9.3]] +=== Beats version 7.9.3 +https://github.com/elastic/beats/compare/v7.9.2...v7.9.3[View commits] + +==== Bugfixes + +*Affecting all Beats* + +- The `o365input` and `o365` module now recover from an authentication problem or other fatal errors, instead of terminating. {pull}21258[21258] + +*Auditbeat* + +- system/socket: Fixed a crash due to concurrent map read and write. {issue}21192[21192] {pull}21690[21690] + +*Filebeat* + +- Add field limit check for AWS Cloudtrail flattened fields. {pull}21388[21388] {issue}21382[21382] +*Metricbeat* + +- Fix remote_write flaky test. {pull}21173[21173] +- Fix panic in kubernetes autodiscover related to keystores {issue}21843[21843] {pull}21880[21880] +- [Kubernetes] Remove redundant dockersock volume mount {pull}22009[22009] + + [[release-notes-7.9.2]] === Beats version 7.9.2 https://github.com/elastic/beats/compare/v7.9.1...v7.9.2[View commits] diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 059bbdb1cf6c..a2dcaa48f2d5 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -379,6 +379,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Fix panic in kubernetes autodiscover related to keystores {issue}21843[21843] {pull}21880[21880] - [Kubernetes] Remove redundant dockersock volume mount {pull}22009[22009] - Revert change to report `process.memory.rss` as `process.memory.wss` on Windows. {pull}22055[22055] +- Add a switch to the driver definition on SQL module to use pretty names {pull}17378[17378] *Packetbeat* @@ -821,3 +822,6 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d ==== Known Issue *Journalbeat* + + + diff --git a/libbeat/docs/release.asciidoc b/libbeat/docs/release.asciidoc index caf94c3bf2d0..724d8af03c35 100644 --- a/libbeat/docs/release.asciidoc +++ b/libbeat/docs/release.asciidoc @@ -8,6 +8,7 @@ This section summarizes the changes in each release. Also read <> for more detail about changes that affect upgrade. 
+* <> * <> * <> * <> From fb6d8ef3b7e3fb13af5cbd73220ac785aa50dead Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Manuel=20de=20la=20Pe=C3=B1a?= Date: Thu, 22 Oct 2020 15:34:38 +0200 Subject: [PATCH 27/27] chore: use ubuntu 18 as linux agent (#22084) --- .ci/packaging.groovy | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.ci/packaging.groovy b/.ci/packaging.groovy index 91902595a3cf..073c977a22e6 100644 --- a/.ci/packaging.groovy +++ b/.ci/packaging.groovy @@ -43,7 +43,7 @@ pipeline { } stages { stage('Filter build') { - agent { label 'ubuntu && immutable' } + agent { label 'ubuntu-18 && immutable' } when { beforeAgent true anyOf { @@ -98,7 +98,7 @@ pipeline { } stages { stage('Package Linux'){ - agent { label 'ubuntu && immutable' } + agent { label 'ubuntu-18 && immutable' } options { skipDefaultCheckout() } when { beforeAgent true @@ -160,7 +160,7 @@ pipeline { } } stage('Run E2E Tests for Packages'){ - agent { label 'ubuntu && immutable' } + agent { label 'ubuntu-18 && immutable' } options { skipDefaultCheckout() } steps { runE2ETests()