From fc007701ecc42f7c6dc0e11762029944539fe1b2 Mon Sep 17 00:00:00 2001 From: DeDe Morton Date: Wed, 21 Oct 2020 11:26:19 -0700 Subject: [PATCH 01/25] Fix typo (#19585) (#22061) Co-authored-by: Byungjin Park (BJ) --- heartbeat/docs/monitors/monitor-http.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/heartbeat/docs/monitors/monitor-http.asciidoc b/heartbeat/docs/monitors/monitor-http.asciidoc index ea981ea62b71..33d29dec89a7 100644 --- a/heartbeat/docs/monitors/monitor-http.asciidoc +++ b/heartbeat/docs/monitors/monitor-http.asciidoc @@ -161,7 +161,7 @@ Under `check.response`, specify these options: *`status`*:: A list of expected status codes. 4xx and 5xx codes are considered `down` by default. Other codes are considered `up`. *`headers`*:: The required response headers. -*`body`*:: A list of regular expressions to match the the body output. Only a single expression needs to match. HTTP response +*`body`*:: A list of regular expressions to match the body output. Only a single expression needs to match. HTTP response bodies of up to 100MiB are supported. Example configuration: From ba2b2f935f1c6badc316f62417d87d630991ad2f Mon Sep 17 00:00:00 2001 From: Alex K <8418476+fearful-symmetry@users.noreply.github.com> Date: Wed, 21 Oct 2020 11:43:43 -0700 Subject: [PATCH 02/25] revert WSS process reporting for windows (#22055) * revert WSS process reporting for windows * add changelog --- CHANGELOG.next.asciidoc | 1 + metricbeat/module/system/process/process.go | 15 +-------------- 2 files changed, 2 insertions(+), 14 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index f27501759696..059bbdb1cf6c 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -378,6 +378,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Fix azure storage event format. 
{pull}21845[21845] - Fix panic in kubernetes autodiscover related to keystores {issue}21843[21843] {pull}21880[21880] - [Kubernetes] Remove redundant dockersock volume mount {pull}22009[22009] +- Revert change to report `process.memory.rss` as `process.memory.wss` on Windows. {pull}22055[22055] *Packetbeat* diff --git a/metricbeat/module/system/process/process.go b/metricbeat/module/system/process/process.go index c99ffaa1123f..141a4a3a62de 100644 --- a/metricbeat/module/system/process/process.go +++ b/metricbeat/module/system/process/process.go @@ -150,24 +150,11 @@ func (m *MetricSet) Fetch(r mb.ReporterV2) error { rootFields.Put("process.args", args) } - // This is a temporary fix until we make these changes global across libbeat - // This logic should happen in libbeat getProcessEvent() - - // There's some more Windows memory quirks we need to deal with. - // "rss" is a linux concept, but "wss" is a direct match on Windows. - // "share" is also unavailable on Windows. + // "share" is unavailable on Windows. 
if runtime.GOOS == "windows" { proc.Delete("memory.share") } - if m.IsAgent { - if runtime.GOOS == "windows" { - if setSize := getAndRemove(proc, "memory.rss"); setSize != nil { - proc.Put("memory.wss", setSize) - } - } - } - e := mb.Event{ RootFields: rootFields, MetricSetFields: proc, From 215f49cf50a079d5f0963eeab3d1336d897c36f9 Mon Sep 17 00:00:00 2001 From: Ichinose Shogo Date: Thu, 22 Oct 2020 17:03:50 +0900 Subject: [PATCH 03/25] Fix the url of reviewdog (#21981) --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index c21c0a7346ee..d64bb07776b7 100644 --- a/Makefile +++ b/Makefile @@ -12,7 +12,7 @@ GOLINT=golint GOLINT_REPO=golang.org/x/lint/golint REVIEWDOG=reviewdog REVIEWDOG_OPTIONS?=-diff "git diff master" -REVIEWDOG_REPO=github.com/haya14busa/reviewdog/cmd/reviewdog +REVIEWDOG_REPO=github.com/reviewdog/reviewdog/cmd/reviewdog XPACK_SUFFIX=x-pack/ # PROJECTS_XPACK_PKG is a list of Beats that have independent packaging support From 69cddaa1a0979a65c0bd8e3362ad69f5f9125652 Mon Sep 17 00:00:00 2001 From: Victor Martinez Date: Thu, 22 Oct 2020 11:38:09 +0100 Subject: [PATCH 04/25] [build][packaging] Add resilience when docker build (#22050) --- dev-tools/mage/dockerbuilder.go | 9 ++++++++- x-pack/elastic-agent/magefile.go | 13 +++++++++++-- 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/dev-tools/mage/dockerbuilder.go b/dev-tools/mage/dockerbuilder.go index 90a994348846..503fcae9cfc2 100644 --- a/dev-tools/mage/dockerbuilder.go +++ b/dev-tools/mage/dockerbuilder.go @@ -26,6 +26,7 @@ import ( "os/exec" "path/filepath" "strings" + "time" "github.com/magefile/mage/sh" "github.com/pkg/errors" @@ -71,7 +72,13 @@ func (b *dockerBuilder) Build() error { tag, err := b.dockerBuild() if err != nil { - return errors.Wrap(err, "failed to build docker") + fmt.Println(">> Building docker images again (after 10 seconds)") + // This sleep is to avoid hitting the docker build issues when resources are not 
available. + time.Sleep(10) + tag, err = b.dockerBuild() + if err != nil { + return errors.Wrap(err, "failed to build docker") + } } if err := b.dockerSave(tag); err != nil { diff --git a/x-pack/elastic-agent/magefile.go b/x-pack/elastic-agent/magefile.go index a1aaba840fbe..fad5ef935aac 100644 --- a/x-pack/elastic-agent/magefile.go +++ b/x-pack/elastic-agent/magefile.go @@ -513,8 +513,13 @@ func runAgent(env map[string]string) error { } // build docker image - if err := sh.Run("docker", "build", "-t", tag, "."); err != nil { - return err + if err := dockerBuild(tag); err != nil { + fmt.Println(">> Building docker images again (after 10 seconds)") + // This sleep is to avoid hitting the docker build issues when resources are not available. + time.Sleep(10) + if err := dockerBuild(tag); err != nil { + return err + } } } @@ -625,6 +630,10 @@ func copyAll(from, to string) error { }) } +func dockerBuild(tag string) error { + return sh.Run("docker", "build", "-t", tag, ".") +} + func dockerTag() string { const commitLen = 7 tagBase := "elastic-agent" From 5553dc24d26e0a12119083a39df0a904dbb7e2d9 Mon Sep 17 00:00:00 2001 From: Ivan Fernandez Calvo Date: Thu, 22 Oct 2020 13:23:38 +0200 Subject: [PATCH 05/25] docs: Prepare Changelog for 6.8.13 (#22072) (#22079) * docs: Close changelog for 6.8.13 * Apply suggestions from code review Co-authored-by: kuisathaverat Co-authored-by: Ivan Fernandez Calvo # Conflicts: # CHANGELOG.asciidoc # libbeat/docs/release.asciidoc Co-authored-by: Elastic Machine --- CHANGELOG.asciidoc | 40 +++++++++++++++++++++++++++++++++++ libbeat/docs/release.asciidoc | 4 ++++ 2 files changed, 44 insertions(+) diff --git a/CHANGELOG.asciidoc b/CHANGELOG.asciidoc index 1dfbb2fb8897..5c364aeae64b 100644 --- a/CHANGELOG.asciidoc +++ b/CHANGELOG.asciidoc @@ -2575,6 +2575,46 @@ https://github.com/elastic/beats/compare/v6.5.0...v7.0.0-alpha1[View commits] - Added support to calculate certificates' fingerprints (MD5, SHA-1, SHA-256). 
{issue}8180[8180] - Support new TLS version negotiation introduced in TLS 1.3. {issue}8647[8647]. +[[release-notes-6.8.13]] +=== Beats version 6.8.13 +https://github.com/elastic/beats/compare/v6.8.12...v6.8.13[View commits] + +==== Added + +*Filebeat* + +- Add container image in Kubernetes metadata {pull}13356[13356] {issue}12688[12688] + +[[release-notes-6.8.12]] +=== Beats version 6.8.12 +https://github.com/elastic/beats/compare/v6.8.11...v6.8.12[View commits] + +==== Bugfixes + +*Filebeat* + +- Fix Filebeat OOMs on very long lines {issue}19500[19500], {pull}19552[19552] + +[[release-notes-6.8.11]] +=== Beats version 6.8.11 +https://github.com/elastic/beats/compare/v6.8.10...v6.8.11[View commits] + +==== Bugfixes + +*Metricbeat* + +- Fix bug incorrect parsing of float numbers as integers in Couchbase module {issue}18949[18949] {pull}19055[19055] + +[[release-notes-6.8.10]] +=== Beats version 6.8.10 +https://github.com/elastic/beats/compare/v6.8.9...v6.8.10[View commits] + +==== Bugfixes + +*Affecting all Beats* + +- Fix `add_cloud_metadata` to better support modifying sub-fields with other processors. {pull}13808[13808] + [[release-notes-6.8.9]] === Beats version 6.8.9 https://github.com/elastic/beats/compare/v6.8.8...v6.8.9[View commits] diff --git a/libbeat/docs/release.asciidoc b/libbeat/docs/release.asciidoc index 90dd214787a1..caf94c3bf2d0 100644 --- a/libbeat/docs/release.asciidoc +++ b/libbeat/docs/release.asciidoc @@ -39,6 +39,10 @@ upgrade. 
* <> * <> * <> +* <> +* <> +* <> +* <> * <> * <> * <> From 82c5855d965722f281ad611d11c01898084512bb Mon Sep 17 00:00:00 2001 From: Ivan Fernandez Calvo Date: Thu, 22 Oct 2020 13:23:51 +0200 Subject: [PATCH 06/25] docs: Prepare Changelog for 7.9.3 (#22073) (#22075) * docs: Close changelog for 7.9.3 * Apply suggestions from code review Co-authored-by: kuisathaverat Co-authored-by: Ivan Fernandez Calvo Co-authored-by: Elastic Machine --- CHANGELOG.asciidoc | 24 ++++++++++++++++++++++++ CHANGELOG.next.asciidoc | 4 ++++ libbeat/docs/release.asciidoc | 1 + 3 files changed, 29 insertions(+) diff --git a/CHANGELOG.asciidoc b/CHANGELOG.asciidoc index 5c364aeae64b..349eb49edb3e 100644 --- a/CHANGELOG.asciidoc +++ b/CHANGELOG.asciidoc @@ -3,6 +3,30 @@ :issue: https://github.com/elastic/beats/issues/ :pull: https://github.com/elastic/beats/pull/ +[[release-notes-7.9.3]] +=== Beats version 7.9.3 +https://github.com/elastic/beats/compare/v7.9.2...v7.9.3[View commits] + +==== Bugfixes + +*Affecting all Beats* + +- The `o365input` and `o365` module now recover from an authentication problem or other fatal errors, instead of terminating. {pull}21258[21258] + +*Auditbeat* + +- system/socket: Fixed a crash due to concurrent map read and write. {issue}21192[21192] {pull}21690[21690] + +*Filebeat* + +- Add field limit check for AWS Cloudtrail flattened fields. {pull}21388[21388] {issue}21382[21382] +*Metricbeat* + +- Fix remote_write flaky test. 
{pull}21173[21173] +- Fix panic in kubernetes autodiscover related to keystores {issue}21843[21843] {pull}21880[21880] +- [Kubernetes] Remove redundant dockersock volume mount {pull}22009[22009] + + [[release-notes-7.9.2]] === Beats version 7.9.2 https://github.com/elastic/beats/compare/v7.9.1...v7.9.2[View commits] diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 059bbdb1cf6c..a2dcaa48f2d5 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -379,6 +379,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Fix panic in kubernetes autodiscover related to keystores {issue}21843[21843] {pull}21880[21880] - [Kubernetes] Remove redundant dockersock volume mount {pull}22009[22009] - Revert change to report `process.memory.rss` as `process.memory.wss` on Windows. {pull}22055[22055] +- Add a switch to the driver definition on SQL module to use pretty names {pull}17378[17378] *Packetbeat* @@ -821,3 +822,6 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d ==== Known Issue *Journalbeat* + + + diff --git a/libbeat/docs/release.asciidoc b/libbeat/docs/release.asciidoc index caf94c3bf2d0..724d8af03c35 100644 --- a/libbeat/docs/release.asciidoc +++ b/libbeat/docs/release.asciidoc @@ -8,6 +8,7 @@ This section summarizes the changes in each release. Also read <> for more detail about changes that affect upgrade. 
+* <> * <> * <> * <> From fb6d8ef3b7e3fb13af5cbd73220ac785aa50dead Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Manuel=20de=20la=20Pe=C3=B1a?= Date: Thu, 22 Oct 2020 15:34:38 +0200 Subject: [PATCH 07/25] chore: use ubuntu 18 as linux agent (#22084) --- .ci/packaging.groovy | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.ci/packaging.groovy b/.ci/packaging.groovy index 91902595a3cf..073c977a22e6 100644 --- a/.ci/packaging.groovy +++ b/.ci/packaging.groovy @@ -43,7 +43,7 @@ pipeline { } stages { stage('Filter build') { - agent { label 'ubuntu && immutable' } + agent { label 'ubuntu-18 && immutable' } when { beforeAgent true anyOf { @@ -98,7 +98,7 @@ pipeline { } stages { stage('Package Linux'){ - agent { label 'ubuntu && immutable' } + agent { label 'ubuntu-18 && immutable' } options { skipDefaultCheckout() } when { beforeAgent true @@ -160,7 +160,7 @@ pipeline { } } stage('Run E2E Tests for Packages'){ - agent { label 'ubuntu && immutable' } + agent { label 'ubuntu-18 && immutable' } options { skipDefaultCheckout() } steps { runE2ETests() From 9aefcfe692961b5cc309ad888d5960e83a3c25f8 Mon Sep 17 00:00:00 2001 From: Michal Pristas Date: Thu, 22 Oct 2020 17:10:20 +0200 Subject: [PATCH 08/25] [Ingest Manager] Use symlink path for reexecutions (#21835) [Ingest Manager] Use symlink path for reexecutions (#21835) --- x-pack/elastic-agent/CHANGELOG.next.asciidoc | 1 + x-pack/elastic-agent/pkg/agent/cmd/run.go | 20 +++++++++++++++++++- 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/x-pack/elastic-agent/CHANGELOG.next.asciidoc b/x-pack/elastic-agent/CHANGELOG.next.asciidoc index 7088904a8203..b6a870e0259b 100644 --- a/x-pack/elastic-agent/CHANGELOG.next.asciidoc +++ b/x-pack/elastic-agent/CHANGELOG.next.asciidoc @@ -17,6 +17,7 @@ - Fix issue where inputs without processors defined would panic {pull}21628[21628] - Prevent reporting ecs version twice {pull}21616[21616] - Partial extracted beat result in failure to spawn beat 
{issue}21718[21718] +- Use symlink path for reexecutions {pull}21835[21835] - Use ML_SYSTEM to detect if agent is running as a service {pull}21884[21884] - Use local temp instead of system one {pull}21883[21883] - Rename monitoring index from `elastic.agent` to `elastic_agent` {pull}21932[21932] diff --git a/x-pack/elastic-agent/pkg/agent/cmd/run.go b/x-pack/elastic-agent/pkg/agent/cmd/run.go index 84dd8bd8a9aa..b014cd69084b 100644 --- a/x-pack/elastic-agent/pkg/agent/cmd/run.go +++ b/x-pack/elastic-agent/pkg/agent/cmd/run.go @@ -9,6 +9,7 @@ import ( "fmt" "os" "os/signal" + "path/filepath" "syscall" "github.com/spf13/cobra" @@ -26,6 +27,10 @@ import ( "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/release" ) +const ( + agentName = "elastic-agent" +) + func newRunCommandWithArgs(flags *globalFlags, _ []string, streams *cli.IOStreams) *cobra.Command { return &cobra.Command{ Use: "run", @@ -87,7 +92,7 @@ func run(flags *globalFlags, streams *cli.IOStreams) error { // Windows: Mark se logger.Warn("Artifact has been build with security disabled. 
Elastic Agent will not verify signatures of used artifacts.") } - execPath, err := os.Executable() + execPath, err := reexecPath() if err != nil { return err } @@ -146,3 +151,16 @@ func run(flags *globalFlags, streams *cli.IOStreams) error { // Windows: Mark se rex.ShutdownComplete() return err } + +func reexecPath() (string, error) { + // set executable path to symlink instead of binary + // in case of updated symlinks we should spin up new agent + potentialReexec := filepath.Join(paths.Top(), agentName) + + // in case it does not exists fallback to executable + if _, err := os.Stat(potentialReexec); os.IsNotExist(err) { + return os.Executable() + } + + return potentialReexec, nil +} From daed8f9361d6c2708d84d3764a5c9ae52b042238 Mon Sep 17 00:00:00 2001 From: Andrew Kroh Date: Thu, 22 Oct 2020 11:49:26 -0400 Subject: [PATCH 09/25] Remove suricata.eve.timestamp alias (#22095) Remove the suricata.eve.timestamp alias field from the Suricata module. This is a breaking change for anything that we dependent upon the field, but its presence caused issue in Kibana since it was always displayed in Discover. Fixes #10535 --- CHANGELOG.next.asciidoc | 1 + filebeat/docs/fields.asciidoc | 9 --------- x-pack/filebeat/module/suricata/eve/_meta/fields.yml | 4 ---- x-pack/filebeat/module/suricata/fields.go | 2 +- 4 files changed, 2 insertions(+), 14 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index a2dcaa48f2d5..1bf2cc8f762d 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -80,6 +80,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Add support for GMT timezone offsets in `decode_cef`. {pull}20993[20993] - Fix parsing of Elasticsearch node name by `elasticsearch/slowlog` fileset. {pull}14547[14547] - API address and shard ID are required settings in the Cloud Foundry input. {pull}21759[21759] +- Remove `suricata.eve.timestamp` alias field. 
{issue}10535[10535] {pull}22095[22095] *Heartbeat* diff --git a/filebeat/docs/fields.asciidoc b/filebeat/docs/fields.asciidoc index b4f6a158ad70..b1ee49fed5ca 100644 --- a/filebeat/docs/fields.asciidoc +++ b/filebeat/docs/fields.asciidoc @@ -143952,15 +143952,6 @@ type: keyword -- -*`suricata.eve.timestamp`*:: -+ --- -type: alias - -alias to: @timestamp - --- - *`suricata.eve.in_iface`*:: + -- diff --git a/x-pack/filebeat/module/suricata/eve/_meta/fields.yml b/x-pack/filebeat/module/suricata/eve/_meta/fields.yml index 45980b888b00..dffb86e7ebe6 100644 --- a/x-pack/filebeat/module/suricata/eve/_meta/fields.yml +++ b/x-pack/filebeat/module/suricata/eve/_meta/fields.yml @@ -176,10 +176,6 @@ - name: http_content_type type: keyword - - name: timestamp - type: alias - path: '@timestamp' - - name: in_iface type: keyword diff --git a/x-pack/filebeat/module/suricata/fields.go b/x-pack/filebeat/module/suricata/fields.go index 4cba3d5ee745..105704a1cc35 100644 --- a/x-pack/filebeat/module/suricata/fields.go +++ b/x-pack/filebeat/module/suricata/fields.go @@ -19,5 +19,5 @@ func init() { // AssetSuricata returns asset data. // This is the base64 encoded gzipped contents of module/suricata. 
func AssetSuricata() string { - return "eJzsXEuP67YV3t9fod2sYjRpExSzKLpJFwXaLgJ0SxyTRxJjvi5J2eP++kKyxyNbpCw+MEWTO6uBZ/jxvHhePPR3zQHPr40bLKfg4UvTeO4Fvja/fHzC0FHLjedavTZ/+dI0TfMPzQaBTatt04Nigquu8T02P//75+bvv/zrn43QnWuM1WygyJr9+Ya3+9I0LUfB3OuE9F2jQOIdBeOPPxt8bTqrB3P9JEDF+PO3CatprZYTBe/7TKQI3TUtF7i7/vt84/nmeMTbZ6G9V/af0YBvRlt/YXchjNmCRyoeKFGejBTc/fmdqAOeT9qy29+CGGAMMVZ7TbTlXT6Opya4+FEyMZ7ukEgr4E4Km8n5gDGujwDstRYI6hnAjQ7iaRkpQA9lpLizKgTw4B+NJJGJmTgKNWOdL+Om5Vni+FgukKtWV7JX18P3ZQIZCRp/i6CA4LAUuQHfX5buxl+fqu+NcBbZQGjVfYIFOa8txmjYqPoOTMz6th6FHn748acyTiT7sVAU/D/Z2r5bGzRwTqWJh4W4tt/XM3SejOEpuD5E4IW8cR1XMEa93d3y4C7O0pxNnB4sxQ34U1RLBFfoT9oedt6Cchu2oGAIVWEWnst5lAAPh82n/M/WhXWoHpdnx+OlIaU6fFsDY8VDbsVgHzljJkSpG/VeFKy2VLNCGWRQfwtTQp+W3CfkiCiBP7KfHXU9+CErEbnzcsnHb+7jnp3ByQ8HVLZV4L33tZLqVXGthZyRhp1FZ7RyuLvA3PMUs1Vk3CKNpXobzXXcngwOLYEOF452CwMfi3djhcMViGebTpGD6thJTSHdYos2X+xfB3R+N4HYGU5kx8HGSF4VkBWbJdNr5zMz1XEbpiXwpxmaQNX5WPG23Vb3mp13+7NHt0lTEn2vY85xs6oeUNY2pFr5SP2eVnlzic6DTPVkL3+9rXx54scU4S3Qki6DwEiil+7KKHjstD0XZuB4RMt9DGVN4VPbZbcAiPrBY0HE78qKNt4p8IMtzBmAjuEuW1J68FTLpwHjRmxOhnUDWXR9ss1M8HjACcHFAB/iCjmidSFxblTKjFfd+hNYLEX8OBL2GA1WvyGO5zlRrSKJglk5Z3nCO6BVKIgBesBAz22LI1iAMasDDZQsKN7mg90yFjPGoAIPx9DHc8w8sYdiVRpRgby9jCSJksJyWZraJMrBLWWdwljLBZKpjVeVPW1QkRG7zJgYthYerzAKBQ9vZAQlPS8+gdwc/xSFCJMXJ/A+/dNDkLpt9M3Ua6GTqKpgWQTnUO5FoOO7FW0muWXD9pvknh9XoU9Vj4OnY0Wb60Vmbp9VcGblHhEl2o5IzZCg8hi6nMgG1KFUKg1teaWaisCpXDs6CSBxz7UNxBkIBI1PD2AhkRbltw4HpkkLPHxak0TkFLmY9ZSllWpNObSeMPAw2aIAM5FZSiS6MfEujYUXuZXSclahm/V8OSltJYgqYgpc2H+asc+ZO4LgjNAe6cENslj5U5CraKVXy6yjQ8Gdr6G80HxCoqC8RZCEofE9sQi0L/YOt6zgTKqYxgyvK1bAu4NhvG1J8JItDU9pEsxd0qoBqlnldgYcO2IOngSv79NYvG9M52GUJwgW8oQ88+SmlIajAEW+cvW1EOdFDUK8FIIIP6JEQfKLjUG5wVxG/sId/62UzkR/8MRrTZyEIMlZsaJQfl1xlrdaJleoFzea9aFGvf8T4YpUoIabtYOab5Tv+QE3Kx3VZH+g//flIVtcVjUVxFX3wI1+rzTPoIFGY2KeCW+1QtpUQZcm9dKI0oPnVhz4/1vdjNaZYjNB36NVWJrVjp65lkNDxD//4YfvYXnNn5TjBQrR8j476YTeQ6kNXbHCc7Of37kX+kRkVzcdtvrkyH5wy0v0NP5G4hy5tmOrYCldB23i0B24McUFHBXaISPGDqoYS+GpDtBFWhalPhZj7c9mLCkrsTiNINdj8WoO
hKvy0vmCOPVUysv6EUrCm8BSJ28rE4XSBKZjUnwOGEMEnCvX4KuNgYLLoGDakXwPFMzHUlGC98cZxExdaRLOplPBmHIVkcKdi1QkJ2vKqQ5N0uXXUGtjTDkoXAY7e8kKo2gNraX9C1gtA9hno9wO/ts3b/I7dwB1Tttv75jErTl+PtbH5JT2e2zj80NsXixFMLhzA1oWm0fdOuyoeBlArGGWRsXlOpVYdIMsfRvZctWhNZZHx1a3D4LyQP2bhKG0h9ZHc78tinbD/tfiByS/wh/TegQMWxiEJ5Mhj8mBCBQMzyOA85ar5TBanP4lRA+rfmm7AH6H/M9rkMsQcmTUdxNKoObI9YDTvR/xenW8fG2mfv4AbdPrFuchMM/6fKPrM4e71ZEtzMFX4+lx3DmyJXQlY8MVXrO/K3J1an6N6etD3k06tAiuNNZclVRK70b9oIpFsy2uf5rBzouHi5MfPLjPn4AvHgGGU6PAe+CtadXMNz1+wUiCbwrkwLm+yVLjyUJgiWYmgQvSWr0cNkqC6VFkEbIULr4ZpEtjSnL/y2+jeS7j/wYAAP//9F32EA==" + return "eJzsXEuP47gRvs+v0G1Pa2Q32UXQh9w2hwBJDgvkSpTJksQxX0NSdju/PpDsdssWKYsPdLAz06eGu/mxXqwXi/6xOeD5pXGD5RQ8fGoaz73Al+b3908YOmq58Vyrl+Zvn5qmaf6p2SCwabVtelBMcNU1vsfmt//81vzj93//qxG6c42xmg0UWbM/3/B2n5qm5SiYe5mQfmwUSLyjYPzxZ4MvTWf1YK6fBKgYf/4+YTWt1XKi4G2fiRShu6blAnfXf59vPN8cj3j7LLT3yv4zGvDVaOsv7C6EMVvwSMUDJcqTkYK7P78RdcDzSVt2+1sQA4whxmqviba8y8fx1AQXP0omxtMdEmkF3ElhMznvMMb1EYC91gJBPQO40UE8LSMF6KGMFHdWhQAe/KORJDIxE0ehZqzzZdy0PEsc78sFctXqSvbqevipTCAjQeNvERQQHJYiN+D7y9Ld+OtT9b0SziIbCK26D7Ag57XFGA0bVd+BiVnf1qPQw8+//FrGiWS/FIqC/zdb23drgwbOqTTxsBDX9tt6hs6TMTwF14cIvJA3ruMKxqi3u1se3MVZmrOJ04OluAF/imqJ4Ar9SdvDzltQbsMWFAyhKszCczmPEuDhsPmU/9m6sA7V4/LseLw0pFSHb2tgrHjIrRjsPWfMhCh1o96LgtWWalYogwzqb2FK6NOS+4QcESXwR/azo64HP2QlIndeLvn4zX3cszM4+eGAyrYKvPe+VlK9Kq61kDPSsLPojFYOdxeYe55itoqMW6SxVG+juY7bk8GhJdDhwtFuYeB98W6scLgC8WzTKXJQHTupKaRbbNHmi/3LgM7vJhA7w4nsONgYyasCsmKzZHrtfGamOm7DtAT+NEMTqDofK9622+pes/Nuf/boNmlKou91zDluVtUDytqGVCsfqd+T/BhXhLdAS3oAAiNpWLqjoeCx0/ZcmB/jES33MZQ1dUxNkd0CIOqljgXxuCsrqXinwA+2MKIDHYNRtqT04KmWT935jdic/OcGsujJZJuZ4PFwEIKLAT54fXJE60Li3KiUGa+69SewWIr4fiTsMRpKviKO5xlLrRKGglk5Z3nCO6BVKIgBesBAR2yLI1iAMasD7Y0sKN7mg93yCeN5NNZvAWDo4xlgnthDsSqNqEBWXUaSRElhuSxNbRLl4JayTmGs5QLJ1GSryp42qMiIXWZMDFsLjxcMhYKHVzKCkp4Xn0Bujn+JQoTJixM4q/e5RD0EqdtG30y9FjqJqgqWRXAO5V4E+rFb0WaSW7ZTv0vu+XEV+lT1OHg61pu5XmTm9lkFZ1buEVGi7YjUDAkqj6Grg2xAHUql0tCWF56pCJzKtaOTABL3XNtAnIFA0PjwABYSaVF+63BgmrTAw6c1SUROkYtZT1laqdaUQ+sJAw+TLQow
E5mlRKIbE+/SWHiRWyktZxW6986Xk9JWgqgipsB1+ocZ+5y5IwjOCO2RHtwgi5U/BbmKVnq1zDo6FNz5GsoLTQ8kCspbBEkYGt8Ti0D7Yu9wywrOpIppzPC6YgW8ORjG25YEr8DS8JQmwdwlrRqgmlVuZ8CxI+bgSfByPY3F+7ZxHkZ5gmAhT8gzT25KaTgKUOQLV18KcX5QgxA/FIIIP6JEQfKLjUG5wVwG8sL9+K2UzkR/8MRrTZyEIMlZsaJQfl1xlrdaJleoFzea9aFGvf8r4YpUoIabtYOab5Rv+QE3Kx3VZH+g///lIVtcVjUVxFX3wI1+rzTPoIFGY2KeCa+1QtpUQZcm9dKI0oPnVhz4H61uRutMsZmg79EqLM1qR89cy6Eh4l//9PNPsLyET8rxAoVoeZ+ddELvodSGrljhqdaP79wLfSKyq5sOW31yZD+45SV6Gn8jcY5c27FVsJSugzZx6A7cmOICjgrtkBFjB1WMpfBUB+giLYtSH4ux9mczlpSVWJwGhOuxeDUHwlV56XxBnHoq5WX9CCXhVWCpk7eViUJpAtMxKT4HjCECzpVr8NXGQMFlUDDtSL4HCuZjqSjB++MMYqauNAln06lgTLmKSOHORSqSkzXlVIcm6fJrqLUxphwULoOdvWSFUbSG1tL+BayWAeyzUW4H//W7N/nGHUCd0/b1HZO4NcfPx/qYnNJ+j218fojNi6UIBnduQMti86hbhx0VLwOINczSqLhcpxKLbpClLxdbrjq0xvLo2Or2QVAeqH+TMJT20Ppo7rdF0W7Yfy5+3vEZ/pzWI2DYwiA8mQx5TA5EoGB4HgGct1wth9Hi9C8helj1S9sF8A3yP69BLkPIkVHfTSiBmiPXA073fsTr1fHytZn6+fOwTW9PnIfAPOvzja7PHO5WR7YwB1+Np8dx58iW0JWMDVd4a/6myNWp+TWmr89sN+nQIrjSWHNVUim9G/WDKhbNtrj+aQY7Lx4uTn7w4D5/oL14ohdOjQKvdbemVTPf9Pj1Hwm+KZAD5/omS40nC4ElmpkELkhr9XLYKAmmR5FFyFK4+GqQLo0pyf0vvyvmuYz/FwAA//8GEN89" } From 5d077092d3e0aacfecae81ea307a3c6fda748705 Mon Sep 17 00:00:00 2001 From: kaiyan-sheng Date: Thu, 22 Oct 2020 11:34:32 -0600 Subject: [PATCH 10/25] Add max_number_of_messages into aws filebeat fileset vars (#22057) --- x-pack/filebeat/filebeat.reference.yml | 36 +++++++++++++++++++ x-pack/filebeat/module/aws/_meta/config.yml | 36 +++++++++++++++++++ .../module/aws/cloudtrail/config/s3.yml | 4 +++ .../module/aws/cloudtrail/manifest.yml | 2 ++ .../module/aws/cloudwatch/config/s3.yml | 4 +++ .../module/aws/cloudwatch/manifest.yml | 2 ++ x-pack/filebeat/module/aws/ec2/config/s3.yml | 4 +++ x-pack/filebeat/module/aws/ec2/manifest.yml | 2 ++ x-pack/filebeat/module/aws/elb/config/s3.yml | 4 +++ x-pack/filebeat/module/aws/elb/manifest.yml | 2 ++ .../module/aws/s3access/config/s3.yml | 4 +++ .../filebeat/module/aws/s3access/manifest.yml | 2 ++ .../module/aws/vpcflow/config/input.yml | 
4 +++ .../filebeat/module/aws/vpcflow/manifest.yml | 2 ++ x-pack/filebeat/modules.d/aws.yml.disabled | 36 +++++++++++++++++++ 15 files changed, 144 insertions(+) diff --git a/x-pack/filebeat/filebeat.reference.yml b/x-pack/filebeat/filebeat.reference.yml index 49ede1c7d243..f10a46aa20e1 100644 --- a/x-pack/filebeat/filebeat.reference.yml +++ b/x-pack/filebeat/filebeat.reference.yml @@ -142,6 +142,12 @@ filebeat.modules: # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb + # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. + #var.fips_enabled: false + + # The maximum number of messages to return from SQS. Valid values: 1 to 10. + #var.max_number_of_messages: 5 + cloudwatch: enabled: false @@ -176,6 +182,12 @@ filebeat.modules: # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb + # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. + #var.fips_enabled: false + + # The maximum number of messages to return from SQS. Valid values: 1 to 10. + #var.max_number_of_messages: 5 + ec2: enabled: false @@ -210,6 +222,12 @@ filebeat.modules: # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb + # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. + #var.fips_enabled: false + + # The maximum number of messages to return from SQS. Valid values: 1 to 10. + #var.max_number_of_messages: 5 + elb: enabled: false @@ -244,6 +262,12 @@ filebeat.modules: # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb + # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. + #var.fips_enabled: false + + # The maximum number of messages to return from SQS. Valid values: 1 to 10. 
+ #var.max_number_of_messages: 5 + s3access: enabled: false @@ -278,6 +302,12 @@ filebeat.modules: # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb + # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. + #var.fips_enabled: false + + # The maximum number of messages to return from SQS. Valid values: 1 to 10. + #var.max_number_of_messages: 5 + vpcflow: enabled: false @@ -312,6 +342,12 @@ filebeat.modules: # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb + # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. + #var.fips_enabled: false + + # The maximum number of messages to return from SQS. Valid values: 1 to 10. + #var.max_number_of_messages: 5 + #-------------------------------- Azure Module -------------------------------- - module: azure # All logs diff --git a/x-pack/filebeat/module/aws/_meta/config.yml b/x-pack/filebeat/module/aws/_meta/config.yml index b7e0c25b6744..e4b521e467f0 100644 --- a/x-pack/filebeat/module/aws/_meta/config.yml +++ b/x-pack/filebeat/module/aws/_meta/config.yml @@ -45,6 +45,12 @@ # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb + # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. + #var.fips_enabled: false + + # The maximum number of messages to return from SQS. Valid values: 1 to 10. + #var.max_number_of_messages: 5 + cloudwatch: enabled: false @@ -79,6 +85,12 @@ # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb + # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. + #var.fips_enabled: false + + # The maximum number of messages to return from SQS. Valid values: 1 to 10. 
+ #var.max_number_of_messages: 5 + ec2: enabled: false @@ -113,6 +125,12 @@ # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb + # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. + #var.fips_enabled: false + + # The maximum number of messages to return from SQS. Valid values: 1 to 10. + #var.max_number_of_messages: 5 + elb: enabled: false @@ -147,6 +165,12 @@ # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb + # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. + #var.fips_enabled: false + + # The maximum number of messages to return from SQS. Valid values: 1 to 10. + #var.max_number_of_messages: 5 + s3access: enabled: false @@ -181,6 +205,12 @@ # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb + # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. + #var.fips_enabled: false + + # The maximum number of messages to return from SQS. Valid values: 1 to 10. + #var.max_number_of_messages: 5 + vpcflow: enabled: false @@ -214,3 +244,9 @@ # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb + + # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. + #var.fips_enabled: false + + # The maximum number of messages to return from SQS. Valid values: 1 to 10. 
+ #var.max_number_of_messages: 5 diff --git a/x-pack/filebeat/module/aws/cloudtrail/config/s3.yml b/x-pack/filebeat/module/aws/cloudtrail/config/s3.yml index ac1caacf21ca..d11da6c6a52e 100644 --- a/x-pack/filebeat/module/aws/cloudtrail/config/s3.yml +++ b/x-pack/filebeat/module/aws/cloudtrail/config/s3.yml @@ -55,6 +55,10 @@ role_arn: {{ .role_arn }} fips_enabled: {{ .fips_enabled }} {{ end }} +{{ if .max_number_of_messages }} +max_number_of_messages: {{ .max_number_of_messages }} +{{ end }} + tags: {{.tags | tojson}} publisher_pipeline.disable_host: {{ inList .tags "forwarded" }} diff --git a/x-pack/filebeat/module/aws/cloudtrail/manifest.yml b/x-pack/filebeat/module/aws/cloudtrail/manifest.yml index 732967ff0b0c..03c7acf1336d 100644 --- a/x-pack/filebeat/module/aws/cloudtrail/manifest.yml +++ b/x-pack/filebeat/module/aws/cloudtrail/manifest.yml @@ -21,6 +21,8 @@ var: default: true - name: process_insight_logs default: true + - name: fips_enabled + - name: max_number_of_messages ingest_pipeline: ingest/pipeline.yml input: config/{{.input}}.yml diff --git a/x-pack/filebeat/module/aws/cloudwatch/config/s3.yml b/x-pack/filebeat/module/aws/cloudwatch/config/s3.yml index bdb0ff350f00..7364f997a656 100644 --- a/x-pack/filebeat/module/aws/cloudwatch/config/s3.yml +++ b/x-pack/filebeat/module/aws/cloudwatch/config/s3.yml @@ -41,6 +41,10 @@ role_arn: {{ .role_arn }} fips_enabled: {{ .fips_enabled }} {{ end }} +{{ if .max_number_of_messages }} +max_number_of_messages: {{ .max_number_of_messages }} +{{ end }} + tags: {{.tags | tojson}} publisher_pipeline.disable_host: {{ inList .tags "forwarded" }} diff --git a/x-pack/filebeat/module/aws/cloudwatch/manifest.yml b/x-pack/filebeat/module/aws/cloudwatch/manifest.yml index 2878c79936de..5d9931b2e40a 100644 --- a/x-pack/filebeat/module/aws/cloudwatch/manifest.yml +++ b/x-pack/filebeat/module/aws/cloudwatch/manifest.yml @@ -15,6 +15,8 @@ var: - name: role_arn - name: tags default: [forwarded] + - name: fips_enabled + - name: 
max_number_of_messages ingest_pipeline: ingest/pipeline.yml input: config/{{.input}}.yml diff --git a/x-pack/filebeat/module/aws/ec2/config/s3.yml b/x-pack/filebeat/module/aws/ec2/config/s3.yml index bdb0ff350f00..7364f997a656 100644 --- a/x-pack/filebeat/module/aws/ec2/config/s3.yml +++ b/x-pack/filebeat/module/aws/ec2/config/s3.yml @@ -41,6 +41,10 @@ role_arn: {{ .role_arn }} fips_enabled: {{ .fips_enabled }} {{ end }} +{{ if .max_number_of_messages }} +max_number_of_messages: {{ .max_number_of_messages }} +{{ end }} + tags: {{.tags | tojson}} publisher_pipeline.disable_host: {{ inList .tags "forwarded" }} diff --git a/x-pack/filebeat/module/aws/ec2/manifest.yml b/x-pack/filebeat/module/aws/ec2/manifest.yml index 2878c79936de..5d9931b2e40a 100644 --- a/x-pack/filebeat/module/aws/ec2/manifest.yml +++ b/x-pack/filebeat/module/aws/ec2/manifest.yml @@ -15,6 +15,8 @@ var: - name: role_arn - name: tags default: [forwarded] + - name: fips_enabled + - name: max_number_of_messages ingest_pipeline: ingest/pipeline.yml input: config/{{.input}}.yml diff --git a/x-pack/filebeat/module/aws/elb/config/s3.yml b/x-pack/filebeat/module/aws/elb/config/s3.yml index bdb0ff350f00..7364f997a656 100644 --- a/x-pack/filebeat/module/aws/elb/config/s3.yml +++ b/x-pack/filebeat/module/aws/elb/config/s3.yml @@ -41,6 +41,10 @@ role_arn: {{ .role_arn }} fips_enabled: {{ .fips_enabled }} {{ end }} +{{ if .max_number_of_messages }} +max_number_of_messages: {{ .max_number_of_messages }} +{{ end }} + tags: {{.tags | tojson}} publisher_pipeline.disable_host: {{ inList .tags "forwarded" }} diff --git a/x-pack/filebeat/module/aws/elb/manifest.yml b/x-pack/filebeat/module/aws/elb/manifest.yml index f823ccbacce3..dc95f6abb7ec 100644 --- a/x-pack/filebeat/module/aws/elb/manifest.yml +++ b/x-pack/filebeat/module/aws/elb/manifest.yml @@ -15,6 +15,8 @@ var: - name: role_arn - name: tags default: [forwarded] + - name: fips_enabled + - name: max_number_of_messages ingest_pipeline: ingest/pipeline.yml input: 
config/{{.input}}.yml diff --git a/x-pack/filebeat/module/aws/s3access/config/s3.yml b/x-pack/filebeat/module/aws/s3access/config/s3.yml index bdb0ff350f00..7364f997a656 100644 --- a/x-pack/filebeat/module/aws/s3access/config/s3.yml +++ b/x-pack/filebeat/module/aws/s3access/config/s3.yml @@ -41,6 +41,10 @@ role_arn: {{ .role_arn }} fips_enabled: {{ .fips_enabled }} {{ end }} +{{ if .max_number_of_messages }} +max_number_of_messages: {{ .max_number_of_messages }} +{{ end }} + tags: {{.tags | tojson}} publisher_pipeline.disable_host: {{ inList .tags "forwarded" }} diff --git a/x-pack/filebeat/module/aws/s3access/manifest.yml b/x-pack/filebeat/module/aws/s3access/manifest.yml index 2878c79936de..5d9931b2e40a 100644 --- a/x-pack/filebeat/module/aws/s3access/manifest.yml +++ b/x-pack/filebeat/module/aws/s3access/manifest.yml @@ -15,6 +15,8 @@ var: - name: role_arn - name: tags default: [forwarded] + - name: fips_enabled + - name: max_number_of_messages ingest_pipeline: ingest/pipeline.yml input: config/{{.input}}.yml diff --git a/x-pack/filebeat/module/aws/vpcflow/config/input.yml b/x-pack/filebeat/module/aws/vpcflow/config/input.yml index 628196b7d3ee..de4affbd694c 100644 --- a/x-pack/filebeat/module/aws/vpcflow/config/input.yml +++ b/x-pack/filebeat/module/aws/vpcflow/config/input.yml @@ -43,6 +43,10 @@ role_arn: {{ .role_arn }} fips_enabled: {{ .fips_enabled }} {{ end }} +{{ if .max_number_of_messages }} +max_number_of_messages: {{ .max_number_of_messages }} +{{ end }} + {{ else if eq .input "file" }} type: log diff --git a/x-pack/filebeat/module/aws/vpcflow/manifest.yml b/x-pack/filebeat/module/aws/vpcflow/manifest.yml index c7df14a4050d..19f40c7a3f71 100644 --- a/x-pack/filebeat/module/aws/vpcflow/manifest.yml +++ b/x-pack/filebeat/module/aws/vpcflow/manifest.yml @@ -15,6 +15,8 @@ var: - name: role_arn - name: tags default: [forwarded] + - name: fips_enabled + - name: max_number_of_messages ingest_pipeline: ingest/pipeline.yml input: config/input.yml diff --git 
a/x-pack/filebeat/modules.d/aws.yml.disabled b/x-pack/filebeat/modules.d/aws.yml.disabled index 0fe8465211b3..f3d2ac1f7c9c 100644 --- a/x-pack/filebeat/modules.d/aws.yml.disabled +++ b/x-pack/filebeat/modules.d/aws.yml.disabled @@ -48,6 +48,12 @@ # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb + # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. + #var.fips_enabled: false + + # The maximum number of messages to return from SQS. Valid values: 1 to 10. + #var.max_number_of_messages: 5 + cloudwatch: enabled: false @@ -82,6 +88,12 @@ # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb + # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. + #var.fips_enabled: false + + # The maximum number of messages to return from SQS. Valid values: 1 to 10. + #var.max_number_of_messages: 5 + ec2: enabled: false @@ -116,6 +128,12 @@ # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb + # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. + #var.fips_enabled: false + + # The maximum number of messages to return from SQS. Valid values: 1 to 10. + #var.max_number_of_messages: 5 + elb: enabled: false @@ -150,6 +168,12 @@ # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb + # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. + #var.fips_enabled: false + + # The maximum number of messages to return from SQS. Valid values: 1 to 10. + #var.max_number_of_messages: 5 + s3access: enabled: false @@ -184,6 +208,12 @@ # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb + # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. 
+ #var.fips_enabled: false + + # The maximum number of messages to return from SQS. Valid values: 1 to 10. + #var.max_number_of_messages: 5 + vpcflow: enabled: false @@ -217,3 +247,9 @@ # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb + + # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. + #var.fips_enabled: false + + # The maximum number of messages to return from SQS. Valid values: 1 to 10. + #var.max_number_of_messages: 5 From cc2217ced1dd549dbbed0abbd048caac6150ecf7 Mon Sep 17 00:00:00 2001 From: kaiyan-sheng Date: Thu, 22 Oct 2020 12:45:17 -0600 Subject: [PATCH 11/25] Check context.Canceled and fix s3 input config (#22036) --- .../_meta/config/filebeat.inputs.reference.xpack.yml.tmpl | 4 ++-- x-pack/filebeat/filebeat.reference.yml | 4 ++-- x-pack/filebeat/input/s3/collector.go | 2 ++ x-pack/filebeat/input/s3/input.go | 8 +++++++- 4 files changed, 13 insertions(+), 5 deletions(-) diff --git a/x-pack/filebeat/_meta/config/filebeat.inputs.reference.xpack.yml.tmpl b/x-pack/filebeat/_meta/config/filebeat.inputs.reference.xpack.yml.tmpl index 16964b2c84eb..f083b4c814b6 100644 --- a/x-pack/filebeat/_meta/config/filebeat.inputs.reference.xpack.yml.tmpl +++ b/x-pack/filebeat/_meta/config/filebeat.inputs.reference.xpack.yml.tmpl @@ -67,8 +67,8 @@ #session_token: '${AWS_SESSION_TOKEN:"”}' #credential_profile_name: test-s3-input - # Queue urls (required) to receive queue messages from - #queue_urls: ["https://sqs.us-east-1.amazonaws.com/1234/test-s3-logs-queue"] + # Queue url (required) to receive queue messages from + #queue_url: "https://sqs.us-east-1.amazonaws.com/1234/test-s3-logs-queue" # The duration (in seconds) that the received messages are hidden from subsequent # retrieve requests after being retrieved by a ReceiveMessage request. 
diff --git a/x-pack/filebeat/filebeat.reference.yml b/x-pack/filebeat/filebeat.reference.yml index f10a46aa20e1..80bfacbf2c33 100644 --- a/x-pack/filebeat/filebeat.reference.yml +++ b/x-pack/filebeat/filebeat.reference.yml @@ -2410,8 +2410,8 @@ filebeat.inputs: #session_token: '${AWS_SESSION_TOKEN:"”}' #credential_profile_name: test-s3-input - # Queue urls (required) to receive queue messages from - #queue_urls: ["https://sqs.us-east-1.amazonaws.com/1234/test-s3-logs-queue"] + # Queue url (required) to receive queue messages from + #queue_url: "https://sqs.us-east-1.amazonaws.com/1234/test-s3-logs-queue" # The duration (in seconds) that the received messages are hidden from subsequent # retrieve requests after being retrieved by a ReceiveMessage request. diff --git a/x-pack/filebeat/input/s3/collector.go b/x-pack/filebeat/input/s3/collector.go index c3d3114c723b..9596b5ab23f3 100644 --- a/x-pack/filebeat/input/s3/collector.go +++ b/x-pack/filebeat/input/s3/collector.go @@ -153,8 +153,10 @@ func (c *s3Collector) processorKeepAlive(svcSQS sqsiface.ClientAPI, message sqs. 
for { select { case <-c.cancellation.Done(): + fmt.Println("------- c.cancellation.Done()") return nil case err := <-errC: + fmt.Println("------- err = ", err) if err != nil { if err == context.DeadlineExceeded { c.logger.Info("Context deadline exceeded, updating visibility timeout") diff --git a/x-pack/filebeat/input/s3/input.go b/x-pack/filebeat/input/s3/input.go index 36f160d759e5..a3f19f66327c 100644 --- a/x-pack/filebeat/input/s3/input.go +++ b/x-pack/filebeat/input/s3/input.go @@ -5,6 +5,7 @@ package s3 import ( + "context" "fmt" "github.com/aws/aws-sdk-go-v2/service/s3" @@ -67,7 +68,12 @@ func (in *s3Input) Run(ctx v2.Context, pipeline beat.Pipeline) error { defer collector.publisher.Close() collector.run() - return ctx.Cancelation.Err() + + if ctx.Cancelation.Err() == context.Canceled { + return nil + } else { + return ctx.Cancelation.Err() + } } func (in *s3Input) createCollector(ctx v2.Context, pipeline beat.Pipeline) (*s3Collector, error) { From f33bfd9b5be3b1f9287b22c575d7f9a057eebb96 Mon Sep 17 00:00:00 2001 From: Brandon Morelli Date: Thu, 22 Oct 2020 12:46:36 -0700 Subject: [PATCH 12/25] docs: move kerberos include (#22109) --- auditbeat/docs/configuring-howto.asciidoc | 4 ++++ filebeat/docs/configuring-howto.asciidoc | 4 ++++ heartbeat/docs/configuring-howto.asciidoc | 4 ++++ journalbeat/docs/configuring-howto.asciidoc | 4 ++++ libbeat/docs/outputs-list.asciidoc | 4 ---- metricbeat/docs/configuring-howto.asciidoc | 4 ++++ packetbeat/docs/configuring-howto.asciidoc | 4 ++++ winlogbeat/docs/configuring-howto.asciidoc | 4 ++++ x-pack/functionbeat/docs/configuring-howto.asciidoc | 4 ++++ 9 files changed, 32 insertions(+), 4 deletions(-) diff --git a/auditbeat/docs/configuring-howto.asciidoc b/auditbeat/docs/configuring-howto.asciidoc index 745c58c79976..65938efb9c7e 100644 --- a/auditbeat/docs/configuring-howto.asciidoc +++ b/auditbeat/docs/configuring-howto.asciidoc @@ -42,6 +42,10 @@ include::./reload-configuration.asciidoc[] 
include::{libbeat-dir}/outputconfig.asciidoc[] +ifndef::no_kerberos[] +include::{libbeat-dir}/shared-kerberos-config.asciidoc[] +endif::[] + include::{libbeat-dir}/shared-ssl-config.asciidoc[] include::{libbeat-dir}/shared-ilm.asciidoc[] diff --git a/filebeat/docs/configuring-howto.asciidoc b/filebeat/docs/configuring-howto.asciidoc index ec70fe239429..f09902a0d261 100644 --- a/filebeat/docs/configuring-howto.asciidoc +++ b/filebeat/docs/configuring-howto.asciidoc @@ -44,6 +44,10 @@ include::./reload-configuration.asciidoc[] include::{libbeat-dir}/outputconfig.asciidoc[] +ifndef::no_kerberos[] +include::{libbeat-dir}/shared-kerberos-config.asciidoc[] +endif::[] + include::{libbeat-dir}/shared-ssl-config.asciidoc[] include::../../libbeat/docs/shared-ilm.asciidoc[] diff --git a/heartbeat/docs/configuring-howto.asciidoc b/heartbeat/docs/configuring-howto.asciidoc index f562b8a42c1c..fa312e5d4acc 100644 --- a/heartbeat/docs/configuring-howto.asciidoc +++ b/heartbeat/docs/configuring-howto.asciidoc @@ -38,6 +38,10 @@ include::{libbeat-dir}/shared-path-config.asciidoc[] include::{libbeat-dir}/outputconfig.asciidoc[] +ifndef::no_kerberos[] +include::{libbeat-dir}/shared-kerberos-config.asciidoc[] +endif::[] + include::{libbeat-dir}/shared-ssl-config.asciidoc[] include::{libbeat-dir}/shared-ilm.asciidoc[] diff --git a/journalbeat/docs/configuring-howto.asciidoc b/journalbeat/docs/configuring-howto.asciidoc index 93083ac4cccb..246880468e33 100644 --- a/journalbeat/docs/configuring-howto.asciidoc +++ b/journalbeat/docs/configuring-howto.asciidoc @@ -34,6 +34,10 @@ include::{libbeat-dir}/shared-path-config.asciidoc[] include::{libbeat-dir}/outputconfig.asciidoc[] +ifndef::no_kerberos[] +include::{libbeat-dir}/shared-kerberos-config.asciidoc[] +endif::[] + include::{libbeat-dir}/shared-ssl-config.asciidoc[] include::{libbeat-dir}/shared-ilm.asciidoc[] diff --git a/libbeat/docs/outputs-list.asciidoc b/libbeat/docs/outputs-list.asciidoc index bd3b2878aa6b..4181c10f64f6 100644 
--- a/libbeat/docs/outputs-list.asciidoc +++ b/libbeat/docs/outputs-list.asciidoc @@ -83,9 +83,5 @@ ifdef::requires_xpack[] endif::[] include::{libbeat-outputs-dir}/codec/docs/codec.asciidoc[] endif::[] -ifndef::no_kerberos[] -include::{libbeat-dir}/shared-kerberos-config.asciidoc[] -endif::[] - //# end::outputs-include[] diff --git a/metricbeat/docs/configuring-howto.asciidoc b/metricbeat/docs/configuring-howto.asciidoc index 60f8928df53d..dcacba01f790 100644 --- a/metricbeat/docs/configuring-howto.asciidoc +++ b/metricbeat/docs/configuring-howto.asciidoc @@ -40,6 +40,10 @@ include::{docdir}/../docs/reload-configuration.asciidoc[] include::{libbeat-dir}/outputconfig.asciidoc[] +ifndef::no_kerberos[] +include::{libbeat-dir}/shared-kerberos-config.asciidoc[] +endif::[] + include::{libbeat-dir}/shared-ssl-config.asciidoc[] include::{libbeat-dir}/shared-ilm.asciidoc[] diff --git a/packetbeat/docs/configuring-howto.asciidoc b/packetbeat/docs/configuring-howto.asciidoc index cc9e3c9a9268..8d27edbafd73 100644 --- a/packetbeat/docs/configuring-howto.asciidoc +++ b/packetbeat/docs/configuring-howto.asciidoc @@ -38,6 +38,10 @@ include::{libbeat-dir}/shared-path-config.asciidoc[] include::{libbeat-dir}/outputconfig.asciidoc[] +ifndef::no_kerberos[] +include::{libbeat-dir}/shared-kerberos-config.asciidoc[] +endif::[] + include::{libbeat-dir}/shared-ssl-config.asciidoc[] include::{libbeat-dir}/shared-ilm.asciidoc[] diff --git a/winlogbeat/docs/configuring-howto.asciidoc b/winlogbeat/docs/configuring-howto.asciidoc index 5c1c6086acee..5d9d4758cf8f 100644 --- a/winlogbeat/docs/configuring-howto.asciidoc +++ b/winlogbeat/docs/configuring-howto.asciidoc @@ -35,6 +35,10 @@ include::{libbeat-dir}/shared-path-config.asciidoc[] include::{libbeat-dir}/outputconfig.asciidoc[] +ifndef::no_kerberos[] +include::{libbeat-dir}/shared-kerberos-config.asciidoc[] +endif::[] + include::{libbeat-dir}/shared-ssl-config.asciidoc[] include::{libbeat-dir}/shared-ilm.asciidoc[] diff --git 
a/x-pack/functionbeat/docs/configuring-howto.asciidoc b/x-pack/functionbeat/docs/configuring-howto.asciidoc index 192cb79fea38..3d72f9b5a55c 100644 --- a/x-pack/functionbeat/docs/configuring-howto.asciidoc +++ b/x-pack/functionbeat/docs/configuring-howto.asciidoc @@ -35,6 +35,10 @@ include::./general-options.asciidoc[] [role="xpack"] include::{libbeat-dir}/outputconfig.asciidoc[] +ifndef::no_kerberos[] +include::{libbeat-dir}/shared-kerberos-config.asciidoc[] +endif::[] + [role="xpack"] include::{libbeat-dir}/shared-ssl-config.asciidoc[] From 2e7b90217e54016d9613ae0a1f58cba8a82cba00 Mon Sep 17 00:00:00 2001 From: Fae Charlton Date: Thu, 22 Oct 2020 15:57:56 -0400 Subject: [PATCH 13/25] [libbeat] Add more disk queue unit tests and fix a size-check bug (#22107) --- .../publisher/queue/diskqueue/core_loop.go | 24 +- .../queue/diskqueue/core_loop_test.go | 623 ++++++++++++++++-- libbeat/publisher/queue/diskqueue/queue.go | 5 + 3 files changed, 594 insertions(+), 58 deletions(-) diff --git a/libbeat/publisher/queue/diskqueue/core_loop.go b/libbeat/publisher/queue/diskqueue/core_loop.go index 77f4aadb47f5..ac6e22c52d86 100644 --- a/libbeat/publisher/queue/diskqueue/core_loop.go +++ b/libbeat/publisher/queue/diskqueue/core_loop.go @@ -93,10 +93,10 @@ func (dq *diskQueue) handleProducerWriteRequest(request producerWriteRequest) { // than an entire segment all by itself (as long as it isn't, it is // guaranteed to eventually enter the queue assuming no disk errors). 
frameSize := request.frame.sizeOnDisk() - if dq.settings.MaxSegmentSize < frameSize { + if dq.settings.maxSegmentOffset() < segmentOffset(frameSize) { dq.logger.Warnf( - "Rejecting event with size %v because the maximum segment size is %v", - frameSize, dq.settings.MaxSegmentSize) + "Rejecting event with size %v because the segment buffer limit is %v", + frameSize, dq.settings.maxSegmentOffset()) request.responseChan <- false return } @@ -326,13 +326,19 @@ func (dq *diskQueue) maybeWritePending() { // Nothing to do right now return } + // Remove everything from pendingFrames and forward it to the writer loop. frames := dq.pendingFrames dq.pendingFrames = nil + dq.writerLoop.requestChan <- writerLoopRequest{frames: frames} - dq.writerLoop.requestChan <- writerLoopRequest{ - frames: frames, + // Compute the size of the request so we know how full the queue is going + // to be. + totalSize := uint64(0) + for _, sf := range frames { + totalSize += sf.frame.sizeOnDisk() } + dq.writeRequestSize = totalSize dq.writing = true } @@ -471,8 +477,12 @@ func (dq *diskQueue) canAcceptFrameOfSize(frameSize uint64) bool { // left in the queue after accounting for the existing segments and the // pending writes that were already accepted. pendingBytes := uint64(0) - for _, request := range dq.pendingFrames { - pendingBytes += request.frame.sizeOnDisk() + for _, sf := range dq.pendingFrames { + pendingBytes += sf.frame.sizeOnDisk() + } + // If a writing request is outstanding, include it in the size total. 
+ if dq.writing { + pendingBytes += dq.writeRequestSize } currentSize := pendingBytes + dq.segments.sizeOnDisk() diff --git a/libbeat/publisher/queue/diskqueue/core_loop_test.go b/libbeat/publisher/queue/diskqueue/core_loop_test.go index 309a145968d5..1eb9ff54a157 100644 --- a/libbeat/publisher/queue/diskqueue/core_loop_test.go +++ b/libbeat/publisher/queue/diskqueue/core_loop_test.go @@ -24,76 +24,267 @@ import ( "github.com/elastic/beats/v7/libbeat/logp" ) -func TestProducerWriteRequest(t *testing.T) { - dq := &diskQueue{settings: DefaultSettings()} - frame := &writeFrame{ - serialized: make([]byte, 100), - } - request := producerWriteRequest{ - frame: frame, - shouldBlock: true, - responseChan: make(chan bool, 1), - } - dq.handleProducerWriteRequest(request) - - // The request inserts 100 bytes into an empty queue, so it should succeed. - // We expect: - // - the response channel should contain the value true - // - the frame should be added to pendingFrames and assigned to - // segment 0. - success, ok := <-request.responseChan - if !ok { - t.Error("Expected a response from the producer write request.") - } - if !success { - t.Error("Expected write request to succeed") - } +func TestHandleProducerWriteRequest(t *testing.T) { + // handleProducerWriteRequest should: + // - Immediately reject any frame larger than settings.MaxSegmentSize. + // - If dq.blockedProducers is nonempty (indicating that other frames are + // already waiting for empty space in the queue), or the queue doesn't + // have room for the new frame (see canAcceptFrameOfSize), then it is + // appended to blockedProducers if request.shouldBlock is true, and + // otherwise is rejected immediately. + // - Otherwise, the request is assigned a target segment and appended + // to pendingFrames. + // * If the frame fits in the current writing segment, it is assigned + // to that segment. Otherwise, it is assigned to segments.nextID + // and segments.nextID is incremented (see enqueueWriteFrame). 
- if len(dq.pendingFrames) != 1 { - t.Error("Expected 1 pending frame after a write request.") - } - if dq.pendingFrames[0].frame != frame { - t.Error("Expected pendingFrames to contain the new frame.") + // For this test setup, the queue is initialized with a max segment + // offset of 1000 and a max total size of 10000. + testCases := map[string]struct { + // The segment structure to start with before calling + // handleProducerWriteRequest + segments diskQueueSegments + + // Whether the blockedProducers list should be nonempty in the + // initial queue state. + blockedProducers bool + + // The size of the frame to send in the producer write request + frameSize int + + // The value to set shouldBlock to in the producer write request + shouldBlock bool + + // The result we expect on the requests's response channel, or + // nil if there should be none. + expectedResult *bool + + // The segment the frame should be assigned to in pendingFrames. + // This is ignored unless expectedResult is &true. + expectedSegment segmentID + }{ + "accept single frame when empty": { + segments: diskQueueSegments{nextID: 5}, + frameSize: 1000, + shouldBlock: false, + expectedResult: boolRef(true), + expectedSegment: 5, + }, + "reject immediately when frame is larger than segment limit": { + // max segment buffer size for the test wrapper is 1000. 
+ frameSize: 1001, + shouldBlock: true, + expectedResult: boolRef(false), + }, + "accept with frame in new segment if current segment is full": { + segments: diskQueueSegments{ + writing: []*queueSegment{{}}, + nextWriteOffset: 600, + nextID: 1, + }, + frameSize: 500, + shouldBlock: false, + expectedResult: boolRef(true), + expectedSegment: 1, + }, + "reject when full and shouldBlock=false": { + segments: diskQueueSegments{ + reading: []*queueSegment{ + {endOffset: 9600}, + }, + }, + frameSize: 500, + shouldBlock: false, + expectedResult: boolRef(false), + }, + "block when full and shouldBlock=true": { + segments: diskQueueSegments{ + reading: []*queueSegment{ + {endOffset: 9600}, + }, + }, + frameSize: 500, + shouldBlock: true, + expectedResult: nil, + }, + "reject when blockedProducers is nonempty and shouldBlock=false": { + blockedProducers: true, + frameSize: 500, + shouldBlock: false, + expectedResult: boolRef(false), + }, + "block when blockedProducers is nonempty and shouldBlock=true": { + blockedProducers: true, + frameSize: 500, + shouldBlock: true, + expectedResult: nil, + }, } - if dq.pendingFrames[0].segment.id != 0 { - t.Error("Expected new frame to be assigned to segment 0.") + + settings := DefaultSettings() + settings.MaxSegmentSize = 1000 + segmentHeaderSize + settings.MaxBufferSize = 10000 + for description, test := range testCases { + dq := &diskQueue{ + logger: logp.L(), + settings: settings, + segments: test.segments, + } + if test.blockedProducers { + // Set an empty placeholder write request + dq.blockedProducers = []producerWriteRequest{{}} + } + initialBlockedProducerCount := len(dq.blockedProducers) + + // Construct a frame of the requested size. We subtract the + // metadata size from the buffer length, so test.frameSize + // corresponds to the "real" on-disk size of the frame. 
+ request := producerWriteRequest{ + frame: makeWriteFrameWithSize(test.frameSize), + shouldBlock: test.shouldBlock, + responseChan: make(chan bool, 1), + } + + dq.handleProducerWriteRequest(request) + + var result *bool + select { + case r := <-request.responseChan: + result = &r + default: + // No response, result can stay nil. + } + + // Check that the result itself is correct. + if result != nil && test.expectedResult != nil { + if *result != *test.expectedResult { + t.Errorf("%s: expected response %v, got %v", + description, *test.expectedResult, *result) + } + } else if result == nil && test.expectedResult != nil { + t.Errorf("%s: expected response %v, got none", + description, *test.expectedResult) + } else if result != nil && test.expectedResult == nil { + t.Errorf("%s: expected no response, got %v", + description, *result) + } + // Check whether the request was added to blockedProducers. + if test.expectedResult != nil && + len(dq.blockedProducers) > initialBlockedProducerCount { + // Requests with responses shouldn't be added to + // blockedProducers. + t.Errorf("%s: request shouldn't be added to blockedProducers", + description) + } else if test.expectedResult == nil && + len(dq.blockedProducers) <= initialBlockedProducerCount { + // Requests without responses should be added to + // blockedProducers. + t.Errorf("%s: request should be added to blockedProducers", + description) + } + // Check whether the frame was added to pendingFrames. + var lastPendingFrame *segmentedFrame + if len(dq.pendingFrames) != 0 { + lastPendingFrame = &dq.pendingFrames[len(dq.pendingFrames)-1] + } + if test.expectedResult != nil && *test.expectedResult { + // If the result is success, the frame should now be + // enqueued. 
+ if lastPendingFrame == nil || + lastPendingFrame.frame != request.frame { + t.Errorf("%s: frame should be added to pendingFrames", + description) + } else if lastPendingFrame.segment.id != test.expectedSegment { + t.Errorf("%s: expected frame to be in segment %v, got %v", + description, test.expectedSegment, + lastPendingFrame.segment.id) + } + // Check that segments.nextID is one more than the segment that + // was just assigned. + if lastPendingFrame != nil && + dq.segments.nextID != test.expectedSegment+1 { + t.Errorf("%s: expected segments.nextID to be %v, got %v", + description, test.expectedSegment+1, dq.segments.nextID) + } + } } } func TestHandleWriterLoopResponse(t *testing.T) { - // Initialize the queue with two writing segments only. + // handleWriterLoopResponse should: + // - Add the values in the bytesWritten array, in order, to the endOffset + // of the segments in segments.writing (these represent the amount + // written to each segment as a result of the preceding writer loop + // request). + // - If bytesWritten covers more than one writing segment, then move + // all except the last one from segments.writing to segments.reading. + // These invariants are relatively simple so this test is "by hand" + // rather than using a structured list of sub-cases. + dq := &diskQueue{ settings: DefaultSettings(), segments: diskQueueSegments{ writing: []*queueSegment{ - {id: 1}, + {id: 1, endOffset: 100}, {id: 2}, + {id: 3}, + {id: 4}, }, }, } - // This response says that the writer loop wrote 200 bytes to the first - // segment and 100 bytes to the second. 
+ + // Write to one segment (no segments should be moved to reading list) dq.handleWriterLoopResponse(writerLoopResponse{ - bytesWritten: []int64{200, 100}, + bytesWritten: []int64{100}, }) - - // After the response is handled, we expect: - // - Each segment's endOffset should be incremented by the bytes written - // - Segment 1 should be moved to the reading list (because all but the - // last segment in a writer loop response has been closed) - // - Segment 2 should remain in the writing list - if len(dq.segments.reading) != 1 || dq.segments.reading[0].id != 1 { - t.Error("Expected segment 1 to move to the reading list") + if len(dq.segments.writing) != 4 || len(dq.segments.reading) != 0 { + t.Fatalf("expected 4 writing and 0 reading segments, got %v writing "+ + "and %v reading", len(dq.segments.writing), len(dq.segments.reading)) } - if len(dq.segments.writing) != 1 || dq.segments.writing[0].id != 2 { - t.Error("Expected segment 2 to remain in the writing list") + if dq.segments.writing[0].endOffset != 200 { + t.Errorf("expected first writing segment to be size 200, got %v", + dq.segments.writing[0].endOffset) + } + + // Write to two segments (the first one should be moved to reading list) + dq.handleWriterLoopResponse(writerLoopResponse{ + bytesWritten: []int64{100, 100}, + }) + if len(dq.segments.writing) != 3 || len(dq.segments.reading) != 1 { + t.Fatalf("expected 3 writing and 1 reading segments, got %v writing "+ + "and %v reading", len(dq.segments.writing), len(dq.segments.reading)) } - if dq.segments.reading[0].endOffset != 200 { - t.Errorf("Expected segment 1 endOffset 200, got %d", + if dq.segments.reading[0].endOffset != 300 { + t.Errorf("expected first reading segment to be size 300, got %v", dq.segments.reading[0].endOffset) } if dq.segments.writing[0].endOffset != 100 { - t.Errorf("Expected segment 2 endOffset 100, got %d", + t.Errorf("expected first writing segment to be size 100, got %v", + dq.segments.writing[0].endOffset) + } + + // Write to 
three segments (the first two should be moved to reading list) + dq.handleWriterLoopResponse(writerLoopResponse{ + bytesWritten: []int64{100, 100, 500}, + }) + if len(dq.segments.writing) != 1 || len(dq.segments.reading) != 3 { + t.Fatalf("expected 1 writing and 3 reading segments, got %v writing "+ + "and %v reading", len(dq.segments.writing), len(dq.segments.reading)) + } + if dq.segments.reading[0].endOffset != 300 { + t.Errorf("expected first reading segment to be size 300, got %v", + dq.segments.reading[0].endOffset) + } + if dq.segments.reading[1].endOffset != 200 { + t.Errorf("expected second reading segment to be size 200, got %v", + dq.segments.reading[1].endOffset) + } + if dq.segments.reading[2].endOffset != 100 { + t.Errorf("expected third reading segment to be size 100, got %v", + dq.segments.reading[2].endOffset) + } + if dq.segments.writing[0].endOffset != 500 { + t.Errorf("expected first writing segment to be size 500, got %v", dq.segments.writing[0].endOffset) } } @@ -111,7 +302,8 @@ func TestHandleReaderLoopResponse(t *testing.T) { // mark the remaining data as processed) testCases := map[string]struct { - // The segment structure to start with before calling maybeReadPending + // The segment structure to start with before calling + // handleReaderLoopResponse. segments diskQueueSegments response readerLoopResponse @@ -273,9 +465,10 @@ func TestHandleReaderLoopResponse(t *testing.T) { func TestMaybeReadPending(t *testing.T) { // maybeReadPending should: + // - If diskQueue.reading is true, do nothing and return immediately. // - If any unread data is available in a reading or writing segment, // send a readerLoopRequest for the full amount available in the - // first such segment. + // first such segment, and set diskQueue.reading to true. 
// - When creating a readerLoopRequest that includes the beginning of // a segment (startOffset == 0), set that segment's firstFrameID // to segments.nextReadFrameID (so ACKs based on frame ID can be linked @@ -287,6 +480,8 @@ func TestMaybeReadPending(t *testing.T) { testCases := map[string]struct { // The segment structure to start with before calling maybeReadPending segments diskQueueSegments + // The value of the diskQueue.reading flag before calling maybeReadPending + reading bool // The request we expect to see on the reader loop's request channel, // or nil if there should be none. expectedRequest *readerLoopRequest @@ -308,6 +503,15 @@ func TestMaybeReadPending(t *testing.T) { endOffset: 1000, }, }, + "do nothing if reading flag is set": { + segments: diskQueueSegments{ + reading: []*queueSegment{ + {id: 1, endOffset: 1000}, + }, + }, + reading: true, + expectedRequest: nil, + }, "read the end of a segment": { segments: diskQueueSegments{ reading: []*queueSegment{ @@ -402,6 +606,7 @@ func TestMaybeReadPending(t *testing.T) { readerLoop: &readerLoop{ requestChan: make(chan readerLoopRequest, 1), }, + reading: test.reading, } firstFrameID := test.segments.nextReadFrameID dq.maybeReadPending() @@ -421,6 +626,10 @@ func TestMaybeReadPending(t *testing.T) { t.Errorf( "%s: maybeReadPending should update firstFrameID", description) } + if !dq.reading { + t.Errorf( + "%s: maybeReadPending should set the reading flag", description) + } default: if test.expectedRequest != nil { t.Errorf("%s: expected read request %v, got none", @@ -446,10 +655,322 @@ func TestMaybeReadPending(t *testing.T) { } } +func TestMaybeWritePending(t *testing.T) { + // maybeWritePending should: + // - If diskQueue.writing is true, do nothing and return immediately. 
+ // - Otherwise, if diskQueue.pendingFrames is nonempty: + // * send its contents as a writer loop request + // * set diskQueue.writeRequestSize to the total size of the + // request's frames + // * reset diskQueue.pendingFrames to nil + // * set diskQueue.writing to true. + dq := &diskQueue{ + settings: DefaultSettings(), + writerLoop: &writerLoop{ + requestChan: make(chan writerLoopRequest, 1), + }, + } + + // First call: pendingFrames is empty, should do nothing. + dq.maybeWritePending() + select { + case request := <-dq.writerLoop.requestChan: + t.Errorf("expected no request on empty pendingFrames, got %v", request) + default: + if dq.writing { + t.Errorf( + "maybeWritePending shouldn't set writing flag without a request") + } + } + + // Set up some frame data for the remaining calls. + pendingFrames := []segmentedFrame{ + {frame: makeWriteFrameWithSize(100)}, + {frame: makeWriteFrameWithSize(200)}} + // The size on disk should be the summed buffer lengths plus + // frameMetadataSize times the number of frames + expectedSize := uint64(300) + + // Second call: writing is true, should do nothing. + dq.pendingFrames = pendingFrames + dq.writing = true + dq.maybeWritePending() + select { + case request := <-dq.writerLoop.requestChan: + t.Errorf("expected no request with writing flag set, got %v", request) + default: + } + + // Third call: writing is false, should send a request with pendingFrames. + dq.writing = false + dq.maybeWritePending() + select { + case request := <-dq.writerLoop.requestChan: + // We are extra strict, because we can afford to be: the request should + // contain not just the same elements, but the exact same array (slice) + // as the previous value of pendingFrames. 
+ if len(request.frames) != len(pendingFrames) || + &request.frames[0] != &pendingFrames[0] { + t.Errorf( + "expected request containing pendingFrames, got a different array") + } + if dq.writeRequestSize != expectedSize { + t.Errorf("expected writeRequestSize to equal %v, got %v", + expectedSize, dq.writeRequestSize) + } + if len(dq.pendingFrames) != 0 { + t.Errorf("pendingFrames should be reset after a write request") + } + if !dq.writing { + t.Errorf("the writing flag should be set after a write request") + } + default: + } +} + +func TestMaybeUnblockProducers(t *testing.T) { + // maybeUnblockProducers should: + // - As long as diskQueue.blockedProducers is nonempty and the queue has + // capacity to add its first element (see TestCanAcceptFrameOfSize): + // * Add the request's frame to diskQueue.pendingFrames (see + // enqueueWriteFrame) + // * Report success (true) to the producer's response channel + // * Remove the request from blockedProducers + // When complete, either blockedProducers should be empty or its first + // element should be too big to add to the queue. + + settings := DefaultSettings() + settings.MaxBufferSize = 1000 + responseChans := []chan bool{ + make(chan bool, 1), make(chan bool, 1), make(chan bool, 1)} + dq := &diskQueue{ + settings: settings, + segments: diskQueueSegments{ + writing: []*queueSegment{segmentWithSize(100)}, + }, + blockedProducers: []producerWriteRequest{ + { + frame: makeWriteFrameWithSize(200), + responseChan: responseChans[0], + }, + { + frame: makeWriteFrameWithSize(200), + responseChan: responseChans[1], + }, + { + frame: makeWriteFrameWithSize(501), + responseChan: responseChans[2], + }, + }, + } + + // First call: we expect two producers to be unblocked, because the third + // one would push us one byte above the 1000 byte limit. 
+ dq.maybeUnblockProducers() + if len(dq.pendingFrames) != 2 || len(dq.blockedProducers) != 1 { + t.Fatalf("Expected 2 pending frames and 1 blocked producer, got %v and %v", + len(dq.pendingFrames), len(dq.blockedProducers)) + } + for i := 0; i < 3; i++ { + select { + case response := <-responseChans[i]: + if i < 2 && !response { + t.Errorf("Expected success response for producer %v, got failure", i) + } else if i == 2 { + t.Fatalf("Expected no response for producer 2, got %v", response) + } + default: + if i < 2 { + t.Errorf("Expected success response for producer %v, got none", i) + } + } + } + + dq.blockedProducers[0].frame = makeWriteFrameWithSize(500) + // Second call: with the blocked request one byte smaller, it should fit + // into the queue, and be added with the other pending frames. + dq.maybeUnblockProducers() + if len(dq.pendingFrames) != 3 || len(dq.blockedProducers) != 0 { + t.Fatalf("Expected 3 pending frames and 0 blocked producers, got %v and %v", + len(dq.pendingFrames), len(dq.blockedProducers)) + } + for i := 0; i < 3; i++ { + // This time the first two response channels should get nothing and the + // third should get success. + select { + case response := <-responseChans[i]: + if i < 2 { + t.Errorf("Expected no response for producer %v, got %v", i, response) + } else if !response { + t.Errorf("Expected success response for producer 2, got failure") + } + default: + if i == 2 { + t.Errorf("Expected success response for producer 2, got none") + } + } + } +} + +func TestCanAcceptFrameOfSize(t *testing.T) { + // canAcceptFrameOfSize decides whether the queue has enough free capacity + // to accept an incoming frame of the given size. It should: + // - If the length of pendingFrames is >= settings.WriteAheadLimit, + // return false. + // - If the queue size is unbounded (MaxBufferSize == 0), return true. + // - Otherwise, return true iff the total size of the queue plus the new + // frame is <= settings.MaxBufferSize. 
+ // The size of the queue is calculated as the summed size of: + // * All segments listed in diskQueue.segments (writing, reading, acking, + // acked) + // * All frames in diskQueue.pendingFrames (which have been accepted but + // not yet written) + // * If a write request is outstanding (diskQueue.writing == true), + // diskQueue.writeRequestSize, which is the size of the data that is + // being written by writerLoop but hasn't yet been completed. + // All test cases are run with WriteAheadLimit = 2. + + testCases := map[string]struct { + // The value of settings.MaxBufferSize in the test queue. + maxBufferSize uint64 + // The value of the segments field in the test queue. + segments diskQueueSegments + // The value of pendingFrames in the test queue. + pendingFrames []segmentedFrame + // The value of writeRequestSize (the size of the most recent write + // request) in the test queue. + writeRequestSize uint64 + // The value of the writing flag in the test queue (writeRequestSize is + // included in the queue size calculation only if there is an active + // writing request). + writing bool + + // If expectedOutcomes[v] = b then canAcceptFrameOfSize(v) should return b. + expectedOutcomes map[uint64]bool + }{ + "always reject when at the write ahead limit": { + maxBufferSize: 1000, + pendingFrames: []segmentedFrame{ + {frame: makeWriteFrameWithSize(10)}, + {frame: makeWriteFrameWithSize(10)}, + }, + expectedOutcomes: map[uint64]bool{10: false}, + }, + "always accept when queue size is unbounded": { + maxBufferSize: 0, + expectedOutcomes: map[uint64]bool{ + 1: true, 1000: true, 1000000: true, 1000000000: true, + }, + }, + // The remaining cases are all below the write ahead limit and have + // bounded buffer size, we are just testing that the various + // source values are all accounted for. 
+ "pendingFrames counts against buffer capacity": { + maxBufferSize: 1000, + pendingFrames: []segmentedFrame{ + {frame: makeWriteFrameWithSize(500)}, + }, + // There should be exactly 500 bytes capacity left + expectedOutcomes: map[uint64]bool{ + 500: true, 501: false, + }, + }, + "diskQueue.segments counts against buffer capacity": { + maxBufferSize: 1000, + segments: diskQueueSegments{ + writing: []*queueSegment{segmentWithSize(100)}, + reading: []*queueSegment{segmentWithSize(100)}, + acking: []*queueSegment{segmentWithSize(100)}, + acked: []*queueSegment{segmentWithSize(100)}, + }, + // Four segments of size 100, should be exactly 600 bytes left + expectedOutcomes: map[uint64]bool{ + 600: true, 601: false, + }, + }, + "writeRequestSize counts against buffer capacity when writing=true": { + maxBufferSize: 1000, + writeRequestSize: 600, + writing: true, + expectedOutcomes: map[uint64]bool{ + 400: true, 401: false, + }, + }, + "writeRequestSize doesn't count against buffer capacity when writing=false": { + maxBufferSize: 1000, + writeRequestSize: 600, + writing: false, + expectedOutcomes: map[uint64]bool{ + 1000: true, 1001: false, + }, + }, + "buffer capacity includes the sum of all sources": { + // include all of them together. 
+ maxBufferSize: 1000, + segments: diskQueueSegments{ + writing: []*queueSegment{segmentWithSize(100)}, + reading: []*queueSegment{segmentWithSize(100)}, + acking: []*queueSegment{segmentWithSize(100)}, + acked: []*queueSegment{segmentWithSize(100)}, + }, + pendingFrames: []segmentedFrame{ + {frame: makeWriteFrameWithSize(100)}, + }, + writeRequestSize: 200, + writing: true, + expectedOutcomes: map[uint64]bool{ + 300: true, 301: false, + }, + }, + } + + for description, test := range testCases { + settings := DefaultSettings() + settings.WriteAheadLimit = 2 + settings.MaxBufferSize = test.maxBufferSize + dq := &diskQueue{ + settings: settings, + segments: test.segments, + pendingFrames: test.pendingFrames, + writeRequestSize: test.writeRequestSize, + writing: test.writing, + } + for size, expected := range test.expectedOutcomes { + result := dq.canAcceptFrameOfSize(size) + if result != expected { + t.Errorf("%v: expected canAcceptFrameOfSize(%v) = %v, got %v", + description, size, expected, result) + } + } + } +} + +func boolRef(b bool) *bool { + return &b +} + func segmentIDRef(id segmentID) *segmentID { return &id } +// Convenience helper that creates a frame that will have the given size on +// disk after accounting for header / footer size. +func makeWriteFrameWithSize(size int) *writeFrame { + if size <= frameMetadataSize { + // Frames must have a nonempty data region. 
+ return nil + } + return &writeFrame{serialized: make([]byte, size-frameMetadataSize)} +} + +func segmentWithSize(size int) *queueSegment { + if size < segmentHeaderSize { + // Can't have a segment smaller than the segment header + return nil + } + return &queueSegment{endOffset: segmentOffset(size - segmentHeaderSize)} +} + func equalReaderLoopRequests( r0 readerLoopRequest, r1 readerLoopRequest, ) bool { diff --git a/libbeat/publisher/queue/diskqueue/queue.go b/libbeat/publisher/queue/diskqueue/queue.go index 5f756996e5f3..1819ced21d55 100644 --- a/libbeat/publisher/queue/diskqueue/queue.go +++ b/libbeat/publisher/queue/diskqueue/queue.go @@ -55,6 +55,11 @@ type diskQueue struct { // otherwise. writing bool + // If writing is true, then writeRequestSize equals the number of bytes it + // contained. Used to calculate how much free capacity the queue has left + // after all scheduled writes have been completed (see canAcceptFrameOfSize). + writeRequestSize uint64 + // reading is true if the reader loop is processing a request, false // otherwise. 
reading bool From 3842bee898759378781ec37e0d0697637fcff60b Mon Sep 17 00:00:00 2001 From: Lee Hinman <57081003+leehinman@users.noreply.github.com> Date: Thu, 22 Oct 2020 16:56:37 -0500 Subject: [PATCH 14/25] Incorporate librpm fix feedback (#22098) - re-order imports - fix capitalization in error string --- x-pack/auditbeat/module/system/package/rpm_linux.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/x-pack/auditbeat/module/system/package/rpm_linux.go b/x-pack/auditbeat/module/system/package/rpm_linux.go index 6e5df7e0c6ec..399c121f878c 100644 --- a/x-pack/auditbeat/module/system/package/rpm_linux.go +++ b/x-pack/auditbeat/module/system/package/rpm_linux.go @@ -7,6 +7,7 @@ package pkg import ( + "debug/elf" "errors" "fmt" "runtime" @@ -14,8 +15,6 @@ import ( "time" "unsafe" - "debug/elf" - "github.com/coreos/pkg/dlopen" ) @@ -257,7 +256,7 @@ func openLibrpm() (*librpm, error) { librpm.handle, err = dlopen.GetHandle(librpmNames) if err != nil { - return nil, fmt.Errorf("Couldn't open %v", librpmNames) + return nil, fmt.Errorf("couldn't open %v: %v", librpmNames, err) } librpm.rpmtsCreate, err = librpm.handle.GetSymbolPointer("rpmtsCreate") From 14326dc5f40f67868db1c7ba1a76200f5f2791af Mon Sep 17 00:00:00 2001 From: DeDe Morton Date: Thu, 22 Oct 2020 20:34:14 -0700 Subject: [PATCH 15/25] Edit 6.8.13 release notes (#22120) --- CHANGELOG.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.asciidoc b/CHANGELOG.asciidoc index 349eb49edb3e..df4e85892e32 100644 --- a/CHANGELOG.asciidoc +++ b/CHANGELOG.asciidoc @@ -2607,7 +2607,7 @@ https://github.com/elastic/beats/compare/v6.8.12...v6.8.13[View commits] *Filebeat* -- Add container image in Kubernetes metadata {pull}13356[13356] {issue}12688[12688] +- Add container image in Kubernetes metadata. 
{pull}13356[13356] {issue}12688[12688] [[release-notes-6.8.12]] === Beats version 6.8.12 From e74e886884ffbc1ca5c59ef636bfaed41792cbe7 Mon Sep 17 00:00:00 2001 From: DeDe Morton Date: Thu, 22 Oct 2020 20:36:05 -0700 Subject: [PATCH 16/25] Edit 7.9.3 changelog (#22117) --- CHANGELOG.asciidoc | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.asciidoc b/CHANGELOG.asciidoc index df4e85892e32..61353d3afdb8 100644 --- a/CHANGELOG.asciidoc +++ b/CHANGELOG.asciidoc @@ -11,20 +11,21 @@ https://github.com/elastic/beats/compare/v7.9.2...v7.9.3[View commits] *Affecting all Beats* -- The `o365input` and `o365` module now recover from an authentication problem or other fatal errors, instead of terminating. {pull}21258[21258] +- The `o365audit` input and `o365` module now recover from an authentication problem or other fatal errors, instead of terminating. {pull}21258[21258] *Auditbeat* -- system/socket: Fixed a crash due to concurrent map read and write. {issue}21192[21192] {pull}21690[21690] +- system/socket: Fix a crash due to concurrent map read and write. {issue}21192[21192] {pull}21690[21690] *Filebeat* - Add field limit check for AWS Cloudtrail flattened fields. {pull}21388[21388] {issue}21382[21382] + *Metricbeat* -- Fix remote_write flaky test. {pull}21173[21173] -- Fix panic in kubernetes autodiscover related to keystores {issue}21843[21843] {pull}21880[21880] -- [Kubernetes] Remove redundant dockersock volume mount {pull}22009[22009] +- Fix `remote_write` flaky test. {pull}21173[21173] +- Fix panic in Kubernetes autodiscovery caused by storing stateless keystores. {issue}21843[21843] {pull}21880[21880] +- Remove redundant dockersock volume mount to avoid problems on Kubernetes deployments that do not use docker as the container runtime. 
{pull}22009[22009] [[release-notes-7.9.2]] From eb695ef4312a5dffaa708f1591ebaf5b7800d9ea Mon Sep 17 00:00:00 2001 From: DeDe Morton Date: Thu, 22 Oct 2020 20:37:17 -0700 Subject: [PATCH 17/25] Add fleet settings image (#22065) --- x-pack/elastic-agent/docs/run-elastic-agent.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/x-pack/elastic-agent/docs/run-elastic-agent.asciidoc b/x-pack/elastic-agent/docs/run-elastic-agent.asciidoc index 34bb2481f7fe..2314f7652f40 100644 --- a/x-pack/elastic-agent/docs/run-elastic-agent.asciidoc +++ b/x-pack/elastic-agent/docs/run-elastic-agent.asciidoc @@ -27,7 +27,7 @@ For self-managed installations, set the URLs for {es} and {kib}, including the http ports, then save your changes. + [role="screenshot"] -//image::images/kibana-fleet-settings.png[{fleet} settings] +image::images/kibana-fleet-settings.png[{fleet} settings] . Select **Agents**, then click **Add agent** to get an enrollment token. See <> for detailed steps. From 155dfda99aeb7a74383aa9023d4f350c4d5da668 Mon Sep 17 00:00:00 2001 From: Marc Guasch Date: Fri, 23 Oct 2020 09:12:12 +0200 Subject: [PATCH 18/25] Change x509 mappings from file. to tls.server. 
(#22097) --- .../module/suricata/eve/ingest/pipeline.yml | 32 ++++++------ .../eve/test/eve-alerts.log-expected.json | 52 +++++++++---------- .../eve/test/eve-small.log-expected.json | 24 ++++----- 3 files changed, 54 insertions(+), 54 deletions(-) diff --git a/x-pack/filebeat/module/suricata/eve/ingest/pipeline.yml b/x-pack/filebeat/module/suricata/eve/ingest/pipeline.yml index 01ed5accbe69..e132a8acdde7 100644 --- a/x-pack/filebeat/module/suricata/eve/ingest/pipeline.yml +++ b/x-pack/filebeat/module/suricata/eve/ingest/pipeline.yml @@ -247,27 +247,27 @@ processors: ignore_missing: true - rename: field: suricata.eve.tls.kv_issuerdn.C - target_field: file.x509.issuer.country + target_field: tls.server.x509.issuer.country ignore_missing: true - rename: field: suricata.eve.tls.kv_issuerdn.CN - target_field: file.x509.issuer.common_name + target_field: tls.server.x509.issuer.common_name ignore_missing: true - rename: field: suricata.eve.tls.kv_issuerdn.L - target_field: file.x509.issuer.locality + target_field: tls.server.x509.issuer.locality ignore_missing: true - rename: field: suricata.eve.tls.kv_issuerdn.O - target_field: file.x509.issuer.organization + target_field: tls.server.x509.issuer.organization ignore_missing: true - rename: field: suricata.eve.tls.kv_issuerdn.OU - target_field: file.x509.issuer.organizational_unit + target_field: tls.server.x509.issuer.organizational_unit ignore_missing: true - rename: field: suricata.eve.tls.kv_issuerdn.ST - target_field: file.x509.issuer.state_or_province + target_field: tls.server.x509.issuer.state_or_province ignore_missing: true - gsub: field: suricata.eve.tls.subject @@ -282,34 +282,34 @@ processors: ignore_missing: true - rename: field: suricata.eve.tls.kv_subject.C - target_field: file.x509.subject.country + target_field: tls.server.x509.subject.country ignore_missing: true - rename: field: suricata.eve.tls.kv_subject.CN - target_field: file.x509.subject.common_name + target_field: 
tls.server.x509.subject.common_name ignore_missing: true - rename: field: suricata.eve.tls.kv_subject.L - target_field: file.x509.subject.locality + target_field: tls.server.x509.subject.locality ignore_missing: true - rename: field: suricata.eve.tls.kv_subject.O - target_field: file.x509.subject.organization + target_field: tls.server.x509.subject.organization ignore_missing: true - rename: field: suricata.eve.tls.kv_subject.OU - target_field: file.x509.subject.organizational_unit + target_field: tls.server.x509.subject.organizational_unit ignore_missing: true - rename: field: suricata.eve.tls.kv_subject.ST - target_field: file.x509.subject.state_or_province + target_field: tls.server.x509.subject.state_or_province ignore_missing: true - set: - field: file.x509.serial_number + field: tls.server.x509.serial_number value: '{{suricata.eve.tls.serial}}' ignore_empty_value: true - gsub: - field: file.x509.serial_number + field: tls.server.x509.serial_number pattern: ':' replacement: '' ignore_missing: true @@ -326,11 +326,11 @@ processors: - ISO8601 if: ctx.suricata?.eve?.tls?.notbefore != null - set: - field: file.x509.not_after + field: tls.server.x509.not_after value: '{{tls.server.not_after}}' ignore_empty_value: true - set: - field: file.x509.not_before + field: tls.server.x509.not_before value: '{{tls.server.not_before}}' ignore_empty_value: true - append: diff --git a/x-pack/filebeat/module/suricata/eve/test/eve-alerts.log-expected.json b/x-pack/filebeat/module/suricata/eve/test/eve-alerts.log-expected.json index a63e2fd592a7..ecccab3a10f9 100644 --- a/x-pack/filebeat/module/suricata/eve/test/eve-alerts.log-expected.json +++ b/x-pack/filebeat/module/suricata/eve/test/eve-alerts.log-expected.json @@ -1633,17 +1633,6 @@ "event.type": [ "protocol" ], - "file.x509.issuer.common_name": "Google Internet Authority G2", - "file.x509.issuer.country": "US", - "file.x509.issuer.organization": "Google Inc", - "file.x509.not_after": "2024-07-16T14:52:35.000Z", - 
"file.x509.not_before": "2019-07-17T14:52:35.000Z", - "file.x509.serial_number": "001122334455667788", - "file.x509.subject.common_name": "*.google.com", - "file.x509.subject.country": "US", - "file.x509.subject.locality": "Mountain View", - "file.x509.subject.organization": "Google Inc", - "file.x509.subject.state_or_province": "California", "fileset.name": "eve", "input.type": "log", "log.offset": 16546, @@ -1687,6 +1676,17 @@ "tls.server.not_after": "2024-07-16T14:52:35.000Z", "tls.server.not_before": "2019-07-17T14:52:35.000Z", "tls.server.subject": "C=US, ST=California, L=Mountain View, O=Google Inc, CN=*.google.com", + "tls.server.x509.issuer.common_name": "Google Internet Authority G2", + "tls.server.x509.issuer.country": "US", + "tls.server.x509.issuer.organization": "Google Inc", + "tls.server.x509.not_after": "2024-07-16T14:52:35.000Z", + "tls.server.x509.not_before": "2019-07-17T14:52:35.000Z", + "tls.server.x509.serial_number": "001122334455667788", + "tls.server.x509.subject.common_name": "*.google.com", + "tls.server.x509.subject.country": "US", + "tls.server.x509.subject.locality": "Mountain View", + "tls.server.x509.subject.organization": "Google Inc", + "tls.server.x509.subject.state_or_province": "California", "tls.version": "1.2", "tls.version_protocol": "tls" }, @@ -1711,21 +1711,6 @@ "event.type": [ "allowed" ], - "file.x509.issuer.common_name": "Unknown", - "file.x509.issuer.country": "Unknown", - "file.x509.issuer.locality": "Unknown", - "file.x509.issuer.organization": "Unknown", - "file.x509.issuer.organizational_unit": "Unknown", - "file.x509.issuer.state_or_province": "Unknown", - "file.x509.not_after": "2026-06-25T17:36:29.000Z", - "file.x509.not_before": "2016-06-27T17:36:29.000Z", - "file.x509.serial_number": "72A92C51", - "file.x509.subject.common_name": "Unknown", - "file.x509.subject.country": "Unknown", - "file.x509.subject.locality": "Unknown", - "file.x509.subject.organization": "Unknown", - 
"file.x509.subject.organizational_unit": "Unknown", - "file.x509.subject.state_or_province": "Unknown", "fileset.name": "eve", "input.type": "log", "log.offset": 17541, @@ -1781,6 +1766,21 @@ "tls.server.not_after": "2026-06-25T17:36:29.000Z", "tls.server.not_before": "2016-06-27T17:36:29.000Z", "tls.server.subject": "C=Unknown, ST=Unknown, L=Unknown, O=Unknown, OU=Unknown, CN=Unknown", + "tls.server.x509.issuer.common_name": "Unknown", + "tls.server.x509.issuer.country": "Unknown", + "tls.server.x509.issuer.locality": "Unknown", + "tls.server.x509.issuer.organization": "Unknown", + "tls.server.x509.issuer.organizational_unit": "Unknown", + "tls.server.x509.issuer.state_or_province": "Unknown", + "tls.server.x509.not_after": "2026-06-25T17:36:29.000Z", + "tls.server.x509.not_before": "2016-06-27T17:36:29.000Z", + "tls.server.x509.serial_number": "72A92C51", + "tls.server.x509.subject.common_name": "Unknown", + "tls.server.x509.subject.country": "Unknown", + "tls.server.x509.subject.locality": "Unknown", + "tls.server.x509.subject.organization": "Unknown", + "tls.server.x509.subject.organizational_unit": "Unknown", + "tls.server.x509.subject.state_or_province": "Unknown", "tls.version": "1.2", "tls.version_protocol": "tls" } diff --git a/x-pack/filebeat/module/suricata/eve/test/eve-small.log-expected.json b/x-pack/filebeat/module/suricata/eve/test/eve-small.log-expected.json index 4851f2db8263..2db09a8ee383 100644 --- a/x-pack/filebeat/module/suricata/eve/test/eve-small.log-expected.json +++ b/x-pack/filebeat/module/suricata/eve/test/eve-small.log-expected.json @@ -430,18 +430,6 @@ "event.type": [ "protocol" ], - "file.x509.issuer.common_name": "Apple IST CA 2 - G1", - "file.x509.issuer.country": "US", - "file.x509.issuer.organization": "Apple Inc.", - "file.x509.issuer.organizational_unit": "Certification Authority", - "file.x509.not_after": "2019-03-29T17:54:31.000Z", - "file.x509.not_before": "2017-02-27T17:54:31.000Z", - "file.x509.serial_number": 
"5C9CE1097887F807", - "file.x509.subject.common_name": "*.icloud.com", - "file.x509.subject.country": "US", - "file.x509.subject.organization": "Apple Inc.", - "file.x509.subject.organizational_unit": "management:idms.group.506364", - "file.x509.subject.state_or_province": "California", "fileset.name": "eve", "input.type": "log", "log.offset": 4683, @@ -479,6 +467,18 @@ "tls.server.not_after": "2019-03-29T17:54:31.000Z", "tls.server.not_before": "2017-02-27T17:54:31.000Z", "tls.server.subject": "CN=*.icloud.com, OU=management:idms.group.506364, O=Apple Inc., ST=California, C=US", + "tls.server.x509.issuer.common_name": "Apple IST CA 2 - G1", + "tls.server.x509.issuer.country": "US", + "tls.server.x509.issuer.organization": "Apple Inc.", + "tls.server.x509.issuer.organizational_unit": "Certification Authority", + "tls.server.x509.not_after": "2019-03-29T17:54:31.000Z", + "tls.server.x509.not_before": "2017-02-27T17:54:31.000Z", + "tls.server.x509.serial_number": "5C9CE1097887F807", + "tls.server.x509.subject.common_name": "*.icloud.com", + "tls.server.x509.subject.country": "US", + "tls.server.x509.subject.organization": "Apple Inc.", + "tls.server.x509.subject.organizational_unit": "management:idms.group.506364", + "tls.server.x509.subject.state_or_province": "California", "tls.version": "1.2", "tls.version_protocol": "tls" }, From 2f7b15b7da8f41ef1534d5c5a3c1ac80d9ffbd40 Mon Sep 17 00:00:00 2001 From: Marc Guasch Date: Mon, 26 Oct 2020 10:15:05 +0100 Subject: [PATCH 19/25] Use default config when creating the input (#22126) --- x-pack/filebeat/input/httpjson/input_manager.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/x-pack/filebeat/input/httpjson/input_manager.go b/x-pack/filebeat/input/httpjson/input_manager.go index 21f5066dc052..8d7e60707869 100644 --- a/x-pack/filebeat/input/httpjson/input_manager.go +++ b/x-pack/filebeat/input/httpjson/input_manager.go @@ -36,7 +36,7 @@ func (m inputManager) Init(grp unison.Group, mode v2.Mode) error { 
// Create creates a cursor input manager if the config has a date cursor set up, // otherwise it creates a stateless input manager. func (m inputManager) Create(cfg *common.Config) (v2.Input, error) { - var config config + config := newDefaultConfig() if err := cfg.Unpack(&config); err != nil { return nil, err } From a56193354a5a24b003ac33243916d42f7e39274f Mon Sep 17 00:00:00 2001 From: Victor Martinez Date: Mon, 26 Oct 2020 11:22:26 +0000 Subject: [PATCH 20/25] [CI] support windows-10 (#19804) --- Jenkinsfile.yml | 2 +- auditbeat/Jenkinsfile.yml | 11 +++++++++++ filebeat/Jenkinsfile.yml | 13 +++++++++++++ heartbeat/Jenkinsfile.yml | 11 +++++++++++ metricbeat/Jenkinsfile.yml | 11 +++++++++++ packetbeat/Jenkinsfile.yml | 11 +++++++++++ winlogbeat/Jenkinsfile.yml | 11 +++++++++++ x-pack/auditbeat/Jenkinsfile.yml | 13 ++++++++++++- x-pack/elastic-agent/Jenkinsfile.yml | 11 +++++++++++ x-pack/filebeat/Jenkinsfile.yml | 11 +++++++++++ x-pack/functionbeat/Jenkinsfile.yml | 11 +++++++++++ x-pack/metricbeat/Jenkinsfile.yml | 11 +++++++++++ x-pack/packetbeat/Jenkinsfile.yml | 13 ++++++++++++- x-pack/winlogbeat/Jenkinsfile.yml | 11 +++++++++++ 14 files changed, 148 insertions(+), 3 deletions(-) diff --git a/Jenkinsfile.yml b/Jenkinsfile.yml index f7b21e1cbdfa..cc35232d6d04 100644 --- a/Jenkinsfile.yml +++ b/Jenkinsfile.yml @@ -8,7 +8,7 @@ projects: - "libbeat" - "metricbeat" - "packetbeat" - - "winlogbeat" + ##- "winlogbeat" See https://github.com/elastic/beats/issues/22046 - "x-pack/auditbeat" - "x-pack/dockerlogbeat" - "x-pack/elastic-agent" diff --git a/auditbeat/Jenkinsfile.yml b/auditbeat/Jenkinsfile.yml index b3f20af2d379..c68b5689f486 100644 --- a/auditbeat/Jenkinsfile.yml +++ b/auditbeat/Jenkinsfile.yml @@ -69,3 +69,14 @@ stages: - "windows-2012" branches: true ## for all the branches tags: true ## for all the tags + windows-10: + mage: "mage build unitTest" + platforms: ## override default labels in this specific stage. 
+ - "windows-10" + when: ## Override the top-level when. + comments: + - "/test auditbeat for windows-10" + labels: + - "windows-10" + branches: true ## for all the branches + tags: true ## for all the tags diff --git a/filebeat/Jenkinsfile.yml b/filebeat/Jenkinsfile.yml index 09dbe948c72c..d8ea11c24a54 100644 --- a/filebeat/Jenkinsfile.yml +++ b/filebeat/Jenkinsfile.yml @@ -57,3 +57,16 @@ stages: - "windows-2016" branches: true ## for all the branches tags: true ## for all the tags + windows-10: + mage: "mage build unitTest" + platforms: ## override default labels in this specific stage. + - "windows-10" + when: ## Override the top-level when. + comments: + - "/test filebeat for windows-10" + labels: + - "windows-10" + branches: true ## for all the branches + tags: true ## for all the tags + branches: true ## for all the branches + tags: true ## for all the tags diff --git a/heartbeat/Jenkinsfile.yml b/heartbeat/Jenkinsfile.yml index 1d41dbe581ed..032ec411892d 100644 --- a/heartbeat/Jenkinsfile.yml +++ b/heartbeat/Jenkinsfile.yml @@ -67,4 +67,15 @@ stages: - "windows-2012" branches: true ## for all the branches tags: true ## for all the tags + windows-10: + mage: "mage build unitTest" + platforms: ## override default labels in this specific stage. + - "windows-10" + when: ## Override the top-level when. + comments: + - "/test heartbeat for windows-10" + labels: + - "windows-10" + branches: true ## for all the branches + tags: true ## for all the tags diff --git a/metricbeat/Jenkinsfile.yml b/metricbeat/Jenkinsfile.yml index bdd450908371..e6c4ffcef0e0 100644 --- a/metricbeat/Jenkinsfile.yml +++ b/metricbeat/Jenkinsfile.yml @@ -62,3 +62,14 @@ stages: - "windows-2012" branches: true ## for all the branches tags: true ## for all the tags + windows-10: + mage: "mage build unitTest" + platforms: ## override default labels in this specific stage. + - "windows-10" + when: ## Override the top-level when. 
+ comments: + - "/test metricbeat for windows-10" + labels: + - "windows-10" + branches: true ## for all the branches + tags: true ## for all the tags diff --git a/packetbeat/Jenkinsfile.yml b/packetbeat/Jenkinsfile.yml index adf6471b82ad..ef373bb5f15f 100644 --- a/packetbeat/Jenkinsfile.yml +++ b/packetbeat/Jenkinsfile.yml @@ -67,3 +67,14 @@ stages: - "windows-2012" branches: true ## for all the branches tags: true ## for all the tags + windows-10: + mage: "mage build unitTest" + platforms: ## override default labels in this specific stage. + - "windows-10" + when: ## Override the top-level when. + comments: + - "/test packetbeat for windows-10" + labels: + - "windows-10" + branches: true ## for all the branches + tags: true ## for all the tags diff --git a/winlogbeat/Jenkinsfile.yml b/winlogbeat/Jenkinsfile.yml index 94b36b0e6473..3ec79093ca47 100644 --- a/winlogbeat/Jenkinsfile.yml +++ b/winlogbeat/Jenkinsfile.yml @@ -41,3 +41,14 @@ stages: - "windows-2012" branches: true ## for all the branches tags: true ## for all the tags + windows-10: + mage: "mage build unitTest" + platforms: ## override default labels in this specific stage. + - "windows-10" + when: ## Override the top-level when. + comments: + - "/test winlogbeat for windows-10" + labels: + - "windows-10" + branches: true ## for all the branches + tags: true ## for all the tags diff --git a/x-pack/auditbeat/Jenkinsfile.yml b/x-pack/auditbeat/Jenkinsfile.yml index 1294c4681b49..f4e55ea63720 100644 --- a/x-pack/auditbeat/Jenkinsfile.yml +++ b/x-pack/auditbeat/Jenkinsfile.yml @@ -52,7 +52,7 @@ stages: - "windows-2016" when: ## Override the top-level when. 
comments: - - "/test auditbeat for windows-2016" + - "/test x-pack/auditbeat for windows-2016" labels: - "windows-2016" branches: true ## for all the branches @@ -68,3 +68,14 @@ stages: - "windows-2012" branches: true ## for all the branches tags: true ## for all the tags + windows-10: + mage: "mage build unitTest" + platforms: ## override default labels in this specific stage. + - "windows-10" + when: ## Override the top-level when. + comments: + - "/test x-pack/auditbeat for windows-10" + labels: + - "windows-10" + branches: true ## for all the branches + tags: true ## for all the tags diff --git a/x-pack/elastic-agent/Jenkinsfile.yml b/x-pack/elastic-agent/Jenkinsfile.yml index bf1bfed3ddd6..d324e3381af8 100644 --- a/x-pack/elastic-agent/Jenkinsfile.yml +++ b/x-pack/elastic-agent/Jenkinsfile.yml @@ -67,3 +67,14 @@ stages: - "windows-2012" branches: true ## for all the branches tags: true ## for all the tags + windows-10: + mage: "mage build unitTest" + platforms: ## override default labels in this specific stage. + - "windows-10" + when: ## Override the top-level when. + comments: + - "/test x-pack/elastic-agent for windows-10" + labels: + - "windows-10" + branches: true ## for all the branches + tags: true ## for all the tags diff --git a/x-pack/filebeat/Jenkinsfile.yml b/x-pack/filebeat/Jenkinsfile.yml index 5bd2bcd40cf8..d28520b7c32b 100644 --- a/x-pack/filebeat/Jenkinsfile.yml +++ b/x-pack/filebeat/Jenkinsfile.yml @@ -68,3 +68,14 @@ stages: - "windows-2012" branches: true ## for all the branches tags: true ## for all the tags + windows-10: + mage: "mage build unitTest" + platforms: ## override default labels in this specific stage. + - "windows-10" + when: ## Override the top-level when. 
+ comments: + - "/test x-pack/filebeat for windows-10" + labels: + - "windows-10" + branches: true ## for all the branches + tags: true ## for all the tags diff --git a/x-pack/functionbeat/Jenkinsfile.yml b/x-pack/functionbeat/Jenkinsfile.yml index ecb2bd14e0ea..117d92e3179a 100644 --- a/x-pack/functionbeat/Jenkinsfile.yml +++ b/x-pack/functionbeat/Jenkinsfile.yml @@ -65,3 +65,14 @@ stages: - "windows-2012" branches: true ## for all the branches tags: true ## for all the tags + windows-10: + mage: "mage build unitTest" + platforms: ## override default labels in this specific stage. + - "windows-10" + when: ## Override the top-level when. + comments: + - "/test x-pack/functionbeat for windows-10" + labels: + - "windows-10" + branches: true ## for all the branches + tags: true ## for all the tags diff --git a/x-pack/metricbeat/Jenkinsfile.yml b/x-pack/metricbeat/Jenkinsfile.yml index 60a593c488dd..8506eb12e69a 100644 --- a/x-pack/metricbeat/Jenkinsfile.yml +++ b/x-pack/metricbeat/Jenkinsfile.yml @@ -57,3 +57,14 @@ stages: - "windows-2012" branches: true ## for all the branches tags: true ## for all the tags + windows-10: + mage: "mage build unitTest" + platforms: ## override default labels in this specific stage. + - "windows-10" + when: ## Override the top-level when. + comments: + - "/test x-pack/metricbeat for windows-10" + labels: + - "windows-10" + branches: true ## for all the branches + tags: true ## for all the tags diff --git a/x-pack/packetbeat/Jenkinsfile.yml b/x-pack/packetbeat/Jenkinsfile.yml index e3fa9ad0324b..a3c11636dc65 100644 --- a/x-pack/packetbeat/Jenkinsfile.yml +++ b/x-pack/packetbeat/Jenkinsfile.yml @@ -24,7 +24,7 @@ stages: - "windows-2016" when: ## Override the top-level when. 
comments: - - "/test x-pack/winlogbeat for windows-2016" + - "/test x-pack/packetbeat for windows-2016" labels: - "windows-2016" branches: true ## for all the branches @@ -40,3 +40,14 @@ stages: - "windows-2012" branches: true ## for all the branches tags: true ## for all the tags + windows-10: + mage: "mage build unitTest" + platforms: ## override default labels in this specific stage. + - "windows-10" + when: ## Override the top-level when. + comments: + - "/test x-pack/packetbeat for windows-10" + labels: + - "windows-10" + branches: true ## for all the branches + tags: true ## for all the tags diff --git a/x-pack/winlogbeat/Jenkinsfile.yml b/x-pack/winlogbeat/Jenkinsfile.yml index 371f0aa6f48e..45dfcad9d453 100644 --- a/x-pack/winlogbeat/Jenkinsfile.yml +++ b/x-pack/winlogbeat/Jenkinsfile.yml @@ -40,3 +40,14 @@ stages: - "windows-2012" branches: true ## for all the branches tags: true ## for all the tags + windows-10: + mage: "mage build unitTest" + platforms: ## override default labels in this specific stage. + - "windows-10" + when: ## Override the top-level when. 
+ comments: + - "/test x-pack/winlogbeat for windows-10" + labels: + - "windows-10" + branches: true ## for all the branches + tags: true ## for all the tags From 97d7324690326e2b3eb1e6014bee4a191daf732c Mon Sep 17 00:00:00 2001 From: Victor Martinez Date: Mon, 26 Oct 2020 13:59:08 +0000 Subject: [PATCH 21/25] [CI] Enable winlogbeat (#22142) --- Jenkinsfile.yml | 2 +- winlogbeat/Jenkinsfile.yml | 22 +++++++++++----------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/Jenkinsfile.yml b/Jenkinsfile.yml index cc35232d6d04..f7b21e1cbdfa 100644 --- a/Jenkinsfile.yml +++ b/Jenkinsfile.yml @@ -8,7 +8,7 @@ projects: - "libbeat" - "metricbeat" - "packetbeat" - ##- "winlogbeat" See https://github.com/elastic/beats/issues/22046 + - "winlogbeat" - "x-pack/auditbeat" - "x-pack/dockerlogbeat" - "x-pack/elastic-agent" diff --git a/winlogbeat/Jenkinsfile.yml b/winlogbeat/Jenkinsfile.yml index 3ec79093ca47..3b9c71bf0c31 100644 --- a/winlogbeat/Jenkinsfile.yml +++ b/winlogbeat/Jenkinsfile.yml @@ -41,14 +41,14 @@ stages: - "windows-2012" branches: true ## for all the branches tags: true ## for all the tags - windows-10: - mage: "mage build unitTest" - platforms: ## override default labels in this specific stage. - - "windows-10" - when: ## Override the top-level when. - comments: - - "/test winlogbeat for windows-10" - labels: - - "windows-10" - branches: true ## for all the branches - tags: true ## for all the tags + # windows-10: See https://github.com/elastic/beats/issues/22046 + # mage: "mage build unitTest" + # platforms: ## override default labels in this specific stage. + # - "windows-10" + # when: ## Override the top-level when. 
+ # comments:
+ # - "/test winlogbeat for windows-10"
+ # labels:
+ # - "windows-10"
+ # branches: true ## for all the branches
+ # tags: true ## for all the tags
From d36a5106da49ccce2ae1d4a5f53397e73ea8417c Mon Sep 17 00:00:00 2001
From: Chris Mark
Date: Mon, 26 Oct 2020 17:56:53 +0200
Subject: [PATCH 22/25] Remove old TODO on kubernetes node update (#22074)

---
 libbeat/autodiscover/providers/kubernetes/node.go | 1 -
 1 file changed, 1 deletion(-)

diff --git a/libbeat/autodiscover/providers/kubernetes/node.go b/libbeat/autodiscover/providers/kubernetes/node.go
index a78622756cd6..95e23b33d2a5 100644
--- a/libbeat/autodiscover/providers/kubernetes/node.go
+++ b/libbeat/autodiscover/providers/kubernetes/node.go
@@ -105,7 +105,6 @@ func (n *node) OnUpdate(obj interface{}) {
 time.AfterFunc(n.config.CleanupTimeout, func() { n.emit(node, "stop") })
 } else {
 n.logger.Debugf("Watcher Node update: %+v", obj)
- // TODO: figure out how to avoid stop starting when node status is periodically being updated by kubelet
 n.emit(node, "stop")
 n.emit(node, "start")
 }
From 13a195a16aec0468fb6de228fa3bb696c2d8aeef Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?No=C3=A9mi=20V=C3=A1nyi?=
Date: Mon, 26 Oct 2020 17:27:05 +0100
Subject: [PATCH 23/25] Fix Google Cloud Function configuration file issues
 (#22156)

## What does this PR do?

This PR adds a new function to `cfgfile` to set the path to the configuration file of a Beat. This fixes the issue on GCP with Functionbeat.

## Why is it important?

ATM Functionbeat cannot run on GCP.
## Related issues Closes #20864 --- CHANGELOG.next.asciidoc | 1 + libbeat/cfgfile/cfgfile.go | 4 ++++ x-pack/functionbeat/provider/gcp/pubsub/pubsub.go | 1 + x-pack/functionbeat/provider/gcp/storage/storage.go | 1 + x-pack/functionbeat/scripts/mage/update.go | 3 +++ 5 files changed, 10 insertions(+) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 1bf2cc8f762d..9e79ccef1dd5 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -400,6 +400,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Do not need Google credentials if not required for the operation. {issue}17329[17329] {pull}21072[21072] - Fix dependency issues of GCP functions. {issue}20830[20830] {pull}21070[21070] - Fix catchall bucket config errors by adding more validation. {issue}17572[16282] {pull}20887[16287] +- Fix Google Cloud Function configuration issue. {issue}20864[20864] {pull}22156[22156] ==== Added diff --git a/libbeat/cfgfile/cfgfile.go b/libbeat/cfgfile/cfgfile.go index 767cbd34bc57..ff3949069314 100644 --- a/libbeat/cfgfile/cfgfile.go +++ b/libbeat/cfgfile/cfgfile.go @@ -205,6 +205,10 @@ func LoadList(file string) ([]*common.Config, error) { return c, nil } +func SetConfigPath(path string) { + *configPath = path +} + // GetPathConfig returns ${path.config}. If ${path.config} is not set, ${path.home} is returned. 
func GetPathConfig() string { if *configPath != "" { diff --git a/x-pack/functionbeat/provider/gcp/pubsub/pubsub.go b/x-pack/functionbeat/provider/gcp/pubsub/pubsub.go index 44f77695e946..813a0df9cc06 100644 --- a/x-pack/functionbeat/provider/gcp/pubsub/pubsub.go +++ b/x-pack/functionbeat/provider/gcp/pubsub/pubsub.go @@ -29,6 +29,7 @@ func RunPubSub(ctx context.Context, m gpubsub.Message) error { ConfigOverrides: config.FunctionOverrides, } + cfgfile.SetConfigPath("/srv/src/pubsub") cfgfile.ChangeDefaultCfgfileFlag(settings.Name) return instance.Run(settings, initFunctionbeat(ctx, m)) diff --git a/x-pack/functionbeat/provider/gcp/storage/storage.go b/x-pack/functionbeat/provider/gcp/storage/storage.go index c9d1660d67c1..2de829392d2b 100644 --- a/x-pack/functionbeat/provider/gcp/storage/storage.go +++ b/x-pack/functionbeat/provider/gcp/storage/storage.go @@ -27,6 +27,7 @@ func RunCloudStorage(ctx context.Context, e gcp.StorageEvent) error { ConfigOverrides: config.FunctionOverrides, } + cfgfile.SetConfigPath("/srv/src/storage") cfgfile.ChangeDefaultCfgfileFlag(settings.Name) return instance.Run(settings, initFunctionbeat(ctx, e)) diff --git a/x-pack/functionbeat/scripts/mage/update.go b/x-pack/functionbeat/scripts/mage/update.go index 59b56cb6bed5..99f1b9a3ce34 100644 --- a/x-pack/functionbeat/scripts/mage/update.go +++ b/x-pack/functionbeat/scripts/mage/update.go @@ -74,6 +74,9 @@ func (Update) VendorBeats() error { Exclude: []string{ ".*_test.go$", ".*.yml", + // XXX GCP function metadata lib must be removed to avoid build failures + // GH issue: https://github.com/googleapis/google-cloud-go/issues/1947 + ".*cloud.google.com/go.*/functions/metadata.*", }, } err = cp.Execute() From 5469c46c82da8472a22dce446a48ef2d1827c0db Mon Sep 17 00:00:00 2001 From: Lee Hinman <57081003+leehinman@users.noreply.github.com> Date: Mon, 26 Oct 2020 12:21:07 -0500 Subject: [PATCH 24/25] Fix zeek connection pipeline (#22151) - connection state for rejected is 'REJ' Closes #22149 --- 
CHANGELOG.next.asciidoc | 1 + x-pack/filebeat/module/zeek/connection/ingest/pipeline.yml | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 9e79ccef1dd5..ae48f268977d 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -287,6 +287,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Fix checkpoint module when logs contain time field. {pull}20567[20567] - Add field limit check for AWS Cloudtrail flattened fields. {pull}21388[21388] {issue}21382[21382] - Fix syslog RFC 5424 parsing in the CheckPoint module. {pull}21854[21854] +- Fix incorrect connection state mapping in zeek connection pipeline. {pull}22151[22151] {issue}22149[22149] *Heartbeat* diff --git a/x-pack/filebeat/module/zeek/connection/ingest/pipeline.yml b/x-pack/filebeat/module/zeek/connection/ingest/pipeline.yml index 9cd654edd516..c25c9cee6e5d 100644 --- a/x-pack/filebeat/module/zeek/connection/ingest/pipeline.yml +++ b/x-pack/filebeat/module/zeek/connection/ingest/pipeline.yml @@ -115,7 +115,7 @@ processors: - connection - start - end - REG: + REJ: conn_str: "Connection attempt rejected." 
types: - connection From 5501ce848afca10590696ba1f4bb7426660ebec8 Mon Sep 17 00:00:00 2001 From: Victor Martinez Date: Mon, 26 Oct 2020 17:58:22 +0000 Subject: [PATCH 25/25] [CI] set env variable for the params (#22143) --- Jenkinsfile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Jenkinsfile b/Jenkinsfile index 4099e820f97b..95f270e9e640 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -12,6 +12,7 @@ pipeline { agent { label 'ubuntu-18 && immutable' } environment { AWS_ACCOUNT_SECRET = 'secret/observability-team/ci/elastic-observability-aws-account-auth' + AWS_REGION = "${params.awsRegion}" REPO = 'beats' BASE_DIR = "src/github.com/elastic/${env.REPO}" DOCKERELASTIC_SECRET = 'secret/observability-team/ci/docker-registry/prod' @@ -431,7 +432,7 @@ def withCloudTestEnv(Closure body) { error("${AWS_ACCOUNT_SECRET} doesn't contain 'secret_key'") } maskedVars.addAll([ - [var: "AWS_REGION", password: params.awsRegion], + [var: "AWS_REGION", password: "${env.AWS_REGION}"], [var: "AWS_ACCESS_KEY_ID", password: aws.access_key], [var: "AWS_SECRET_ACCESS_KEY", password: aws.secret_key], ])