diff --git a/CHANGELOG.asciidoc b/CHANGELOG.asciidoc
index 593d0cc9ec59..e088076532ac 100644
--- a/CHANGELOG.asciidoc
+++ b/CHANGELOG.asciidoc
@@ -37,6 +37,7 @@ https://github.com/elastic/beats/compare/v6.0.0-beta1...master[Check the HEAD di
 - Fix go plugins not loaded when beat starts {pull}4799[4799]
 - Eliminate deprecated _default_ mapping in 6.x {pull}4864[4864]
 - Register kubernetes `field_format` matcher and remove logger in `Encode` API {pull}4888[4888]
+- Add support for `initContainers` in `add_kubernetes_metadata` processor {issue}4825[4825]
 
 *Auditbeat*
 
diff --git a/libbeat/libbeat.full.yml b/libbeat/libbeat.full.yml
deleted file mode 100644
index 1e9023a82474..000000000000
--- a/libbeat/libbeat.full.yml
+++ /dev/null
@@ -1,647 +0,0 @@
-
-#================================ General ======================================
-
-# The name of the shipper that publishes the network data. It can be used to group
-# all the transactions sent by a single shipper in the web interface.
-# If this option is not defined, the hostname is used.
-#name:
-
-# The tags of the shipper are included in their own field with each
-# transaction published. Tags make it easy to group servers by different
-# logical properties.
-#tags: ["service-X", "web-tier"]
-
-# Optional fields that you can specify to add additional information to the
-# output. Fields can be scalar values, arrays, dictionaries, or any nested
-# combination of these.
-#fields:
-#  env: staging
-
-# If this option is set to true, the custom fields are stored as top-level
-# fields in the output document instead of being grouped under a fields
-# sub-dictionary. Default is false.
-#fields_under_root: false
-
-# Internal queue size for single events in processing pipeline
-#queue_size: 1000
-
-# The internal queue size for bulk events in the processing pipeline.
-# Do not modify this value.
-#bulk_queue_size: 0
-
-# Sets the maximum number of CPUs that can be executing simultaneously. The
-# default is the number of logical CPUs available in the system.
-#max_procs:
-
-#================================ Processors ===================================
-
-# Processors are used to reduce the number of fields in the exported event or to
-# enhance the event with external metadata. This section defines a list of
-# processors that are applied one by one and the first one receives the initial
-# event:
-#
-#   event -> filter1 -> event1 -> filter2 -> event2 ...
-#
-# The supported processors are drop_fields, drop_event, include_fields, and
-# add_cloud_metadata.
-#
-# For example, you can use the following processors to keep the fields that
-# contain CPU load percentages, but remove the fields that contain CPU ticks
-# values:
-#
-#processors:
-#- include_fields:
-#    fields: ["cpu"]
-#- drop_fields:
-#    fields: ["cpu.user", "cpu.system"]
-#
-# The following example drops the events that have the HTTP response code 200:
-#
-#processors:
-#- drop_event:
-#    when:
-#      equals:
-#        http.code: 200
-#
-# The following example enriches each event with metadata from the cloud
-# provider about the host machine. It works on EC2, GCE, and DigitalOcean.
-#
-#processors:
-#- add_cloud_metadata:
-#
-# The following example enriches each event with the local timezone.
-#
-#processors:
-#- add_locale:
-#
-
-#================================ Outputs ======================================
-
-# Configure what outputs to use when sending the data collected by the beat.
-# Multiple outputs may be used.
-
-#-------------------------- Elasticsearch output -------------------------------
-output.elasticsearch:
-  # Boolean flag to enable or disable the output module.
-  #enabled: true
-
-  # Array of hosts to connect to.
-  # Scheme and port can be left out and will be set to the default (http and 9200)
-  # In case you specify an additional path, the scheme is required: http://localhost:9200/path
-  # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200
-  hosts: ["localhost:9200"]
-
-  # Set gzip compression level.
-  #compression_level: 0
-
-  # Optional protocol and basic auth credentials.
-  #protocol: "https"
-  #username: "elastic"
-  #password: "changeme"
-
-  # Dictionary of HTTP parameters to pass within the url with index operations.
-  #parameters:
-    #param1: value1
-    #param2: value2
-
-  # Number of workers per Elasticsearch host.
-  #worker: 1
-
-  # Optional index name. The default is "libbeat" plus date
-  # and generates [libbeat-]YYYY.MM.DD keys.
-  #index: "libbeat-%{[beat.version]}-%{+yyyy.MM.dd}"
-
-  # Optional ingest node pipeline. By default no pipeline will be used.
-  #pipeline: ""
-
-  # Optional HTTP Path
-  #path: "/elasticsearch"
-
-  # Custom HTTP headers to add to each request
-  #headers:
-  #  X-My-Header: Contents of the header
-
-  # Proxy server url
-  #proxy_url: http://proxy:3128
-
-  # The number of times a particular Elasticsearch index operation is attempted. If
-  # the indexing operation doesn't succeed after this many retries, the events are
-  # dropped. The default is 3.
-  #max_retries: 3
-
-  # The maximum number of events to bulk in a single Elasticsearch bulk API index request.
-  # The default is 50.
-  #bulk_max_size: 50
-
-  # Configure http request timeout before failing a request to Elasticsearch.
-  #timeout: 90
-
-  # The number of seconds to wait for new events between two bulk API index requests.
-  # If `bulk_max_size` is reached before this interval expires, additional bulk index
-  # requests are made.
-  #flush_interval: 1s
-
-  # Use SSL settings for HTTPS. Default is true.
-  #ssl.enabled: true
-
-  # Configure SSL verification mode. If `none` is configured, all server hosts
-  # and certificates will be accepted. In this mode, SSL based connections are
-  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
-  # `full`.
-  #ssl.verification_mode: full
-
-  # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
-  # 1.2 are enabled.
-  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
-
-  # SSL configuration. By default is off.
-  # List of root certificates for HTTPS server verifications
-  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
-
-  # Certificate for SSL client authentication
-  #ssl.certificate: "/etc/pki/client/cert.pem"
-
-  # Client Certificate Key
-  #ssl.key: "/etc/pki/client/cert.key"
-
-  # Optional passphrase for decrypting the Certificate Key.
-  #ssl.key_passphrase: ''
-
-  # Configure cipher suites to be used for SSL connections
-  #ssl.cipher_suites: []
-
-  # Configure curve types for ECDHE based cipher suites
-  #ssl.curve_types: []
-
-
-#----------------------------- Logstash output ---------------------------------
-#output.logstash:
-  # Boolean flag to enable or disable the output module.
-  #enabled: true
-
-  # The Logstash hosts
-  #hosts: ["localhost:5044"]
-
-  # Number of workers per Logstash host.
-  #worker: 1
-
-  # Set gzip compression level.
-  #compression_level: 3
-
-  # Optionally load balance the events between the Logstash hosts
-  #loadbalance: true
-
-  # Number of batches to be sent asynchronously to Logstash while processing
-  # new batches.
-  #pipelining: 0
-
-  # Optional index name. The default index name is set to the name of the beat
-  # in all lowercase.
-  #index: 'libbeat'
-
-  # SOCKS5 proxy server URL
-  #proxy_url: socks5://user:password@socks5-server:2233
-
-  # Resolve names locally when using a proxy server. Defaults to false.
-  #proxy_use_local_resolver: false
-
-  # Enable SSL support. SSL is automatically enabled if any SSL setting is set.
-  #ssl.enabled: true
-
-  # Configure SSL verification mode. If `none` is configured, all server hosts
-  # and certificates will be accepted. In this mode, SSL based connections are
-  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
-  # `full`.
-  #ssl.verification_mode: full
-
-  # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
-  # 1.2 are enabled.
-  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
-
-  # Optional SSL configuration options. SSL is off by default.
-  # List of root certificates for HTTPS server verifications
-  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
-
-  # Certificate for SSL client authentication
-  #ssl.certificate: "/etc/pki/client/cert.pem"
-
-  # Client Certificate Key
-  #ssl.key: "/etc/pki/client/cert.key"
-
-  # Optional passphrase for decrypting the Certificate Key.
-  #ssl.key_passphrase: ''
-
-  # Configure cipher suites to be used for SSL connections
-  #ssl.cipher_suites: []
-
-  # Configure curve types for ECDHE based cipher suites
-  #ssl.curve_types: []
-
-#------------------------------- Kafka output ----------------------------------
-#output.kafka:
-  # Boolean flag to enable or disable the output module.
-  #enabled: true
-
-  # The list of Kafka broker addresses from where to fetch the cluster metadata.
-  # The cluster metadata contain the actual Kafka brokers events are published
-  # to.
-  #hosts: ["localhost:9092"]
-
-  # The Kafka topic used for produced events. The setting can be a format string
-  # using any event field. To set the topic from document type use `%{[type]}`.
-  #topic: beats
-
-  # The Kafka event key setting. Use a format string to create a unique event key.
-  # By default no event key will be generated.
-  #key: ''
-
-  # The Kafka event partitioning strategy. Default hashing strategy is `hash`
-  # using the `output.kafka.key` setting or randomly distributes events if
-  # `output.kafka.key` is not configured.
-  #partition.hash:
-    # If enabled, events will only be published to partitions with reachable
-    # leaders. Default is false.
-    #reachable_only: false
-
-    # Configure alternative event field names used to compute the hash value.
-    # If empty `output.kafka.key` setting will be used.
-    # Default value is empty list.
-    #hash: []
-
-  # Authentication details. Password is required if username is set.
-  #username: ''
-  #password: ''
-
-  # Kafka version libbeat is assumed to run against. Defaults to the oldest
-  # supported stable version (currently version 0.8.2.0)
-  #version: 0.8.2
-
-  # Metadata update configuration. Metadata contains the leader information
-  # used to decide which broker to use when publishing.
-  #metadata:
-    # Max metadata request retry attempts when the cluster is in the middle of a
-    # leader election. Defaults to 3 retries.
-    #retry.max: 3
-
-    # Waiting time between retries during leader elections. Default is 250ms.
-    #retry.backoff: 250ms
-
-    # Refresh metadata interval. Defaults to every 10 minutes.
-    #refresh_frequency: 10m
-
-  # The number of concurrent load-balanced Kafka output workers.
-  #worker: 1
-
-  # The number of times to retry publishing an event after a publishing failure.
-  # After the specified number of retries, the events are typically dropped.
-  # Some Beats, such as Filebeat, ignore the max_retries setting and retry until
-  # all events are published. Set max_retries to a value less than 0 to retry
-  # until all events are published. The default is 3.
-  #max_retries: 3
-
-  # The maximum number of events to bulk in a single Kafka request. The default
-  # is 2048.
-  #bulk_max_size: 2048
-
-  # The number of seconds to wait for responses from the Kafka brokers before
-  # timing out. The default is 30s.
-  #timeout: 30s
-
-  # The maximum duration a broker will wait for the number of required ACKs. The
-  # default is 10s.
-  #broker_timeout: 10s
-
-  # The number of messages buffered for each Kafka broker. The default is 256.
-  #channel_buffer_size: 256
-
-  # The keep-alive period for an active network connection. If 0s, keep-alives
-  # are disabled. The default is 0 seconds.
-  #keep_alive: 0
-
-  # Sets the output compression codec. Must be one of none, snappy, or gzip. The
-  # default is gzip.
-  #compression: gzip
-
-  # The maximum permitted size of JSON-encoded messages. Bigger messages will be
-  # dropped. The default value is 1000000 (bytes). This value should be equal to
-  # or less than the broker's message.max.bytes.
-  #max_message_bytes: 1000000
-
-  # The ACK reliability level required from the broker. 0=no response, 1=wait for
-  # local commit, -1=wait for all replicas to commit. The default is 1. Note:
-  # If set to 0, no ACKs are returned by Kafka. Messages might be lost silently
-  # on error.
-  #required_acks: 1
-
-  # The number of seconds to wait for new events between two producer API calls.
-  #flush_interval: 1s
-
-  # The configurable ClientID used for logging, debugging, and auditing
-  # purposes. The default is "beats".
-  #client_id: beats
-
-  # Enable SSL support. SSL is automatically enabled if any SSL setting is set.
-  #ssl.enabled: true
-
-  # Optional SSL configuration options. SSL is off by default.
-  # List of root certificates for HTTPS server verifications
-  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
-
-  # Configure SSL verification mode. If `none` is configured, all server hosts
-  # and certificates will be accepted. In this mode, SSL based connections are
-  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
-  # `full`.
-  #ssl.verification_mode: full
-
-  # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
-  # 1.2 are enabled.
-  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
-
-  # Certificate for SSL client authentication
-  #ssl.certificate: "/etc/pki/client/cert.pem"
-
-  # Client Certificate Key
-  #ssl.key: "/etc/pki/client/cert.key"
-
-  # Optional passphrase for decrypting the Certificate Key.
-  #ssl.key_passphrase: ''
-
-  # Configure cipher suites to be used for SSL connections
-  #ssl.cipher_suites: []
-
-  # Configure curve types for ECDHE based cipher suites
-  #ssl.curve_types: []
-
-#------------------------------- Redis output ----------------------------------
-#output.redis:
-  # Boolean flag to enable or disable the output module.
-  #enabled: true
-
-  # The list of Redis servers to connect to. If load balancing is enabled, the
-  # events are distributed to the servers in the list. If one server becomes
-  # unreachable, the events are distributed to the reachable servers only.
-  #hosts: ["localhost:6379"]
-
-  # The Redis port to use if hosts does not contain a port number. The default
-  # is 6379.
-  #port: 6379
-
-  # The name of the Redis list or channel the events are published to. The
-  # default is libbeat.
-  #key: libbeat
-
-  # The password to authenticate with. The default is no authentication.
-  #password:
-
-  # The Redis database number where the events are published. The default is 0.
-  #db: 0
-
-  # The Redis data type to use for publishing events. If the data type is list,
-  # the Redis RPUSH command is used. If the data type is channel, the Redis
-  # PUBLISH command is used. The default value is list.
-  #datatype: list
-
-  # The number of workers to use for each host configured to publish events to
-  # Redis. Use this setting along with the loadbalance option. For example, if
-  # you have 2 hosts and 3 workers, in total 6 workers are started (3 for each
-  # host).
-  #worker: 1
-
-  # If set to true and multiple hosts or workers are configured, the output
-  # plugin load balances published events onto all Redis hosts. If set to false,
-  # the output plugin sends all events to only one host (determined at random)
-  # and will switch to another host if the currently selected one becomes
-  # unreachable. The default value is true.
-  #loadbalance: true
-
-  # The Redis connection timeout in seconds. The default is 5 seconds.
-  #timeout: 5s
-
-  # The number of times to retry publishing an event after a publishing failure.
-  # After the specified number of retries, the events are typically dropped.
-  # Some Beats, such as Filebeat, ignore the max_retries setting and retry until
-  # all events are published. Set max_retries to a value less than 0 to retry
-  # until all events are published. The default is 3.
-  #max_retries: 3
-
-  # The maximum number of events to bulk in a single Redis request or pipeline.
-  # The default is 2048.
-  #bulk_max_size: 2048
-
-  # The URL of the SOCKS5 proxy to use when connecting to the Redis servers. The
-  # value must be a URL with a scheme of socks5://.
-  #proxy_url:
-
-  # This option determines whether Redis hostnames are resolved locally when
-  # using a proxy. The default value is false, which means that name resolution
-  # occurs on the proxy server.
-  #proxy_use_local_resolver: false
-
-  # Enable SSL support. SSL is automatically enabled if any SSL setting is set.
-  #ssl.enabled: true
-
-  # Configure SSL verification mode. If `none` is configured, all server hosts
-  # and certificates will be accepted. In this mode, SSL based connections are
-  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
-  # `full`.
-  #ssl.verification_mode: full
-
-  # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
-  # 1.2 are enabled.
-  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
-
-  # Optional SSL configuration options. SSL is off by default.
-  # List of root certificates for HTTPS server verifications
-  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
-
-  # Certificate for SSL client authentication
-  #ssl.certificate: "/etc/pki/client/cert.pem"
-
-  # Client Certificate Key
-  #ssl.key: "/etc/pki/client/cert.key"
-
-  # Optional passphrase for decrypting the Certificate Key.
-  #ssl.key_passphrase: ''
-
-  # Configure cipher suites to be used for SSL connections
-  #ssl.cipher_suites: []
-
-  # Configure curve types for ECDHE based cipher suites
-  #ssl.curve_types: []
-
-
-#------------------------------- File output -----------------------------------
-#output.file:
-  # Boolean flag to enable or disable the output module.
-  #enabled: true
-
-  # Path to the directory where to save the generated files. The option is
-  # mandatory.
-  #path: "/tmp/libbeat"
-
-  # Name of the generated files. The default is `libbeat` and it generates
-  # files: `libbeat`, `libbeat.1`, `libbeat.2`, etc.
-  #filename: libbeat
-
-  # Maximum size in kilobytes of each file. When this size is reached, and on
-  # every libbeat restart, the files are rotated. The default value is 10240
-  # kB.
-  #rotate_every_kb: 10000
-
-  # Maximum number of files under path. When this number of files is reached,
-  # the oldest file is deleted and the rest are shifted from last to first. The
-  # default is 7 files.
-  #number_of_files: 7
-
-
-#----------------------------- Console output ---------------------------------
-#output.console:
-  # Boolean flag to enable or disable the output module.
-  #enabled: true
-
-  # Pretty print json event
-  #pretty: false
-
-#================================= Paths ======================================
-
-# The home path for the libbeat installation. This is the default base path
-# for all other path settings and for miscellaneous files that come with the
-# distribution (for example, the sample dashboards).
-# If not set by a CLI flag or in the configuration file, the default for the
-# home path is the location of the binary.
-#path.home:
-
-# The configuration path for the libbeat installation. This is the default
-# base path for configuration files, including the main YAML configuration file
-# and the Elasticsearch template file. If not set by a CLI flag or in the
-# configuration file, the default for the configuration path is the home path.
-#path.config: ${path.home}
-
-# The data path for the libbeat installation. This is the default base path
-# for all the files in which libbeat needs to store its data. If not set by a
-# CLI flag or in the configuration file, the default for the data path is a data
-# subdirectory inside the home path.
-#path.data: ${path.home}/data
-
-# The logs path for a libbeat installation. This is the default location for
-# the Beat's log files. If not set by a CLI flag or in the configuration file,
-# the default for the logs path is a logs subdirectory inside the home path.
-#path.logs: ${path.home}/logs
-
-#============================== Dashboards =====================================
-# These settings control loading the sample dashboards to the Kibana index. Loading
-# the dashboards is disabled by default and can be enabled either by setting the
-# options here, or by using the `-setup` CLI flag.
-#setup.dashboards.enabled: false
-
-# The URL from where to download the dashboards archive. By default this URL
-# has a value which is computed based on the Beat name and version. For released
-# versions, this URL points to the dashboard archive on the artifacts.elastic.co
-# website.
-#setup.dashboards.url:
-
-# The directory from where to read the dashboards. It is used instead of the URL
-# when it has a value.
-#setup.dashboards.directory:
-
-# The file archive (zip file) from where to read the dashboards. It is used instead
-# of the URL when it has a value.
-#setup.dashboards.file:
-
-# If this option is enabled, the snapshot URL is used instead of the default URL.
-#setup.dashboards.snapshot: false
-
-# The URL from where to download the snapshot version of the dashboards. By default
-# this has a value which is computed based on the Beat name and version.
-#setup.dashboards.snapshot_url:
-
-# In case the archive contains the dashboards from multiple Beats, this lets you
-# select which one to load. You can load all the dashboards in the archive by
-# setting this to the empty string.
-#setup.dashboards.beat: libbeat
-
-# The name of the Kibana index to use for setting the configuration. Default is ".kibana"
-#setup.dashboards.kibana_index: .kibana
-
-# The Elasticsearch index name. This overwrites the index name defined in the
-# dashboards and index pattern. Example: testbeat-*
-#setup.dashboards.index:
-
-#============================== Template =====================================
-
-# A template is used to set the mapping in Elasticsearch.
-# By default template loading is enabled and the template is loaded.
-# These settings can be adjusted to load your own template or overwrite existing ones.
-
-# Set to false to disable template loading.
-#setup.template.enabled: true
-
-# Template name. By default the template name is libbeat.
-# The version of the beat will always be appended to the given name
-# so the final name is libbeat-%{[beat.version]}.
-#setup.template.name: "libbeat"
-
-# Path to fields.yml file to generate the template
-#setup.template.fields: "${path.config}/fields.yml"
-
-# Overwrite existing template
-#setup.template.overwrite: false
-
-
-#================================ HTTP Endpoint ======================================
-# Each beat can expose internal data points through an HTTP endpoint. For security
-# reasons the endpoint is disabled by default. This feature is currently in beta.
-
-# Defines if the HTTP endpoint is enabled
-#http.enabled: false
-
-# Host to expose the HTTP endpoint to. It is recommended to use only localhost.
-#http.host: localhost
-
-# Port on which the HTTP endpoint is exposed. Default is 5066
-#http.port: 5066
-
-#================================ Logging ======================================
-# There are three options for the log output: syslog, file, stderr.
-# On Windows systems, the logs are sent to the file output by default; on all
-# other systems, they are sent to syslog by default.
-
-# Sets log level. The default log level is info.
-# Available log levels are: critical, error, warning, info, debug
-#logging.level: info
-
-# Enable debug output for selected components. To enable all selectors use ["*"]
-# Other available selectors are "beat", "publish", "service"
-# Multiple selectors can be chained.
-#logging.selectors: [ ]
-
-# Send all logging output to syslog. The default is false.
-#logging.to_syslog: true
-
-# If enabled, libbeat periodically logs its internal metrics that have changed
-# in the last period. For each metric that changed, the delta from the value at
-# the beginning of the period is logged. Also, the total values for
-# all non-zero internal metrics are logged on shutdown. The default is true.
-#logging.metrics.enabled: true
-
-# The period after which to log the internal metrics. The default is 30s.
-#logging.metrics.period: 30s
-
-# Logging to rotating files. Set logging.to_files to false to disable logging to
-# files.
-logging.to_files: true
-logging.files:
-  # Configure the path where the logs are written. The default is the logs directory
-  # under the home path (the binary location).
-  #path: /var/log/libbeat
-
-  # The name of the files where the logs are written to.
-  #name: libbeat
-
-  # Configure log file size limit. If the limit is reached, the log file will be
-  # automatically rotated.
-  #rotateeverybytes: 10485760 # = 10MB
-
-  # Number of rotated log files to keep. Oldest files will be deleted first.
-  #keepfiles: 7
-
diff --git a/libbeat/processors/add_kubernetes_metadata/indexing.go b/libbeat/processors/add_kubernetes_metadata/indexing.go
index 06ecce7ae42f..ccb9c35d0982 100644
--- a/libbeat/processors/add_kubernetes_metadata/indexing.go
+++ b/libbeat/processors/add_kubernetes_metadata/indexing.go
@@ -271,15 +271,19 @@ func NewContainerIndexer(_ common.Config, genMeta GenMeta) (Indexer, error) {
 func (c *ContainerIndexer) GetMetadata(pod *Pod) []MetadataIndex {
     commonMeta := c.genMeta.GenerateMetaData(pod)
-    containers := c.GetIndexes(pod)
     var metadata []MetadataIndex
-    for i := 0; i < len(containers); i++ {
+    for _, status := range append(pod.Status.ContainerStatuses, pod.Status.InitContainerStatuses...) {
+        cID := containerID(status)
+        if cID == "" {
+            continue
+        }
+
         containerMeta := commonMeta.Clone()
         containerMeta["container"] = common.MapStr{
-            "name": pod.Status.ContainerStatuses[i].Name,
+            "name": status.Name,
         }
         metadata = append(metadata, MetadataIndex{
-            Index: containers[i],
+            Index: cID,
             Data:  containerMeta,
         })
     }
@@ -289,18 +293,27 @@ func (c *ContainerIndexer) GetIndexes(pod *Pod) []string {
     var containers []string
-    for _, status := range pod.Status.ContainerStatuses {
-        cID := status.ContainerID
-        if cID != "" {
-            parts := strings.Split(cID, "//")
-            if len(parts) == 2 {
-                containers = append(containers, parts[1])
-            }
+    for _, status := range append(pod.Status.ContainerStatuses, pod.Status.InitContainerStatuses...) {
+        cID := containerID(status)
+        if cID == "" {
+            continue
         }
+        containers = append(containers, cID)
     }
     return containers
 }
 
+func containerID(status PodContainerStatus) string {
+    cID := status.ContainerID
+    if cID != "" {
+        parts := strings.Split(cID, "//")
+        if len(parts) == 2 {
+            return parts[1]
+        }
+    }
+    return ""
+}
+
 type FieldMatcher struct {
     MatchFields []string
 }
diff --git a/libbeat/processors/add_kubernetes_metadata/indexing_test.go b/libbeat/processors/add_kubernetes_metadata/indexing_test.go
index 9a36fc4868a7..657d8c31e2a5 100644
--- a/libbeat/processors/add_kubernetes_metadata/indexing_test.go
+++ b/libbeat/processors/add_kubernetes_metadata/indexing_test.go
@@ -60,6 +60,7 @@ func TestContainerIndexer(t *testing.T) {
     podName := "testpod"
     ns := "testns"
     container := "container"
+    initContainer := "initcontainer"
 
     pod := Pod{
         Metadata: ObjectMeta{
@@ -70,7 +71,8 @@ func TestContainerIndexer(t *testing.T) {
             },
         },
         Status: PodStatus{
-            ContainerStatuses: make([]PodContainerStatus, 0),
+            ContainerStatuses:     make([]PodContainerStatus, 0),
+            InitContainerStatuses: make([]PodContainerStatus, 0),
         },
     }
@@ -88,27 +90,38 @@ func TestContainerIndexer(t *testing.T) {
         },
     }
 
-    cid := "docker://abcde"
-
     pod.Status.ContainerStatuses = []PodContainerStatus{
         {
             Name:        container,
-            ContainerID: cid,
+            ContainerID: "docker://abcde",
         },
     }
-    expected["container"] = common.MapStr{
-        "name": container,
+    pod.Status.InitContainerStatuses = []PodContainerStatus{
+        {
+            Name:        initContainer,
+            ContainerID: "docker://fghij",
+        },
     }
 
     indexers = conIndexer.GetMetadata(&pod)
-    assert.Equal(t, len(indexers), 1)
+    assert.Equal(t, len(indexers), 2)
     assert.Equal(t, indexers[0].Index, "abcde")
+    assert.Equal(t, indexers[1].Index, "fghij")
     indices = conIndexer.GetIndexes(&pod)
-    assert.Equal(t, len(indices), 1)
+    assert.Equal(t, len(indices), 2)
     assert.Equal(t, indices[0], "abcde")
+    assert.Equal(t, indices[1], "fghij")
+    expected["container"] = common.MapStr{
+        "name": container,
+    }
     assert.Equal(t, expected.String(), indexers[0].Data.String())
+
+    expected["container"] = common.MapStr{
+        "name": initContainer,
+    }
+    assert.Equal(t, expected.String(), indexers[1].Data.String())
 }
 
 func TestFieldMatcher(t *testing.T) {
diff --git a/libbeat/processors/add_kubernetes_metadata/podwatcher.go b/libbeat/processors/add_kubernetes_metadata/podwatcher.go
index bc25327efe80..2d0ef0e2fca3 100644
--- a/libbeat/processors/add_kubernetes_metadata/podwatcher.go
+++ b/libbeat/processors/add_kubernetes_metadata/podwatcher.go
@@ -84,7 +84,7 @@ func (p *PodWatcher) watchPods() {
         if err != nil {
             //watch pod failures should be logged and gracefully failed over as metadata retrieval
             //should never stop.
-            logp.Err("kubernetes: Watching API eror %v", err)
+            logp.Err("kubernetes: Watching API error %v", err)
             time.Sleep(time.Second)
             continue
         }
diff --git a/libbeat/processors/add_kubernetes_metadata/types.go b/libbeat/processors/add_kubernetes_metadata/types.go
index d3bec7c7cfab..6337d2d5ea5d 100644
--- a/libbeat/processors/add_kubernetes_metadata/types.go
+++ b/libbeat/processors/add_kubernetes_metadata/types.go
@@ -82,12 +82,13 @@ type PodContainerStatus struct {
 }
 
 type PodStatus struct {
-    Conditions        []PodStatusCondition `json:"conditions"`
-    ContainerStatuses []PodContainerStatus `json:"containerStatuses"`
-    HostIP            string               `json:"hostIP"`
-    Phase             string               `json:"phase"`
-    PodIP             string               `json:"podIP"`
-    StartTime         string               `json:"startTime"`
+    Conditions            []PodStatusCondition `json:"conditions"`
+    ContainerStatuses     []PodContainerStatus `json:"containerStatuses"`
+    InitContainerStatuses []PodContainerStatus `json:"initContainerStatuses"`
+    HostIP                string               `json:"hostIP"`
+    Phase                 string               `json:"phase"`
+    PodIP                 string               `json:"podIP"`
+    StartTime             string               `json:"startTime"`
 }
 
 type Pod struct {
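
For reviewers, a minimal standalone Go sketch (not part of the patch) of the ID parsing the new containerID helper performs and of why regular and init container statuses can be handled by one loop. The reduced PodContainerStatus struct and the sample statuses below are hypothetical, kept only to the fields the helper actually reads:

package main

import (
	"fmt"
	"strings"
)

// PodContainerStatus is trimmed to the two fields the helper touches.
type PodContainerStatus struct {
	Name        string
	ContainerID string
}

// containerID mirrors the helper added in indexing.go: it strips the
// runtime prefix (e.g. "docker://") from status.ContainerID and returns
// "" when the ID is unset or has an unexpected shape, which is how
// not-yet-started containers get skipped by the indexer.
func containerID(status PodContainerStatus) string {
	cID := status.ContainerID
	if cID != "" {
		parts := strings.Split(cID, "//")
		if len(parts) == 2 {
			return parts[1]
		}
	}
	return ""
}

func main() {
	// Regular and init container statuses share the same shape, which is
	// why GetMetadata/GetIndexes can simply append the two status slices.
	statuses := []PodContainerStatus{
		{Name: "container", ContainerID: "docker://abcde"},
		{Name: "initcontainer", ContainerID: "docker://fghij"},
		{Name: "pending"}, // hypothetical: container not started, no ID yet
	}
	for _, s := range statuses {
		if id := containerID(s); id != "" {
			fmt.Printf("%s -> %s\n", s.Name, id)
		}
	}
	// Prints:
	// container -> abcde
	// initcontainer -> fghij
}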