From 8f08f159ff01ac4a0ee2c08388bd8de12a254904 Mon Sep 17 00:00:00 2001 From: amandahla Date: Wed, 24 May 2017 17:01:50 -0300 Subject: [PATCH] Add rabbitmq module (#3887) --- CHANGELOG.asciidoc | 1 + metricbeat/docker-compose.yml | 5 + metricbeat/docs/fields.asciidoc | 330 ++++++++++++++++++ metricbeat/docs/modules/rabbitmq.asciidoc | 39 +++ .../docs/modules/rabbitmq/node.asciidoc | 19 + metricbeat/docs/modules_list.asciidoc | 2 + metricbeat/include/list.go | 2 + metricbeat/metricbeat.full.yml | 10 + metricbeat/module/rabbitmq/_meta/Dockerfile | 5 + metricbeat/module/rabbitmq/_meta/config.yml | 8 + .../module/rabbitmq/_meta/docs.asciidoc | 4 + metricbeat/module/rabbitmq/_meta/env | 4 + metricbeat/module/rabbitmq/_meta/fields.yml | 11 + .../_meta/testdata/node_sample_response.json | 312 +++++++++++++++++ metricbeat/module/rabbitmq/doc.go | 4 + .../module/rabbitmq/node/_meta/data.json | 19 + .../module/rabbitmq/node/_meta/docs.asciidoc | 3 + .../module/rabbitmq/node/_meta/fields.yml | 159 +++++++++ metricbeat/module/rabbitmq/node/data.go | 155 ++++++++ metricbeat/module/rabbitmq/node/node.go | 57 +++ .../rabbitmq/node/node_integration_test.go | 60 ++++ metricbeat/module/rabbitmq/node/node_test.go | 139 ++++++++ 22 files changed, 1348 insertions(+) create mode 100644 metricbeat/docs/modules/rabbitmq.asciidoc create mode 100644 metricbeat/docs/modules/rabbitmq/node.asciidoc create mode 100644 metricbeat/module/rabbitmq/_meta/Dockerfile create mode 100644 metricbeat/module/rabbitmq/_meta/config.yml create mode 100644 metricbeat/module/rabbitmq/_meta/docs.asciidoc create mode 100644 metricbeat/module/rabbitmq/_meta/env create mode 100644 metricbeat/module/rabbitmq/_meta/fields.yml create mode 100644 metricbeat/module/rabbitmq/_meta/testdata/node_sample_response.json create mode 100644 metricbeat/module/rabbitmq/doc.go create mode 100644 metricbeat/module/rabbitmq/node/_meta/data.json create mode 100644 metricbeat/module/rabbitmq/node/_meta/docs.asciidoc create mode 100644 metricbeat/module/rabbitmq/node/_meta/fields.yml create mode 100644 metricbeat/module/rabbitmq/node/data.go create mode 100644 metricbeat/module/rabbitmq/node/node.go create mode 100644 metricbeat/module/rabbitmq/node/node_integration_test.go create mode 100644 metricbeat/module/rabbitmq/node/node_test.go diff --git a/CHANGELOG.asciidoc b/CHANGELOG.asciidoc index d8c7cf79cfa6..0bafc9413fd7 100644 --- a/CHANGELOG.asciidoc +++ b/CHANGELOG.asciidoc @@ -75,6 +75,7 @@ https://github.com/elastic/beats/compare/v6.0.0-alpha1...master[Check the HEAD d - Add debug logging to Jolokia JMX metricset. {pull}4341[4341] - Add events metricset for kubernetes metricbeat module {pull}4315[4315] - Change Metricbeat default configuration file to be better optimized for most users. {pull}4329[4329] +- Add experimental RabbitMQ module. 
{pull}4394[4394] *Packetbeat* diff --git a/metricbeat/docker-compose.yml b/metricbeat/docker-compose.yml index a951792fe9fd..8e316130d508 100644 --- a/metricbeat/docker-compose.yml +++ b/metricbeat/docker-compose.yml @@ -34,6 +34,7 @@ services: - ${PWD}/module/php_fpm/_meta/env - ${PWD}/module/postgresql/_meta/env - ${PWD}/module/prometheus/_meta/env + - ${PWD}/module/rabbitmq/_meta/env - ${PWD}/module/redis/_meta/env - ${PWD}/module/zookeeper/_meta/env @@ -61,6 +62,7 @@ services: phpfpm: { condition: service_healthy } postgresql: { condition: service_healthy } prometheus: { condition: service_healthy } + rabbitmq: { condition: service_healthy } redis: { condition: service_healthy } zookeeper: { condition: service_healthy } @@ -136,6 +138,9 @@ services: prometheus: build: ${PWD}/module/prometheus/_meta + rabbitmq: + build: ${PWD}/module/rabbitmq/_meta + redis: build: ${PWD}/module/redis/_meta diff --git a/metricbeat/docs/fields.asciidoc b/metricbeat/docs/fields.asciidoc index f98563974035..7c924ef3612d 100644 --- a/metricbeat/docs/fields.asciidoc +++ b/metricbeat/docs/fields.asciidoc @@ -37,6 +37,7 @@ grouped in the following categories: * <> * <> * <> +* <> * <> * <> * <> @@ -8867,6 +8868,335 @@ type: long Number of memory chunks that are not yet persisted to disk. +[[exported-fields-rabbitmq]] +== rabbitmq Fields + +[]experimental +rabbitmq Module + + + +[float] +== rabbitmq Fields + + + + +[float] +== node Fields + +node + + + +[float] +=== rabbitmq.node.disk.free.bytes + +type: long + +format: bytes + +Disk free space in bytes. + + +[float] +=== rabbitmq.node.disk.free.limit.bytes + +type: long + +format: bytes + +Point at which the disk alarm will go off. + + +[float] +=== rabbitmq.node.fd.total + +type: long + +File descriptors available. + + +[float] +=== rabbitmq.node.fd.used + +type: long + +Used file descriptors. + + +[float] +=== rabbitmq.node.gc.num.count + +type: long + +Number of GC operations. + + +[float] +=== rabbitmq.node.gc.reclaimed.bytes + +type: long + +format: bytes + +GC bytes reclaimed. + + +[float] +=== rabbitmq.node.io.file_handle.open_attempt.avg.ms + +type: long + +File handle open avg time + + +[float] +=== rabbitmq.node.io.file_handle.open_attempt.count + +type: long + +File handle open attempts + + +[float] +=== rabbitmq.node.io.read.avg.ms + +type: long + +File handle read avg time + + +[float] +=== rabbitmq.node.io.read.bytes + +type: long + +format: bytes + +Data read in bytes + + +[float] +=== rabbitmq.node.io.read.count + +type: long + +Data read operations + + +[float] +=== rabbitmq.node.io.reopen.count + +type: long + +Data reopen operations + + +[float] +=== rabbitmq.node.io.seek.avg.ms + +type: long + +Data seek avg time + + +[float] +=== rabbitmq.node.io.seek.count + +type: long + +Data seek operations + + +[float] +=== rabbitmq.node.io.sync.avg.ms + +type: long + +Data sync avg time + + +[float] +=== rabbitmq.node.io.sync.count + +type: long + +Data sync operations + + +[float] +=== rabbitmq.node.io.write.avg.ms + +type: long + +Data write avg time + + +[float] +=== rabbitmq.node.io.write.bytes + +type: long + +format: bytes + +Data write in bytes + + +[float] +=== rabbitmq.node.io.write.count + +type: long + +Data write operations + + +[float] +=== rabbitmq.node.mem.limit.bytes + +type: long + +format: bytes + +Point at which the memory alarm will go off. + + +[float] +=== rabbitmq.node.mem.used.bytes + +type: long + +Memory used in bytes. 
+
+
+[float]
+=== rabbitmq.node.mnesia.disk.tx.count
+
+type: long
+
+Number of Mnesia transactions which have been performed that required writes to disk.
+
+
+[float]
+=== rabbitmq.node.mnesia.ram.tx.count
+
+type: long
+
+Number of Mnesia transactions which have been performed that did not require writes to disk.
+
+
+[float]
+=== rabbitmq.node.msg.store_read.count
+
+type: long
+
+Number of messages which have been read from the message store.
+
+
+[float]
+=== rabbitmq.node.msg.store_write.count
+
+type: long
+
+Number of messages which have been written to the message store.
+
+
+[float]
+=== rabbitmq.node.name
+
+type: keyword
+
+Node name
+
+
+[float]
+=== rabbitmq.node.proc.total
+
+type: long
+
+Maximum number of Erlang processes.
+
+
+[float]
+=== rabbitmq.node.proc.used
+
+type: long
+
+Number of Erlang processes in use.
+
+
+[float]
+=== rabbitmq.node.processors
+
+type: long
+
+Number of cores detected and usable by Erlang.
+
+
+[float]
+=== rabbitmq.node.queue.index.journal_write.count
+
+type: long
+
+Number of records written to the queue index journal.
+
+
+[float]
+=== rabbitmq.node.queue.index.read.count
+
+type: long
+
+Number of records read from the queue index.
+
+
+[float]
+=== rabbitmq.node.queue.index.write.count
+
+type: long
+
+Number of records written to the queue index.
+
+
+[float]
+=== rabbitmq.node.run.queue
+
+type: long
+
+Average number of Erlang processes waiting to run.
+
+
+[float]
+=== rabbitmq.node.socket.total
+
+type: long
+
+File descriptors available for use as sockets.
+
+
+[float]
+=== rabbitmq.node.socket.used
+
+type: long
+
+File descriptors used as sockets.
+
+
+[float]
+=== rabbitmq.node.type
+
+type: keyword
+
+Node type.
+
+
+[float]
+=== rabbitmq.node.uptime
+
+type: long
+
+Node uptime.
+
+
 [[exported-fields-redis]]
 == Redis Fields
diff --git a/metricbeat/docs/modules/rabbitmq.asciidoc b/metricbeat/docs/modules/rabbitmq.asciidoc
new file mode 100644
index 000000000000..da2db7b84283
--- /dev/null
+++ b/metricbeat/docs/modules/rabbitmq.asciidoc
@@ -0,0 +1,39 @@
+////
+This file is generated! See scripts/docs_collector.py
+////
+
+[[metricbeat-module-rabbitmq]]
+== rabbitmq Module
+
+This is the rabbitmq module. It uses the http://www.rabbitmq.com/management.html[HTTP API] created by the management plugin to collect metrics.
+
+
+
+[float]
+=== Example Configuration
+
+The rabbitmq module supports the standard configuration options that are described
+in <<configuration-metricbeat>>. Here is an example configuration:
+
+[source,yaml]
+----
+metricbeat.modules:
+- module: rabbitmq
+  metricsets: ["node"]
+  enabled: false
+  period: 10s
+  hosts: ["localhost:15672"]
+
+  username: guest
+  password: guest
+----
+
+[float]
+=== Metricsets
+
+The following metricsets are available:
+
+* <<metricbeat-metricset-rabbitmq-node,node>>
+
+include::rabbitmq/node.asciidoc[]
+
diff --git a/metricbeat/docs/modules/rabbitmq/node.asciidoc b/metricbeat/docs/modules/rabbitmq/node.asciidoc
new file mode 100644
index 000000000000..d4625767298c
--- /dev/null
+++ b/metricbeat/docs/modules/rabbitmq/node.asciidoc
@@ -0,0 +1,19 @@
+////
+This file is generated! See scripts/docs_collector.py
+////
+
+[[metricbeat-metricset-rabbitmq-node]]
+include::../../../module/rabbitmq/node/_meta/docs.asciidoc[]
+
+
+==== Fields
+
+For a description of each field in the metricset, see the
+<<exported-fields-rabbitmq,exported fields>> section.
+ +Here is an example document generated by this metricset: + +[source,json] +---- +include::../../../module/rabbitmq/node/_meta/data.json[] +---- diff --git a/metricbeat/docs/modules_list.asciidoc b/metricbeat/docs/modules_list.asciidoc index 884d63f99138..a9dbabd5ae2b 100644 --- a/metricbeat/docs/modules_list.asciidoc +++ b/metricbeat/docs/modules_list.asciidoc @@ -23,6 +23,7 @@ This file is generated! See scripts/docs_collector.py * <> * <> * <> + * <> * <> * <> * <> @@ -53,6 +54,7 @@ include::modules/nginx.asciidoc[] include::modules/php_fpm.asciidoc[] include::modules/postgresql.asciidoc[] include::modules/prometheus.asciidoc[] +include::modules/rabbitmq.asciidoc[] include::modules/redis.asciidoc[] include::modules/system.asciidoc[] include::modules/vsphere.asciidoc[] diff --git a/metricbeat/include/list.go b/metricbeat/include/list.go index 19496aa5648b..9795c299b0cf 100644 --- a/metricbeat/include/list.go +++ b/metricbeat/include/list.go @@ -81,6 +81,8 @@ import ( _ "github.com/elastic/beats/metricbeat/module/prometheus" _ "github.com/elastic/beats/metricbeat/module/prometheus/collector" _ "github.com/elastic/beats/metricbeat/module/prometheus/stats" + _ "github.com/elastic/beats/metricbeat/module/rabbitmq" + _ "github.com/elastic/beats/metricbeat/module/rabbitmq/node" _ "github.com/elastic/beats/metricbeat/module/redis" _ "github.com/elastic/beats/metricbeat/module/redis/info" _ "github.com/elastic/beats/metricbeat/module/redis/keyspace" diff --git a/metricbeat/metricbeat.full.yml b/metricbeat/metricbeat.full.yml index ca0c4250d51f..0e8570542c8a 100644 --- a/metricbeat/metricbeat.full.yml +++ b/metricbeat/metricbeat.full.yml @@ -385,6 +385,16 @@ metricbeat.modules: metrics_path: /metrics #namespace: example +#------------------------------ rabbitmq Module ------------------------------ +- module: rabbitmq + metricsets: ["node"] + enabled: false + period: 10s + hosts: ["localhost:15672"] + + username: guest + password: guest + #-------------------------------- Redis Module ------------------------------- - module: redis metricsets: ["info", "keyspace"] diff --git a/metricbeat/module/rabbitmq/_meta/Dockerfile b/metricbeat/module/rabbitmq/_meta/Dockerfile new file mode 100644 index 000000000000..1b680a7b8d6a --- /dev/null +++ b/metricbeat/module/rabbitmq/_meta/Dockerfile @@ -0,0 +1,5 @@ +FROM rabbitmq:3-management + +RUN apt-get update && apt-get install -y netcat && apt-get clean +HEALTHCHECK CMD nc -w 1 -v 127.0.0.1 15672 + []experimental + + rabbitmq Module + fields: + - name: rabbitmq + type: group + description: > + fields: diff --git a/metricbeat/module/rabbitmq/_meta/testdata/node_sample_response.json b/metricbeat/module/rabbitmq/_meta/testdata/node_sample_response.json new file mode 100644 index 000000000000..dd3f73eb6b38 --- /dev/null +++ b/metricbeat/module/rabbitmq/_meta/testdata/node_sample_response.json @@ -0,0 +1,312 @@ +[ + { + "applications": [ + { + "description": "RabbitMQ AMQP Client", + "name": "amqp_client", + "version": "3.6.9" + }, + { + "description": "The Erlang ASN1 compiler version 4.0.4", + "name": "asn1", + "version": "4.0.4" + }, + { + "description": "ERTS CXC 138 10", + "name": "compiler", + "version": "7.0.4" + }, + { + "description": "Small, fast, modular HTTP server.", + "name": "cowboy", + "version": "1.0.4" + }, + { + "description": "Support library for manipulating Web protocols.", + "name": "cowlib", + "version": "1.0.2" + }, + { + "description": "CRYPTO", + "name": "crypto", + "version": "3.7.3" + }, + { + "description": "INETS CXC 138 49", + 
"name": "inets", + "version": "6.3.6" + }, + { + "description": "ERTS CXC 138 10", + "name": "kernel", + "version": "5.2" + }, + { + "description": "MNESIA CXC 138 12", + "name": "mnesia", + "version": "4.14.3" + }, + { + "description": "CPO CXC 138 46", + "name": "os_mon", + "version": "2.4.2" + }, + { + "description": "Public key infrastructure", + "name": "public_key", + "version": "1.4" + }, + { + "description": "RabbitMQ", + "name": "rabbit", + "version": "3.6.9" + }, + { + "description": "Modules shared by rabbitmq-server and rabbitmq-erlang-client", + "name": "rabbit_common", + "version": "3.6.9" + }, + { + "description": "RabbitMQ Management Console", + "name": "rabbitmq_management", + "version": "3.6.9" + }, + { + "description": "RabbitMQ Management Agent", + "name": "rabbitmq_management_agent", + "version": "3.6.9" + }, + { + "description": "RabbitMQ Web Dispatcher", + "name": "rabbitmq_web_dispatch", + "version": "3.6.9" + }, + { + "description": "Socket acceptor pool for TCP protocols.", + "name": "ranch", + "version": "1.3.0" + }, + { + "description": "SASL CXC 138 11", + "name": "sasl", + "version": "3.0.3" + }, + { + "description": "Erlang/OTP SSL application", + "name": "ssl", + "version": "8.1.1" + }, + { + "description": "ERTS CXC 138 10", + "name": "stdlib", + "version": "3.3" + }, + { + "description": "Syntax tools", + "name": "syntax_tools", + "version": "2.1.1" + }, + { + "description": "XML parser", + "name": "xmerl", + "version": "1.3.13" + } + ], + "auth_mechanisms": [ + { + "description": "SASL PLAIN authentication mechanism", + "enabled": true, + "name": "PLAIN" + }, + { + "description": "RabbitMQ Demo challenge-response authentication mechanism", + "enabled": false, + "name": "RABBIT-CR-DEMO" + }, + { + "description": "QPid AMQPLAIN mechanism", + "enabled": true, + "name": "AMQPLAIN" + } + ], + "cluster_links": [], + "config_files": [ + "/etc/rabbitmq/rabbitmq.config" + ], + "context_switches": 75445, + "context_switches_details": { + "rate": 37.8 + }, + "contexts": [ + { + "description": "RabbitMQ Management", + "path": "/", + "port": "15672", + "ssl": "false" + } + ], + "db_dir": "/var/lib/rabbitmq/mnesia/rabbit@prcdsrvv1682", + "disk_free": 313352192, + "disk_free_alarm": false, + "disk_free_details": { + "rate": 929792.0 + }, + "disk_free_limit": 50000000, + "enabled_plugins": [ + "rabbitmq_management" + ], + "exchange_types": [ + { + "description": "AMQP direct exchange, as per the AMQP specification", + "enabled": true, + "name": "direct" + }, + { + "description": "AMQP topic exchange, as per the AMQP specification", + "enabled": true, + "name": "topic" + }, + { + "description": "AMQP fanout exchange, as per the AMQP specification", + "enabled": true, + "name": "fanout" + }, + { + "description": "AMQP headers exchange, as per the AMQP specification", + "enabled": true, + "name": "headers" + } + ], + "fd_total": 65536, + "fd_used": 54, + "fd_used_details": { + "rate": 0.0 + }, + "gc_bytes_reclaimed": 270119840, + "gc_bytes_reclaimed_details": { + "rate": 85476.8 + }, + "gc_num": 3184, + "gc_num_details": { + "rate": 9.0 + }, + "io_file_handle_open_attempt_avg_time": 0.1362, + "io_file_handle_open_attempt_avg_time_details": { + "rate": 0.0 + }, + "io_file_handle_open_attempt_count": 10, + "io_file_handle_open_attempt_count_details": { + "rate": 0.0 + }, + "io_read_avg_time": 33.969, + "io_read_avg_time_details": { + "rate": 0.0 + }, + "io_read_bytes": 1, + "io_read_bytes_details": { + "rate": 0.0 + }, + "io_read_count": 1, + "io_read_count_details": { + 
"rate": 0.0 + }, + "io_reopen_count": 0, + "io_reopen_count_details": { + "rate": 0.0 + }, + "io_seek_avg_time": 0.0, + "io_seek_avg_time_details": { + "rate": 0.0 + }, + "io_seek_count": 0, + "io_seek_count_details": { + "rate": 0.0 + }, + "io_sync_avg_time": 0.0, + "io_sync_avg_time_details": { + "rate": 0.0 + }, + "io_sync_count": 0, + "io_sync_count_details": { + "rate": 0.0 + }, + "io_write_avg_time": 0.0, + "io_write_avg_time_details": { + "rate": 0.0 + }, + "io_write_bytes": 0, + "io_write_bytes_details": { + "rate": 0.0 + }, + "io_write_count": 0, + "io_write_count_details": { + "rate": 0.0 + }, + "log_file": "tty", + "mem_alarm": false, + "mem_limit": 413047193, + "mem_used": 57260080, + "mem_used_details": { + "rate": -1910.4 + }, + "metrics_gc_queue_length": { + "channel_closed": 0, + "channel_consumer_deleted": 0, + "connection_closed": 0, + "consumer_deleted": 0, + "exchange_deleted": 0, + "node_node_deleted": 0, + "queue_deleted": 0, + "vhost_deleted": 0 + }, + "mnesia_disk_tx_count": 0, + "mnesia_disk_tx_count_details": { + "rate": 0.0 + }, + "mnesia_ram_tx_count": 11, + "mnesia_ram_tx_count_details": { + "rate": 0.0 + }, + "msg_store_read_count": 0, + "msg_store_read_count_details": { + "rate": 0.0 + }, + "msg_store_write_count": 0, + "msg_store_write_count_details": { + "rate": 0.0 + }, + "name": "rabbit@prcdsrvv1682", + "net_ticktime": 60, + "os_pid": "114", + "partitions": [], + "proc_total": 1048576, + "proc_used": 322, + "proc_used_details": { + "rate": 0.0 + }, + "processors": 2, + "queue_index_journal_write_count": 0, + "queue_index_journal_write_count_details": { + "rate": 0.0 + }, + "queue_index_read_count": 0, + "queue_index_read_count_details": { + "rate": 0.0 + }, + "queue_index_write_count": 0, + "queue_index_write_count_details": { + "rate": 0.0 + }, + "rates_mode": "basic", + "run_queue": 0, + "running": true, + "sasl_log_file": "tty", + "sockets_total": 58890, + "sockets_used": 0, + "sockets_used_details": { + "rate": 0.0 + }, + "type": "disc", + "uptime": 37139 + } +] diff --git a/metricbeat/module/rabbitmq/doc.go b/metricbeat/module/rabbitmq/doc.go new file mode 100644 index 000000000000..c86d5c59f96c --- /dev/null +++ b/metricbeat/module/rabbitmq/doc.go @@ -0,0 +1,4 @@ +/* +Package rabbitmq is a Metricbeat module that contains MetricSets. +*/ +package rabbitmq diff --git a/metricbeat/module/rabbitmq/node/_meta/data.json b/metricbeat/module/rabbitmq/node/_meta/data.json new file mode 100644 index 000000000000..24702fec3a23 --- /dev/null +++ b/metricbeat/module/rabbitmq/node/_meta/data.json @@ -0,0 +1,19 @@ +{ + "@timestamp":"2016-05-23T08:05:34.853Z", + "beat":{ + "hostname":"beathost", + "name":"beathost" + }, + "metricset":{ + "host":"localhost", + "module":"rabbitmq", + "name":"node", + "rtt":44269 + }, + "rabbitmq":{ + "node":{ + "example": "node" + } + }, + "type":"metricsets" +} diff --git a/metricbeat/module/rabbitmq/node/_meta/docs.asciidoc b/metricbeat/module/rabbitmq/node/_meta/docs.asciidoc new file mode 100644 index 000000000000..b6e1d80a53c9 --- /dev/null +++ b/metricbeat/module/rabbitmq/node/_meta/docs.asciidoc @@ -0,0 +1,3 @@ +=== rabbitmq node MetricSet + +This is the node metricset of the module rabbitmq. 
diff --git a/metricbeat/module/rabbitmq/node/_meta/fields.yml b/metricbeat/module/rabbitmq/node/_meta/fields.yml new file mode 100644 index 000000000000..8243c2f6474a --- /dev/null +++ b/metricbeat/module/rabbitmq/node/_meta/fields.yml @@ -0,0 +1,159 @@ +- name: node + type: group + description: > + node + fields: + - name: disk.free.bytes + type: long + description: > + Disk free space in bytes. + format: bytes + - name: disk.free.limit.bytes + type: long + description: > + Point at which the disk alarm will go off. + format: bytes + - name: fd.total + type: long + description: > + File descriptors available. + - name: fd.used + type: long + description: > + Used file descriptors. + - name: gc.num.count + type: long + description: > + Number of GC operations. + - name: gc.reclaimed.bytes + type: long + description: > + GC bytes reclaimed. + format: bytes + - name: io.file_handle.open_attempt.avg.ms + type: long + description: > + File handle open avg time + - name: io.file_handle.open_attempt.count + type: long + description: > + File handle open attempts + - name: io.read.avg.ms + type: long + description: > + File handle read avg time + - name: io.read.bytes + type: long + description: > + Data read in bytes + format: bytes + - name: io.read.count + type: long + description: > + Data read operations + - name: io.reopen.count + type: long + description: > + Data reopen operations + - name: io.seek.avg.ms + type: long + description: > + Data seek avg time + - name: io.seek.count + type: long + description: > + Data seek operations + - name: io.sync.avg.ms + type: long + description: > + Data sync avg time + - name: io.sync.count + type: long + description: > + Data sync operations + - name: io.write.avg.ms + type: long + description: > + Data write avg time + - name: io.write.bytes + type: long + description: > + Data write in bytes + format: bytes + - name: io.write.count + type: long + description: > + Data write operations + - name: mem.limit.bytes + type: long + description: > + Point at which the memory alarm will go off. + format: bytes + - name: mem.used.bytes + type: long + description: > + Memory used in bytes. + - name: mnesia.disk.tx.count + type: long + description: > + Number of Mnesia transactions which have been performed that required writes to disk. + - name: mnesia.ram.tx.count + type: long + description: > + Number of Mnesia transactions which have been performed that did not require writes to disk. + - name: msg.store_read.count + type: long + description: > + Number of messages which have been read from the message store. + - name: msg.store_write.count + type: long + description: > + Number of messages which have been written to the message store. + - name: name + type: keyword + description: > + Node name + - name: proc.total + type: long + description: > + Maximum number of Erlang processes. + - name: proc.used + type: long + description: > + Number of Erlang processes in use. + - name: processors + type: long + description: > + Number of cores detected and usable by Erlang. + - name: queue.index.journal_write.count + type: long + description: > + Number of records written to the queue index journal. + - name: queue.index.read.count + type: long + description: > + Number of records read from the queue index. + - name: queue.index.write.count + type: long + description: > + Number of records written to the queue index. + - name: run.queue + type: long + description: > + Average number of Erlang processes waiting to run. 
+    - name: socket.total
+      type: long
+      description: >
+        File descriptors available for use as sockets.
+    - name: socket.used
+      type: long
+      description: >
+        File descriptors used as sockets.
+    - name: type
+      type: keyword
+      description: >
+        Node type.
+    - name: uptime
+      type: long
+      description: >
+        Node uptime.
diff --git a/metricbeat/module/rabbitmq/node/data.go b/metricbeat/module/rabbitmq/node/data.go
new file mode 100644
index 000000000000..25ef77558795
--- /dev/null
+++ b/metricbeat/module/rabbitmq/node/data.go
@@ -0,0 +1,155 @@
+package node
+
+import (
+    "encoding/json"
+
+    "github.com/elastic/beats/libbeat/common"
+    "github.com/elastic/beats/libbeat/logp"
+    s "github.com/elastic/beats/metricbeat/schema"
+    c "github.com/elastic/beats/metricbeat/schema/mapstriface"
+)
+
+var (
+    schema = s.Schema{
+        "disk": s.Object{
+            "free": s.Object{
+                "bytes": c.Int("disk_free"),
+                "limit": s.Object{
+                    "bytes": c.Int("disk_free_limit"),
+                },
+            },
+        },
+        "fd": s.Object{
+            "total": c.Int("fd_total"),
+            "used":  c.Int("fd_used"),
+        },
+        "gc": s.Object{
+            "reclaimed": s.Object{
+                "bytes": c.Int("gc_bytes_reclaimed"),
+            },
+            "num": s.Object{
+                "count": c.Int("gc_num"),
+            },
+        },
+        "io": s.Object{
+            "file_handle": s.Object{
+                "open_attempt": s.Object{
+                    "avg": s.Object{
+                        "ms": c.Int("io_file_handle_open_attempt_avg_time"),
+                    },
+                    "count": c.Int("io_file_handle_open_attempt_count"),
+                },
+            },
+            "read": s.Object{
+                "avg": s.Object{
+                    "ms": c.Int("io_read_avg_time"),
+                },
+                "bytes": c.Int("io_read_bytes"),
+                "count": c.Int("io_read_count"),
+            },
+            "reopen": s.Object{
+                "count": c.Int("io_reopen_count"),
+            },
+            "seek": s.Object{
+                "avg": s.Object{
+                    "ms": c.Int("io_seek_avg_time"),
+                },
+                "count": c.Int("io_seek_count"),
+            },
+            "sync": s.Object{
+                "avg": s.Object{
+                    "ms": c.Int("io_sync_avg_time"),
+                },
+                "count": c.Int("io_sync_count"),
+            },
+            "write": s.Object{
+                "avg": s.Object{
+                    "ms": c.Int("io_write_avg_time"),
+                },
+                "bytes": c.Int("io_write_bytes"),
+                "count": c.Int("io_write_count"),
+            },
+        },
+        "mem": s.Object{
+            "limit": s.Object{
+                "bytes": c.Int("mem_limit"),
+            },
+            "used": s.Object{
+                "bytes": c.Int("mem_used"),
+            },
+        },
+        "mnesia": s.Object{
+            "disk": s.Object{
+                "tx": s.Object{
+                    "count": c.Int("mnesia_disk_tx_count"),
+                },
+            },
+            "ram": s.Object{
+                "tx": s.Object{
+                    "count": c.Int("mnesia_ram_tx_count"),
+                },
+            },
+        },
+        "msg": s.Object{
+            "store_read": s.Object{
+                "count": c.Int("msg_store_read_count"),
+            },
+            "store_write": s.Object{
+                "count": c.Int("msg_store_write_count"),
+            },
+        },
+        "name": c.Str("name"),
+        "proc": s.Object{
+            "total": c.Int("proc_total"),
+            "used":  c.Int("proc_used"),
+        },
+        "processors": c.Int("processors"),
+        "queue": s.Object{
+            "index": s.Object{
+                "journal_write": s.Object{
+                    "count": c.Int("queue_index_journal_write_count"),
+                },
+                "read": s.Object{
+                    "count": c.Int("queue_index_read_count"),
+                },
+                "write": s.Object{
+                    "count": c.Int("queue_index_write_count"),
+                },
+            },
+        },
+        "run": s.Object{
+            "queue": c.Int("run_queue"),
+        },
+        "socket": s.Object{
+            "total": c.Int("sockets_total"),
+            "used":  c.Int("sockets_used"),
+        },
+        "type":   c.Str("type"),
+        "uptime": c.Int("uptime"),
+    }
+)
+
+func eventsMapping(content []byte) ([]common.MapStr, error) {
+
+    var nodes []map[string]interface{}
+    err := json.Unmarshal(content, &nodes)
+    if err != nil {
+        logp.Err("Error unmarshalling management API response: %+v", err)
+    }
+
+    events := []common.MapStr{}
+    errors := s.NewErrors()
+
+    for _, node := range nodes {
+        event, errs := eventMapping(node)
+        events = append(events, event)
+        errors.AddErrors(errs)
+
+
} + + return events, errors +} + +func eventMapping(node map[string]interface{}) (common.MapStr, *s.Errors) { + return schema.Apply(node) +} diff --git a/metricbeat/module/rabbitmq/node/node.go b/metricbeat/module/rabbitmq/node/node.go new file mode 100644 index 000000000000..0fb99cbda285 --- /dev/null +++ b/metricbeat/module/rabbitmq/node/node.go @@ -0,0 +1,57 @@ +package node + +import ( + "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/logp" + "github.com/elastic/beats/metricbeat/helper" + "github.com/elastic/beats/metricbeat/mb" + "github.com/elastic/beats/metricbeat/mb/parse" +) + +const ( + defaultScheme = "http" + defaultPath = "/api/nodes" +) + +var ( + hostParser = parse.URLHostParserBuilder{ + DefaultScheme: defaultScheme, + DefaultPath: defaultPath, + }.Build() +) + +func init() { + if err := mb.Registry.AddMetricSet("rabbitmq", "node", New, hostParser); err != nil { + panic(err) + } +} + +type MetricSet struct { + mb.BaseMetricSet + *helper.HTTP +} + +func New(base mb.BaseMetricSet) (mb.MetricSet, error) { + + logp.Experimental("The rabbitmq node metricset is experimental") + + http := helper.NewHTTP(base) + http.SetHeader("Accept", "application/json") + + return &MetricSet{ + base, + http, + }, nil +} + +func (m *MetricSet) Fetch() ([]common.MapStr, error) { + + content, err := m.HTTP.FetchContent() + + if err != nil { + return nil, err + } + + events, _ := eventsMapping(content) + return events, nil +} diff --git a/metricbeat/module/rabbitmq/node/node_integration_test.go b/metricbeat/module/rabbitmq/node/node_integration_test.go new file mode 100644 index 000000000000..b89cbde9ff97 --- /dev/null +++ b/metricbeat/module/rabbitmq/node/node_integration_test.go @@ -0,0 +1,60 @@ +package node + +import ( + "fmt" + "os" + "testing" + + mbtest "github.com/elastic/beats/metricbeat/mb/testing" +) + +func TestData(t *testing.T) { + f := mbtest.NewEventsFetcher(t, getConfig()) + err := mbtest.WriteEvents(f, t) + if err != nil { + t.Fatal("write", err) + } +} + +func getConfig() map[string]interface{} { + return map[string]interface{}{ + "module": "rabbitmq", + "metricsets": []string{"node"}, + "hosts": getTestRabbitMQHost(), + "username": getTestRabbitMQUsername(), + "password": getTestRabbitMQPassword(), + } +} + +const ( + rabbitmqDefaultHost = "localhost" + rabbitmqDefaultPort = "15672" + rabbitmqDefaultUsername = "guest" + rabbitmqDefaultPassword = "guest" +) + +func getTestRabbitMQHost() string { + return fmt.Sprintf("%v:%v", + getenv("RABBITMQ_HOST", rabbitmqDefaultHost), + getenv("RABBITMQ_PORT", rabbitmqDefaultPort), + ) +} + +func getTestRabbitMQUsername() string { + return getenv("RABBITMQ_USERNAME", rabbitmqDefaultUsername) +} + +func getTestRabbitMQPassword() string { + return getenv("RABBITMQ_PASSWORD", rabbitmqDefaultPassword) +} + +func getenv(name, defaultValue string) string { + return strDefault(os.Getenv(name), defaultValue) +} + +func strDefault(a, defaults string) string { + if len(a) == 0 { + return defaults + } + return a +} diff --git a/metricbeat/module/rabbitmq/node/node_test.go b/metricbeat/module/rabbitmq/node/node_test.go new file mode 100644 index 000000000000..c0da0ccbfc8a --- /dev/null +++ b/metricbeat/module/rabbitmq/node/node_test.go @@ -0,0 +1,139 @@ +package node + +import ( + "io/ioutil" + "net/http" + "net/http/httptest" + "path/filepath" + "testing" + + "github.com/elastic/beats/libbeat/common" + mbtest "github.com/elastic/beats/metricbeat/mb/testing" + + "github.com/stretchr/testify/assert" +) + +func 
TestFetchEventContents(t *testing.T) {
+    absPath, err := filepath.Abs("../_meta/testdata/")
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    response, err := ioutil.ReadFile(absPath + "/node_sample_response.json")
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+        w.Header().Set("Content-Type", "application/json")
+        w.WriteHeader(200)
+        w.Write(response)
+    }))
+    defer server.Close()
+
+    config := map[string]interface{}{
+        "module":     "rabbitmq",
+        "metricsets": []string{"node"},
+        "hosts":      []string{server.URL},
+    }
+
+    f := mbtest.NewEventsFetcher(t, config)
+    events, err := f.Fetch()
+    if !assert.NoError(t, err) {
+        t.FailNow()
+    }
+    event := events[0]
+
+    t.Logf("%s/%s event: %+v", f.Module().Name(), f.Name(), event.StringToPrint())
+
+    disk := event["disk"].(common.MapStr)
+    free := disk["free"].(common.MapStr)
+    assert.EqualValues(t, 313352192, free["bytes"])
+
+    limit := free["limit"].(common.MapStr)
+    assert.EqualValues(t, 50000000, limit["bytes"])
+
+    fd := event["fd"].(common.MapStr)
+    assert.EqualValues(t, 65536, fd["total"])
+    assert.EqualValues(t, 54, fd["used"])
+
+    gc := event["gc"].(common.MapStr)
+    num := gc["num"].(common.MapStr)
+    assert.EqualValues(t, 3184, num["count"])
+    reclaimed := gc["reclaimed"].(common.MapStr)
+    assert.EqualValues(t, 270119840, reclaimed["bytes"])
+
+    io := event["io"].(common.MapStr)
+    file_handle := io["file_handle"].(common.MapStr)
+    open_attempt := file_handle["open_attempt"].(common.MapStr)
+    avg := open_attempt["avg"].(common.MapStr)
+    assert.EqualValues(t, 0, avg["ms"])
+    assert.EqualValues(t, 10, open_attempt["count"])
+
+    read := io["read"].(common.MapStr)
+    avg = read["avg"].(common.MapStr)
+    assert.EqualValues(t, 33, avg["ms"])
+    assert.EqualValues(t, 1, read["bytes"])
+    assert.EqualValues(t, 1, read["count"])
+
+    reopen := io["reopen"].(common.MapStr)
+    assert.EqualValues(t, 0, reopen["count"])
+
+    seek := io["seek"].(common.MapStr)
+    avg = seek["avg"].(common.MapStr)
+    assert.EqualValues(t, 0, avg["ms"])
+    assert.EqualValues(t, 0, seek["count"])
+
+    sync := io["sync"].(common.MapStr)
+    avg = sync["avg"].(common.MapStr)
+    assert.EqualValues(t, 0, avg["ms"])
+    assert.EqualValues(t, 0, sync["count"])
+
+    write := io["write"].(common.MapStr)
+    avg = write["avg"].(common.MapStr)
+    assert.EqualValues(t, 0, avg["ms"])
+    assert.EqualValues(t, 0, write["bytes"])
+    assert.EqualValues(t, 0, write["count"])
+
+    mem := event["mem"].(common.MapStr)
+    limit = mem["limit"].(common.MapStr)
+    assert.EqualValues(t, 413047193, limit["bytes"])
+    used := mem["used"].(common.MapStr)
+    assert.EqualValues(t, 57260080, used["bytes"])
+
+    mnesia := event["mnesia"].(common.MapStr)
+    disk = mnesia["disk"].(common.MapStr)
+    tx := disk["tx"].(common.MapStr)
+    assert.EqualValues(t, 0, tx["count"])
+    ram := mnesia["ram"].(common.MapStr)
+    tx = ram["tx"].(common.MapStr)
+    assert.EqualValues(t, 11, tx["count"])
+
+    msg := event["msg"].(common.MapStr)
+    store_read := msg["store_read"].(common.MapStr)
+    assert.EqualValues(t, 0, store_read["count"])
+    store_write := msg["store_write"].(common.MapStr)
+    assert.EqualValues(t, 0, store_write["count"])
+
+    assert.EqualValues(t, "rabbit@prcdsrvv1682", event["name"])
+
+    proc := event["proc"].(common.MapStr)
+    assert.EqualValues(t, 1048576, proc["total"])
+    assert.EqualValues(t, 322, proc["used"])
+
+    assert.EqualValues(t, 2, event["processors"])
+
+    queue := event["queue"].(common.MapStr)
+    index := queue["index"].(common.MapStr)
+    journal_write := index["journal_write"].(common.MapStr)
+    assert.EqualValues(t, 0, journal_write["count"])
+    read = index["read"].(common.MapStr)
+    assert.EqualValues(t, 0, read["count"])
+    write = index["write"].(common.MapStr)
+    assert.EqualValues(t, 0, write["count"])
+
+    run := event["run"].(common.MapStr)
+    assert.EqualValues(t, 0, run["queue"])
+
+    socket := event["socket"].(common.MapStr)
+    assert.EqualValues(t, 58890, socket["total"])
+    assert.EqualValues(t, 0, socket["used"])
+
+    assert.EqualValues(t, "disc", event["type"])
+
+    assert.EqualValues(t, 37139, event["uptime"])
+
+}
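The unit test above drives the whole fetch path through `httptest`. A smaller companion test can exercise only the `eventsMapping` schema in `data.go` against the recorded fixture. The sketch below is not part of the patch: the test name is made up, and it reuses only identifiers that already exist in this package and file (`eventsMapping`, `ioutil.ReadFile`, `assert`):

[source,go]
----
func TestEventsMappingFromFixture(t *testing.T) {
	// Load the same recorded management-API response used by
	// TestFetchEventContents, but feed it straight into the mapping.
	content, err := ioutil.ReadFile("../_meta/testdata/node_sample_response.json")
	if err != nil {
		t.Fatal(err)
	}

	events, _ := eventsMapping(content)
	if !assert.Len(t, events, 1) {
		t.FailNow()
	}

	// Spot-check a couple of fields taken directly from the fixture.
	event := events[0]
	assert.EqualValues(t, "rabbit@prcdsrvv1682", event["name"])
	assert.EqualValues(t, "disc", event["type"])
}
----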