# kafka-win-config.yml.sample
integrations:
# Example kafka-inventory. This gives an example of collecting inventory with the integration
- name: nri-kafka
env:
NR_JMX: "C:\\Program Files\\New Relic\\nrjmx\\nrjmx.bat"
INVENTORY: "true"
CLUSTER_NAME: "testcluster2"
ZOOKEEPER_HOSTS: '[{"host": "localhost", "port": 2181}]'
# The only supported value is digest. If empty, no authentication is used.
# ZOOKEEPER_AUTH_SCHEME: "digest"
# Only honored if ZOOKEEPER_AUTH_SCHEME is not empty.
# If using "user" authentication, the credentials must be specified as a string of the form "<user>:<password>"
# Example: 'zookeeperuser:zookeeperpass'
# ZOOKEEPER_AUTH_SECRET: "username:password"
# Below are the fields used to fine tune/toggle topic inventory collection.
# In order to collect topics the "topic_mode" field must be set to "all", "list", or "regex"
TOPIC_MODE: 'all'
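    # For example, the "list" and "regex" modes also require the matching set to be
    # supplied; the topic names and pattern below are hypothetical:
    # TOPIC_MODE: 'list'
    # TOPIC_LIST: '["transactions", "clickstream"]'
    # or:
    # TOPIC_MODE: 'regex'
    # TOPIC_REGEX: 'orders\..*'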
interval: 15s
labels:
env: production
role: kafka
inventory_source: config/kafka
# Example kafka-consumer-offsets. This gives an example configuration for collecting consumer offsets for the cluster
- name: nri-kafka
env:
NR_JMX: "C:\\Program Files\\New Relic\\nrjmx\\nrjmx.bat"
METRICS: "false"
INVENTORY: "false"
CONSUMER_OFFSET: "true"
CLUSTER_NAME: "testcluster3"
AUTODISCOVER_STRATEGY: "bootstrap"
BOOTSTRAP_BROKER_HOST: "localhost"
BOOTSTRAP_BROKER_KAFKA_PORT: 9092
BOOTSTRAP_BROKER_KAFKA_PROTOCOL: PLAINTEXT
# A regex pattern that matches the consumer groups to collect metrics from
CONSUMER_GROUP_REGEX: '.*'
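    # For example, to restrict collection to consumer groups with a hypothetical "prod-" prefix:
    # CONSUMER_GROUP_REGEX: '^prod-.*'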
interval: 15s
labels:
env: production
role: kafka
inventory_source: config/kafka
# Example kafka-metrics-zookeeper-discovery. This gives an example of autodiscovery of brokers with Zookeeper
- name: nri-kafka
env:
NR_JMX: "C:\\Program Files\\New Relic\\nrjmx\\nrjmx.bat"
METRICS: "true"
# A cluster name is required to uniquely identify this collection result in Insights
CLUSTER_NAME: "testcluster1"
    # Override the Kafka API version to target. Defaults to 1.0.0, which will work for all post-1.0.0 versions. Older versions of the API may be missing features.
KAFKA_VERSION: "1.0.0"
# How to find brokers. Either "bootstrap" or "zookeeper"
AUTODISCOVER_STRATEGY: "zookeeper"
    # A list of Zookeeper hosts to discover brokers from.
    # Only required and used if AUTODISCOVER_STRATEGY is "zookeeper".
    #
    # The "zookeeper_hosts" field is a JSON array; each entry in the array holds the connection information for one Zookeeper node.
    # Each entry should have the following fields:
    # - host: The IP or hostname of a Zookeeper node. If the New Relic agent is installed on a Zookeeper node, "localhost" is an acceptable value.
    # - port: The port Zookeeper is listening on for incoming requests. If omitted, a default port of 2181 will be used.
ZOOKEEPER_HOSTS: '[{"host": "localhost", "port": 2181}]'
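    # A multi-node example; the hostnames below are hypothetical:
    # ZOOKEEPER_HOSTS: '[{"host": "zk1.example.com", "port": 2181}, {"host": "zk2.example.com", "port": 2181}]'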
# The only supported value is digest. If empty, no authentication is used.
# ZOOKEEPER_AUTH_SCHEME: "digest"
# Only honored if ZOOKEEPER_AUTH_SCHEME is not empty.
# If using "user" authentication, the credentials must be specified as a string of the form "<user>:<password>"
# Example: 'zookeeperuser:zookeeperpass'
# ZOOKEEPER_AUTH_SECRET: "username:password"
# If the Kafka configuration files are not in the root node of Zookeeper, an alternative root node can be specified.
# The alternative root must have a leading slash.
ZOOKEEPER_PATH: "/kafka-root"
# It is common to use the same JMX configuration across a Kafka cluster
# The default username and password are the credentials that will be used to make
    # a JMX connection to each broker found by Zookeeper. These values will also
    # be used when connecting to a consumer and/or producer if the "username" or "password"
    # field is omitted.
DEFAULT_JMX_USER: "username"
DEFAULT_JMX_PASSWORD: "password"
    # Below are the fields used to fine tune/toggle topic metric collection.
    # In order to collect topics the "topic_mode" field must be set to "all", "list", or "regex". If the field is set to "all",
    # a Zookeeper connection is required (at least the "zookeeper_hosts" field must be set), as topics are looked up via Zookeeper.
    #
    # It is recommended to use the "list" option to monitor a specific set of topics. If using "list" mode, the "topic_list"
    # field should be filled out. The "topic_list" is a JSON array of topic names to be monitored.
    # Example of topic_list: '["topic1", "topic2"]'
    #
    # If monitoring topics via the "all" or "list" option for "topic_mode", the topic size can be collected from Zookeeper by setting
    # "collect_topic_size" to true. This operation is intensive and can take a while for a large number of topics.
    # It is recommended to only enable this feature if using a small "topic_list".
    # If the field is omitted, it will default to false.
TOPIC_MODE: "regex"
    # TOPIC_LIST: '["topic1", "topic2", "topic3"]'
TOPIC_REGEX: 'topic\d+'
# collect_topic_size collects the on-disk size for the topics collected. This can be very time intensive for large clusters,
# so it is disabled by default
COLLECT_TOPIC_SIZE: false
    # topic_bucket is used to split topic metric collection across multiple instances. This is useful when the number of topics you want to collect
    # is too large for a single collection and the topics are not easily partitionable with a regex. It works by hashing the topic name, then using the hash to split
    # the topics across a number of buckets. The first number is the index of the current instance; the second is the total number of instances the
    # topics are split across. For example, if you want the topics matched by `topic_regex: 'mytopic.*'` to be split across three instances, one
    # instance will be configured with `topic_bucket: 1/3`, one with `2/3`, and one with `3/3`.
TOPIC_BUCKET: '1/3'
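    # A sketch of the three-instance split described above, using the example regex:
    # instance 1:  TOPIC_REGEX: 'mytopic.*'  TOPIC_BUCKET: '1/3'
    # instance 2:  TOPIC_REGEX: 'mytopic.*'  TOPIC_BUCKET: '2/3'
    # instance 3:  TOPIC_REGEX: 'mytopic.*'  TOPIC_BUCKET: '3/3'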
interval: 15s
labels:
env: production
role: kafka
inventory_source: config/kafka
# Example kafka-metrics-bootstrap-discovery. This gives an example of autodiscovery of brokers with a bootstrap broker
- name: nri-kafka
env:
NR_JMX: "C:\\Program Files\\New Relic\\nrjmx\\nrjmx.bat"
METRICS: "true"
# A cluster name is required to uniquely identify this collection result in Insights
CLUSTER_NAME: "testcluster1"
AUTODISCOVER_STRATEGY: "bootstrap"
# Bootstrap broker arguments. These configure a connection to a single broker. The rest of the brokers in the cluster
# will be discovered using that connection.
BOOTSTRAP_BROKER_HOST: "localhost"
BOOTSTRAP_BROKER_KAFKA_PORT: 9092
    BOOTSTRAP_BROKER_KAFKA_PROTOCOL: PLAINTEXT # Currently supported values are PLAINTEXT and SSL
BOOTSTRAP_BROKER_JMX_PORT: 9999
# JMX user and password default to `default_jmx_user` and `default_jmx_password` if unset
BOOTSTRAP_BROKER_JMX_USER: admin
BOOTSTRAP_BROKER_JMX_PASSWORD: password
    # Only collect metrics from the configured bootstrap broker. The integration will not attempt to collect metrics
    # for any other broker, nor will it collect cluster-level metrics such as topic metrics. This is useful for deployments
    # like Kubernetes, where a single integration instance is desired per broker.
LOCAL_ONLY_COLLECTION: false
TOPIC_MODE: "all"
COLLECT_TOPIC_SIZE: false
interval: 15s
labels:
env: production
role: kafka
inventory_source: config/kafka
# Example kafka-producer-consumer-metrics. This gives an example of collecting JMX metrics from consumers and producers
- name: nri-kafka
env:
NR_JMX: "C:\\Program Files\\New Relic\\nrjmx\\nrjmx.bat"
METRICS: "true"
CLUSTER_NAME: "testcluster3"
    # In order to collect Java producer and consumer metrics, the "producers" and "consumers" fields should be filled out.
    # Both fields are JSON arrays, with each entry being a separate Java producer or consumer in its respective field.
    # Each entry should have the following fields:
    # - name: The client.id of the producer/consumer as it appears in Kafka
    # - host: The IP or hostname of the producer/consumer. If omitted, will use the value of the "default_jmx_host" field
    # - port: The port JMX is listening on for the producer/consumer. If omitted, will use the value of the "default_jmx_port" field
    # - username: The username used to connect to JMX. If omitted, will use the value of the "default_jmx_user" field
    # - password: The password used to connect to JMX. If omitted, will use the value of the "default_jmx_password" field
    # Example: {"name": "myProducer", "host": "localhost", "port": 24, "username": "me", "password": "secret"}
PRODUCERS: '[{"name": "myProducer", "host": "localhost", "port": 24, "username": "me", "password": "secret"}]'
CONSUMERS: '[{"name": "myConsumer", "host": "localhost", "port": 24, "username": "me", "password": "secret"}]'
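    # Multiple clients can be listed in a single array; the names below are hypothetical:
    # PRODUCERS: '[{"name": "producerA"}, {"name": "producerB", "host": "otherhost", "port": 9998}]'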
    # If several producers/consumers are on the same host, an agent can be installed on that host and the
    # "default_jmx_host" and "default_jmx_port" fields can be set once and used for all producers/consumers that
    # do not have the "host" or "port" field, respectively.
    # These fields can be removed if each producer/consumer has its own "host" and/or "port" field filled out.
DEFAULT_JMX_HOST: "localhost"
DEFAULT_JMX_PORT: "9999"
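    # With the defaults above set, an entry may omit "host" and "port" entirely, e.g.:
    # CONSUMERS: '[{"name": "myConsumer", "username": "me", "password": "secret"}]'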
interval: 15s
labels:
env: production
role: kafka
inventory_source: config/kafka
# Example kafka-kerberos-auth
- name: nri-kafka
env:
NR_JMX: "C:\\Program Files\\New Relic\\nrjmx\\nrjmx.bat"
METRICS: "true"
# A cluster name is required to uniquely identify this collection result in Insights
CLUSTER_NAME: "testcluster1"
AUTODISCOVER_STRATEGY: "bootstrap"
# Bootstrap broker arguments. These configure a connection to a single broker. The rest of the brokers in the cluster
# will be discovered using that connection.
BOOTSTRAP_BROKER_HOST: "localhost"
BOOTSTRAP_BROKER_KAFKA_PORT: 9092
    BOOTSTRAP_BROKER_KAFKA_PROTOCOL: PLAINTEXT # Currently supported values are PLAINTEXT and SSL
BOOTSTRAP_BROKER_JMX_PORT: 9999
# JMX user and password default to `default_jmx_user` and `default_jmx_password` if unset
BOOTSTRAP_BROKER_JMX_USER: admin
BOOTSTRAP_BROKER_JMX_PASSWORD: password
# Kerberos authentication arguments
SASL_MECHANISM: GSSAPI
SASL_GSSAPI_REALM: SOMECORP.COM
SASL_GSSAPI_SERVICE_NAME: Kafka
SASL_GSSAPI_USERNAME: kafka
    # Point these at the keytab and krb5.conf locations on this host; the paths below are examples
    SASL_GSSAPI_KEY_TAB_PATH: "C:\\ProgramData\\kafka.keytab"
    SASL_GSSAPI_KERBEROS_CONFIG_PATH: "C:\\ProgramData\\krb5.conf"
    # Disables FAST negotiation, which causes issues with Active Directory
    # SASL_GSSAPI_DISABLE_FAST_NEGOTIATION: false
    # Only collect metrics from the configured bootstrap broker. The integration will not attempt to collect metrics
    # for any other broker, nor will it collect cluster-level metrics such as topic metrics. This is useful for deployments
    # like Kubernetes, where a single integration instance is desired per broker.
LOCAL_ONLY_COLLECTION: false
TOPIC_MODE: "all"
COLLECT_TOPIC_SIZE: false
interval: 15s
labels:
env: production
role: kafka
inventory_source: config/kafka