## values-flx-7.yaml
common:
## Elasticsearch cluster name, maps to cluster.name
cluster_name: "searchguard"
## Docker images used in the Kubernetes cluster. The defaults are OSS images provided by Search Guard.
## If you use custom images for ES, Kibana, and sg-admin instances, please use the following naming conventions:
## - for SG Admin image: <images.repository>/<images.provider>/<images.sgadmin_base_image>:<elkversion>-<sgversion>
## - for Elasticsearch OSS image: <images.repository>/<images.provider>/<images.elasticsearch_base_image>:<elkversion>-oss-<sgversion>
## - for Elasticsearch non-OSS image: <images.repository>/<images.provider>/<images.elasticsearch_base_image>:<elkversion>-<sgversion>
## - for Kibana OSS image: <images.repository>/<images.provider>/<images.kibana_base_image>:<elkversion>-oss-<sgversion>
## - for Kibana non-OSS image: <images.repository>/<images.provider>/<images.kibana_base_image>:<elkversion>-<sgversion>
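## For example, with the defaults below (elkversion 7.17.18, sgversion 1.6.0-flx),
## the non-OSS Elasticsearch image should resolve to:
## docker.io/floragunncom/sg-elasticsearch-h4:7.17.18-1.6.0-flx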
images:
repository: "docker.io"
provider: "floragunncom"
elasticsearch_base_image: "sg-elasticsearch-h4"
kibana_base_image: "sg-kibana-h4"
sgctl_base_image: "sg-sgctl-h4"
kubectl_base_image: "sg-kubectl-h4"
## Fill in server, username, password, and email to authenticate to a private Docker registry, or specify a pre-provisioned imagePullSecret with the required data
docker_registry:
enabled: false
# server: https://index.docker.io/v1/
# username: username
# password: password
# email: user@example.com
# imagePullSecret: <your-custom-secret>
## Kubelet image pull policy
## https://kubernetes.io/docs/concepts/containers/images/
# pullPolicy: Always
pullPolicy: IfNotPresent
## Elasticsearch and Kibana version
## See what's available: https://hub.docker.com/r/floragunncom/sg-elasticsearch/tags
elkversion: "7.17.18"
## Search Guard plugin version
sgversion: "1.6.0-flx"
## Search Guard Kibana plugin version
## See what's available: https://hub.docker.com/r/floragunncom/sg-kibana/tags
sgkibanaversion: "1.6.0-flx"
sgctl_version: 1.6.0
## If true, all free and basic X-Pack features are also installed
## If false, only the "oss" versions of Elasticsearch and Kibana get installed
xpack_basic: true
## DN of the admin certificate
## See https://docs.search-guard.com/latest/sgadmin#configuring-the-admin-certificate
admin_dn:
- "CN=admin,OU=Ops,O=Example Com\\, Inc.,DC=example,DC=com"
## Search Guard needs to securely and reliably identify internal communication between
## Elasticsearch nodes (inter-node traffic). This communication happens for example if
## one node receives a GET request on the HTTP layer, but needs to forward it to another
## node that holds the actual data.
## See https://docs.search-guard.com/latest/tls-in-production#node-certificates
nodes_dn:
- "CN=*-esnode,OU=Ops,O=Example Com\\, Inc.,DC=example,DC=com"
## Enable or disable the Search Guard enterprise modules
## If this is set to 'true' and you run Search Guard in production,
## you need to obtain a license
sg_enterprise_modules_enabled: true
## Automatically run sgctl whenever necessary
update_sgconfig_on_change: true
## Restart pods automatically when their configuration changes
restart_pods_on_config_change: true
## Enable the Pod Disruption Budget feature for ES and Kibana pods. See https://kubernetes.io/docs/tasks/run-application/configure-pdb/
pod_disruption_budget_enable: true
## After obtaining a Search Guard license, you can add it here. If you don't have a license, please specify "none"
license: none
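## A sketch of adding a license, assuming it is pasted as a multi-line string:
# license: |
#   ...your Search Guard license...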
## By default, the jobs that generate certificates are removed after completion by cleanup jobs that start every minute.
## Enable debug_job_mode to suspend the cleanup jobs and investigate the results of the sgctl-preinstall and sgctl-postinstall jobs
debug_job_mode: false
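## If true, the cluster.initial_master_nodes bootstrap setting is removed from the
## configuration after installation; Elasticsearch recommends not keeping it once
## the cluster has formed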
remove_initial_master_nodes_after_install: true
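## If true, init containers apply kernel settings such as vm.max_map_count, which
## Elasticsearch requires (a sketch of intent; check the chart templates for the
## exact sysctls applied)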
init_sysctl: true
## Defines the service type for all Elasticsearch outward-facing (non-discovery) client services.
## This does not apply to Kibana.
## WARNING: Setting this to 'LoadBalancer' will probably expose Elasticsearch to the outside/internet
serviceType: ClusterIP
# serviceType: NodePort
# serviceType: LoadBalancer
## Static node port for client services. If omitted, a random one is chosen. Only applies if serviceType == NodePort
## This does not apply to Kibana.
# nodePort: 30007
## Load balancer IP for client services. Only applies when serviceType == LoadBalancer and is supported by the cloud provider
## This does not apply to Kibana.
# loadBalancerIp: 192.0.2.127
## AWS specific ELB annotations for client services (https://kubernetes.io/docs/concepts/services-networking/service/#ssl-support-on-aws)
# service_annotations:
# service.beta.kubernetes.io/aws-load-balancer-internal: "true"
# service.beta.kubernetes.io/aws-load-balancer-backend-protocol: (https|http|ssl|tcp)
# service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "443,8443"
# ...
# ... see below for more options
## PKI approaches are specified with the following keys:
## Set sgctl_certificates_enabled to true if certificates are self-signed and generated by the SG TLS tool
sgctl_certificates_enabled: true
## Set ca_certificates_enabled to true if the CA cert and key are copied to the ../keys/ca/ folder and used by the cluster to generate all certificates for nodes and services
ca_certificates_enabled: false
## Create pods with the sgctl.sh tool installed
sgctl_cli: false
## To activate dynamic configuration stored in secrets, use the configuration described in the file examples/common/configuration_from_secret
sg_dynamic_configuration_from_secret:
enabled: false
secret_name: "sg-dynamic-configuration-secret"
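## A minimal sketch of creating such a secret (the file names are illustrative;
## see examples/common/configuration_from_secret for the expected layout):
# kubectl create secret generic sg-dynamic-configuration-secret \
#   --from-file=sg_authc.yml --from-file=sg_internal_users.yml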
tls:
## root_dn only has an effect when sgctl_certificates_enabled is false and ca_certificates_enabled is true
root_dn: 'CN=root-ca,OU=CA,O=Example Com\, Inc.,DC=example,DC=com'
## node_dn must match common.nodes_dn
node_dn: 'CN=$NODE_NAME-esnode,OU=Ops,O=Example Com\, Inc.,DC=example,DC=com'
## admin_dn must match common.admin_dn and may not overlap with common.nodes_dn
admin_dn: 'CN=admin,OU=Ops,O=Example Com\, Inc.,DC=example,DC=com'
keysize: 2048
validity_days: 365
certificates_directory: secrets
## Any extra or specific configuration that is needed can be added here.
## It will be added to the elasticsearch.yml files on all nodes
config:
logger.org.elasticsearch: "ERROR"
# logger.com.floragunn: "ERROR"
## memory_lock not necessary because swap is off on K8S
# bootstrap.memory_lock: false
# http.compression: false
# http.cors.enabled: false
# http.cors.allow-origin: "*"
# index.codec: best_compression
ingest.geoip.downloader.enabled: false
searchguard.restapi.roles_enabled: ["SGS_ALL_ACCESS","sg_all_access"]
searchguard.check_snapshot_restore_write_privileges: true
searchguard.audit.type: internal_elasticsearch
## Configure additional users (maps to sg_internal_users.yml)
# users:
# demouser:
# hash: ${envbc.SG_DEMOUSER_PWD}
# backend_roles:
# - beatsreader
## Configure additional rolemappings (maps to sg_roles_mapping.yml)
rolesmapping:
# demo roles:
SGS_ALL_ACCESS:
reserved: true
backend_roles:
- "admin"
description: "Maps admin to SGS_ALL_ACCESS"
SGS_KIBANA_USER:
reserved: false
backend_roles:
- "kibanauser"
description: "Maps kibanauser to SGS_KIBANA_USER"
SGS_READALL:
reserved: true
backend_roles:
- "readall"
SGS_KIBANA_SERVER:
reserved: true
users:
- "kibanaserver"
## Configure additional roles (maps to sg_roles.yml)
# roles:
# sg_read_beats:
# cluster_permissions:
# - SGS_CLUSTER_COMPOSITE_OPS_RO
# index_permissions:
# - index_patterns:
# - "*beat*"
# allowed_actions:
# - SGS_READ
authc:
debug: false
auth_domains:
- type: basic/internal_users_db
authz:
debug: false
ignore_unauthorized_indices.enabled: true
frontend_authc:
default:
debug: false
auth_domains:
- type: basic
- type: saml
enabled: false
label: "SAML Login"
saml.idp.metadata_url: "https://your.idp.example.com/saml-metadata.xml"
saml.idp.entity_id: urn:saml-metadata-entity-id
saml.sp.entity_id: service-provider-id
user_mapping.roles.from: saml_response.roles
- type: oidc
enabled: false
label: "OIDC Login"
oidc.client_id: "your-oidc-client-id"
oidc.client_secret: "your-oidc-client-secret"
oidc.idp.openid_configuration_url: "https://your.idp.example.com/auth/realms/master/.well-known/openid-configuration"
user_mapping.roles.from: oidc_id_token.roles
# blocks:
# demo_ip_blocked:
# type: "ip"
# verdict: "disallow"
# value: ["172.16.0.254", "172.16.0.230"]
# description: "Demo IP blocked, i.e. this specific IP gets blocked"
frontend_multi_tenancy:
default:
enabled: true
index: .kibana
server_user: kibanaserver
action_groups:
MY_ACTION_GROUP:
allowed_actions:
- "indices:data/read/search*"
- "indices:data/read/msearch*"
tenants:
human_resources:
description: "The human resources tenant"
# auth_token_service:
# enabled: true
# jwt_signing_key_hs512: "..."
# jwt_encryption_key_a256kw: "..." # Omit this to have unencrypted keys
# max_validity: "1y" # Omit this to have keys with unlimited lifetime
# max_tokens_per_user: 100
## Tolerations for all pods which are not master, client, data, or kibana
# tolerations:
# - key: "key1"
# operator: "Equal"
# value: "value1"
# effect: "NoSchedule"
# - key: "key1"
# operator: "Equal"
# value: "value1"
# effect: "NoExecute"
## Node affinity for all pods which are not master, client, data, or kibana
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: kubernetes.io/os
# operator: In
# values:
# - linux
# preferredDuringSchedulingIgnoredDuringExecution:
# - weight: 1
# preference:
# matchExpressions:
# - key: label-1
# operator: In
# values:
# - key-1
## Client/ingest nodes can execute pre-processing pipelines, composed of
## one or more ingest processors. Depending on the type of operations performed
## by the ingest processors and the required resources, it may make sense to
## have dedicated ingest nodes that only perform this specific task.
custom_elasticsearch_keystore:
enabled: false
client:
## For production we recommend at least 2 replicas
replicas: 2
## 'hard' means that pods will only be scheduled if there are enough nodes for them and that they will never end up on the same node.
## 'soft' enforces this on a "best effort" basis
## For production we recommend setting this to 'hard'
antiAffinity: "soft"
## Specify this if ca_certificates_enabled: true
storage: 2Gi
## Set storageClass if non-default storage is used.
# storageClass:
## For production we recommend setting this to at least 4g and adjusting the memory limits and requests accordingly
heapSize: 1g
## More info on what this setting does is in the config map. Only change this
## if you set the cpu limit to over 1 full cpu.
processors: 1
labels: {}
annotations: {}
resources:
limits:
cpu: 500m
memory: 1500Mi
requests:
cpu: 100m
memory: 1500Mi
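## A production sizing sketch (an assumption, following the common guidance of
## keeping the heap at roughly half of the container memory):
# heapSize: 4g
# resources:
#   limits:
#     memory: 8Gi
#   requests:
#     memory: 8Gi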
tolerations:
- key: "key1"
operator: "Equal"
value: "value1"
effect: "NoSchedule"
- key: "key1"
operator: "Equal"
value: "value1"
effect: "NoExecute"
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/os
operator: In
values:
- linux
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 1
preference:
matchExpressions:
- key: label-1
operator: In
values:
- key-1
## Data nodes hold the shards that contain the documents you have indexed. Data
## nodes handle data related operations like CRUD, search, and aggregations.
## These operations are I/O-, memory-, and CPU-intensive. It is important to
## monitor these resources and to add more data nodes if they are overloaded.
##
## The main benefit of having dedicated data nodes is the separation of the
## master and data roles.
data:
## For production we recommend at least 2 replicas
replicas: 2
## 'hard' means that pods will only be scheduled if there are enough nodes for them and that they will never end up on the same node.
## 'soft' enforces this on a "best effort" basis
## For production we recommend setting this to 'hard'
antiAffinity: "soft"
storage: 4Gi
## Set storageClass if non-default storage is used.
# storageClass: "gp2"
## For production we recommend setting this to at least 8g and adjusting the memory limits and requests accordingly
heapSize: 1g
## More info on what this setting does is in the config map. Only change this
## if you set the cpu limit to over 1 full cpu.
processors: 1
labels: {}
annotations: {}
resources:
limits:
cpu: 1
memory: 1500Mi
requests:
cpu: 1
memory: 1500Mi
# tolerations:
# - key: "key1"
# operator: "Equal"
# value: "value1"
# effect: "NoSchedule"
# - key: "key1"
# operator: "Equal"
# value: "value1"
# effect: "NoExecute"
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: kubernetes.io/os
# operator: In
# values:
# - linux
# preferredDuringSchedulingIgnoredDuringExecution:
# - weight: 1
# preference:
# matchExpressions:
# - key: label-1
# operator: In
# values:
# - key-1
## The master node is responsible for lightweight cluster-wide actions such as
## creating or deleting an index, tracking which nodes are part of the
## cluster, and deciding which shards to allocate to which nodes. It is
## important for cluster health to have a stable master node.
master:
## For production we recommend at least 3 replicas. Number must be odd.
replicas: 3
## 'hard' means that pods will only be scheduled if there are enough nodes for them and that they will never end up on the same node.
## 'soft' enforces this on a "best effort" basis
## For production we recommend setting this to 'hard'
antiAffinity: "soft"
storage: 2Gi
## Set storageClass if non-default storage is used.
# storageClass: "gp2"
## For production we recommend setting this to at least 2g and adjusting the memory limits and requests accordingly
heapSize: 1g
## More info on what this setting does is in the config map. Only change this
## if you set the cpu limit to over 1 full cpu.
processors: 1
labels: {}
annotations: {}
resources:
limits:
cpu: 500m
memory: 1500Mi
requests:
cpu: 100m
memory: 1500Mi
# tolerations:
# - key: "key1"
# operator: "Equal"
# value: "value1"
# effect: "NoSchedule"
# - key: "key1"
# operator: "Equal"
# value: "value1"
# effect: "NoExecute"
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: kubernetes.io/os
# operator: In
# values:
# - linux
# preferredDuringSchedulingIgnoredDuringExecution:
# - weight: 1
# preference:
# matchExpressions:
# - key: label-1
# operator: In
# values:
# - key-1
kibana:
## Incoming port of the service
httpPort: 5601
replicas: 1
## 'hard' means that pods will only be scheduled if there are enough nodes for them and that they will never end up on the same node.
## 'soft' enforces this on a "best effort" basis
## For production we recommend setting this to 'hard'
antiAffinity: "soft"
## Specify this if ca_certificates_enabled: true
storage: 2Gi
## Set storageClass if non-default storage is used.
# storageClass:
## Defines the service type for all Kibana outward-facing (non-discovery) services.
## WARNING: Setting this to 'LoadBalancer' will probably expose Kibana to the outside/internet
serviceType: ClusterIP
# serviceType: NodePort
# serviceType: LoadBalancer
## Static node port. If omitted, a random one is chosen. Only applies if serviceType == NodePort
# nodePort: 30007
## Load balancer IP. Only applies when serviceType == LoadBalancer and is supported by the cloud provider
# loadBalancerIp: 192.0.2.127
## AWS specific ELB annotations (https://kubernetes.io/docs/concepts/services-networking/service/#ssl-support-on-aws)
# service_annotations:
# service.beta.kubernetes.io/aws-load-balancer-internal: "true"
# service.beta.kubernetes.io/aws-load-balancer-backend-protocol: (https|http|ssl|tcp)
# service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "443,8443"
# service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012
# service.beta.kubernetes.io/aws-load-balancer-ssl-negotiation-policy: "ELBSecurityPolicy-TLS-1-2-2017-01"
# service.beta.kubernetes.io/aws-load-balancer-proxy-protocol: "*"
# ## Specifies whether access logs are enabled for the load balancer
# service.beta.kubernetes.io/aws-load-balancer-access-log-enabled: "true"
# ## The interval for publishing the access logs. You can specify an interval of either 5 or 60 (minutes).
# service.beta.kubernetes.io/aws-load-balancer-access-log-emit-interval: "60"
# ## The name of the Amazon S3 bucket where the access logs are stored
# service.beta.kubernetes.io/aws-load-balancer-access-log-s3-bucket-name: "my-bucket"
# ## The logical hierarchy you created for your Amazon S3 bucket, for example `my-bucket-prefix/prod`
# service.beta.kubernetes.io/aws-load-balancer-access-log-s3-bucket-prefix: "my-bucket-prefix/prod"
# service.beta.kubernetes.io/aws-load-balancer-connection-draining-enabled: "true"
# service.beta.kubernetes.io/aws-load-balancer-connection-draining-timeout: "60"
# ## The time, in seconds, that the connection is allowed to be idle (no data has been sent
# ## over the connection) before it is closed by the load balancer
# service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: "60"
# ## Specifies whether cross-zone load balancing is enabled for the load balancer
# service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled: "true"
# ## A comma-separated list of key-value pairs which will be recorded as
# ## additional tags in the ELB.
# service.beta.kubernetes.io/aws-load-balancer-additional-resource-tags: "environment=prod,owner=devops"
# ## The number of successive successful health checks required for a backend to
# ## be considered healthy for traffic. Defaults to 2, must be between 2 and 10
# service.beta.kubernetes.io/aws-load-balancer-healthcheck-healthy-threshold: ""
# ## The number of unsuccessful health checks required for a backend to be
# ## considered unhealthy for traffic. Defaults to 6, must be between 2 and 10
# service.beta.kubernetes.io/aws-load-balancer-healthcheck-unhealthy-threshold: "3"
# ## The approximate interval, in seconds, between health checks of an
# ## individual instance. Defaults to 10, must be between 5 and 300
# service.beta.kubernetes.io/aws-load-balancer-healthcheck-interval: "20"
# ## The amount of time, in seconds, during which no response means a failed
# ## health check. This value must be less than the service.beta.kubernetes.io/aws-load-balancer-healthcheck-interval
# ## value. Defaults to 5, must be between 2 and 60
# service.beta.kubernetes.io/aws-load-balancer-healthcheck-timeout: "5"
# ## A list of existing security groups to be configured on the ELB created. Unlike the annotation
# ## service.beta.kubernetes.io/aws-load-balancer-extra-security-groups, this replaces all other
# ## security groups previously assigned to the ELB and also overrides the creation
# ## of a uniquely generated security group for this ELB.
# ## The first security group ID on this list is used as a source to permit incoming traffic to
# ## target worker nodes (service traffic and health checks).
# ## If multiple ELBs are configured with the same security group ID, only a single permit line
# ## will be added to the worker node security groups, that means if you delete any
# ## of those ELBs it will remove the single permit line and block access for all ELBs that shared the same security group ID.
# ## This can cause a cross-service outage if not used properly
# service.beta.kubernetes.io/aws-load-balancer-security-groups: "sg-53fae93f"
# ## A list of additional security groups to be added to the created ELB, this leaves the uniquely
# ## generated security group in place, this ensures that every ELB
# ## has a unique security group ID and a matching permit line to allow traffic to the target worker nodes
# ## (service traffic and health checks).
# ## Security groups defined here can be shared between services.
# service.beta.kubernetes.io/aws-load-balancer-extra-security-groups: "sg-53fae93f,sg-42efd82e"
# ## A comma-separated list of key-value pairs which are used
# ## to select the target nodes for the load balancer
# service.beta.kubernetes.io/aws-load-balancer-target-node-labels: "ingress-gw,gw-name=public-api"
# tolerations:
# - key: "key1"
# operator: "Equal"
# value: "value1"
# effect: "NoSchedule"
# - key: "key1"
# operator: "Equal"
# value: "value1"
# effect: "NoExecute"
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: kubernetes.io/os
# operator: In
# values:
# - linux
# preferredDuringSchedulingIgnoredDuringExecution:
# - weight: 1
# preference:
# matchExpressions:
# - key: label-1
# operator: In
# values:
# - key-1
readinessProbe:
httpGet:
## path might need to be changed when server.basePath is set
path: /api/status
port: http
scheme: HTTPS
initialDelaySeconds: 60
timeoutSeconds: 5
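## A sketch of the probe path when a base path is configured (assumes
## server.basePath: /kibana and server.rewriteBasePath: true in the config below):
# path: /kibana/api/status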
## Additional config which will be appended to kibana.yml
config:
# server.publicBaseUrl: https://domain:5601
elasticsearch.requestHeadersWhitelist: ["sgtenant","authorization"]
searchguard.auth.type: default
## Disallow login for service users
searchguard.basicauth.forbidden_usernames: ["kibanaserver"]
labels: {}
annotations: {}
resources:
limits:
cpu: 500m
memory: 2500Mi
requests:
cpu: 100m
memory: 2500Mi
## Incoming port of the service
service:
httpPort: 9200
transportPort: 9300
## Kubernetes Role-based access control
## https://kubernetes.io/docs/reference/access-authn-authz/rbac/
## Use "true" for first time installation for certain release in certain cluster/namespace
## Use "false" for second+ time installations for certain release in certain cluster/namespace
rbac:
create: true
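## A sketch of installing a second release into the same namespace (the release
## and chart names are illustrative):
# helm install sg-elk-2 ./search-guard-flx --set rbac.create=false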