diff --git a/deploy/kwatch/.helmignore b/deploy/kwatch/.helmignore new file mode 100644 index 00000000..50af0317 --- /dev/null +++ b/deploy/kwatch/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/deploy/kwatch/Chart.yaml b/deploy/kwatch/Chart.yaml new file mode 100644 index 00000000..325d846f --- /dev/null +++ b/deploy/kwatch/Chart.yaml @@ -0,0 +1,30 @@ +apiVersion: v2 +name: kwatch +version: "0.8.0" +appVersion: "v0.8.0" +description: monitor all changes in your Kubernetes(K8s) cluster, detects crashes + in your running apps in realtime, and publishes notifications to your channels (Slack, + Discord, etc.) instantly +type: application +home: https://kwatch.dev +icon: https://kwatch.dev/img/kwatch-logo.png +sources: + - https://github.com/abahmed/kwatch +keywords: + - kwatch + - kubernetes + - monitoring + - crash-reporting + - event-notifier +maintainers: + - email: mohaamer5@gmail.com + name: Maha Gamal Amer + url: github.com/MahaGamal + - email: a.ahmed1026@gmail.com + name: Abdelrahman Ahmed + url: github.com/abahmed + - email: yaserkalali.work@gmail.com + name: yaser + url: github.com/yaskinny + + diff --git a/deploy/kwatch/README.md b/deploy/kwatch/README.md new file mode 100644 index 00000000..9c281e2c --- /dev/null +++ b/deploy/kwatch/README.md @@ -0,0 +1,65 @@ +# Kwatch Helm Chart +monitor all changes in your Kubernetes(K8s) cluster, detects crashes in your running apps in realtime, and publishes notifications to your channels (Slack, +Discord, etc.) 
instantly

+## Add Repository
+
+```console
+helm repo add kwatch https://kwatch.dev/charts
+helm repo update
+```
+
+## Install & Upgrade your Chart
+
+```console
+helm install [RELEASE_NAME] kwatch/kwatch
+helm upgrade --install [RELEASE_NAME] kwatch/kwatch --namespace [NAMESPACE] [--version CHART-VERSION]
+
+```
+
+## Uninstall Chart
+
+```console
+helm uninstall [RELEASE_NAME]
+```
+
+## Configuration
+
+### Using helm starter https://github.com/MahaGamal/helm-starter/tree/main/templates
+
+
+The following table lists the configurable parameters of the chart and their default values.
+
+Parameter | Description | Default
+--- | --- | ---
+`image.registry` | server container image registry | ``
+`image.repository` | server container image repository | `ghcr.io/abahmed/kwatch`
+`image.tag` | server container image tag | `v0.8.0`
+`image.pullPolicy` | server container image pull policy | `Always`
+`image.runAsUser` | User ID of the server process. Value depends on the Linux distribution used inside of the container image. | `101`
+`server.replicaCount` | desired number of server pods | `1`
+`server.httpPort` | The port that the server container listens on for http connections. | `80`
+`server.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 10
+`server.livenessProbe.periodSeconds` | How often to perform the probe | 10
+`server.livenessProbe.timeoutSeconds` | When the probe times out | 5
+`server.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | 1
+`server.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 3
+`server.livenessProbe.port` | The port number that the liveness probe will listen on. 
| 8080 +`server.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | 10 +`server.readinessProbe.periodSeconds` | How often to perform the probe | 10 +`server.readinessProbe.timeoutSeconds` | When the probe times out | 1 +`server.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | 1 +`server.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 3 +`server.readinessProbe.port` | The port number that the readiness probe will listen on. | 8080 +`server.resources` | server pod resource requests & limits | `{}` +`server.envs` | any additional environment variables to set in the pods | `{}` +`server.VolumeMounts` | volumeMounts to the server main container | `{}` +`server.Volumes` | volumes to the server pod | `{}` +`server.tolerations` | node taints to tolerate (requires Kubernetes >=1.6) | `[]` +`server.affinity` | node/pod affinities (requires Kubernetes >=1.6) | `{}` +`server.nodeSelector` | node labels for pod assignment | `{}` +`serviceAccounts.enabled` | if `true`, Create service accounts | `false` +`service.ports.http` | Sets service http port | `80` +`service.type` | type of server service to create | `ClusterIP` +Service Monitoring configurations +`serviceMonitor.enabled` | if `true`, enable Prometheus metrics | `false` diff --git a/deploy/kwatch/templates/NOTES.txt b/deploy/kwatch/templates/NOTES.txt new file mode 100644 index 00000000..6b3155ab --- /dev/null +++ b/deploy/kwatch/templates/NOTES.txt @@ -0,0 +1,5 @@ +{{- $fullName := include "kwatch.fullname" . -}} +1. Get the application pod running these commands: +echo "kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "kwatch.name" . 
}},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}"
+
+
diff --git a/deploy/kwatch/templates/_helpers.tpl b/deploy/kwatch/templates/_helpers.tpl
new file mode 100644
index 00000000..0ea063d9
--- /dev/null
+++ b/deploy/kwatch/templates/_helpers.tpl
@@ -0,0 +1,40 @@
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "kwatch.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "kwatch.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "kwatch.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Release name without chart name.
+NOTE: use .Release.Name directly rather than `printf .Release.Name`; printf
+treats its first argument as a format string, so a "%" in the release name
+would garble the result.
+*/}}
+{{- define "kwatch.releasePrefix" -}}
+{{- .Release.Name | replace (printf "-%s" .Chart.Name) "" -}}
+{{- end -}}
diff --git a/deploy/kwatch/templates/configmap.yaml b/deploy/kwatch/templates/configmap.yaml
new file mode 100644
index 00000000..3d563657
--- /dev/null
+++ b/deploy/kwatch/templates/configmap.yaml
@@ -0,0 +1,17 @@
+{{- if .Values.server.config -}}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "kwatch.fullname" . }}
+  namespace: {{ .Release.Namespace }}
+  labels:
+    app: {{ include "kwatch.name" . }}
+    chart: {{ include "kwatch.chart" . 
}}
+    release: {{ .Release.Name }}
+    helm.sh/chart: {{ include "kwatch.chart" . }}
+    app.kubernetes.io/name: {{ include "kwatch.name" . }}
+    app.kubernetes.io/instance: {{ .Release.Name }}
+    app.kubernetes.io/managed-by: {{ .Release.Service }}
+data:
+{{- tpl (toYaml .Values.server.config) $ | nindent 2 }}
+{{- end }}
diff --git a/deploy/kwatch/templates/hpa.yaml b/deploy/kwatch/templates/hpa.yaml
new file mode 100644
index 00000000..c7eea918
--- /dev/null
+++ b/deploy/kwatch/templates/hpa.yaml
@@ -0,0 +1,62 @@
+{{- if .Values.hpa.enabled }}
+{{- if .Capabilities.APIVersions.Has "autoscaling/v2" }}
+apiVersion: autoscaling/v2
+{{- else }}
+apiVersion: autoscaling/v2beta2
+{{- end }}
+kind: HorizontalPodAutoscaler
+metadata:
+  name: {{ include "kwatch.fullname" . }}
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: {{ include "kwatch.fullname" . }}
+  minReplicas: {{ .Values.hpa.minpods }}
+  maxReplicas: {{ .Values.hpa.maxpods }}
+  metrics:
+
+  {{- if .Values.hpa.memoryAverageUtilization }}
+  - type: Resource
+    resource:
+      name: memory
+      target:
+        type: Utilization
+        averageUtilization: {{ .Values.hpa.memoryAverageUtilization }}
+  {{- end }}
+
+  {{- if .Values.hpa.cpuAverageUtilization }}
+  - type: Resource
+    resource:
+      name: cpu
+      target:
+        type: Utilization
+        averageUtilization: {{ .Values.hpa.cpuAverageUtilization }}
+  {{- end }}
+
+  {{- if .Values.hpa.customMetrics -}}
+  {{- toYaml .Values.hpa.customMetrics | nindent 2 }}
+  {{- end }}
+
+  behavior:
+
+    scaleDown:
+      stabilizationWindowSeconds: {{ default 60 .Values.hpa.scaleDown.stabilizationWindowSeconds }}
+      policies:
+      - type: {{ default "Pods" .Values.hpa.scaleDown.type }}
+        value: {{ default 1 .Values.hpa.scaleDown.value }}
+        periodSeconds: {{ default 15 .Values.hpa.scaleDown.periodSeconds }}
+
+    scaleUp:
+      stabilizationWindowSeconds: {{ default 60 .Values.hpa.scaleUp.stabilizationWindowSeconds }}
+      policies:
+      - type: {{ default "Pods" .Values.hpa.scaleUp.type }}
+        value: {{ default 5 .Values.hpa.scaleUp.value }}
+ periodSeconds: {{default 15 .Values.hpa.scaleUp.periodSeconds }} + + + {{- if .Values.hpa.customBehavior -}} + {{- toYaml .Values.hpa.customBehavior | nindent 2 }} + {{- end }} + +{{- end }} diff --git a/deploy/kwatch/templates/rbac.yaml b/deploy/kwatch/templates/rbac.yaml new file mode 100644 index 00000000..16a9f1a3 --- /dev/null +++ b/deploy/kwatch/templates/rbac.yaml @@ -0,0 +1,39 @@ +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Release.Name }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ include "kwatch.name" . }} + chart: {{ include "kwatch.chart" . }} + release: {{ .Release.Name }} + helm.sh/chart: {{ include "kwatch.chart" . }} + app.kubernetes.io/name: {{ include "kwatch.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +rules: +- apiGroups: [""] + resources: ["pods", "pods/log", "events"] + verbs: ["get", "watch", "list"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ .Release.Name }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ include "kwatch.name" . }} + chart: {{ include "kwatch.chart" . }} + release: {{ .Release.Name }} + helm.sh/chart: {{ include "kwatch.chart" . }} + app.kubernetes.io/name: {{ include "kwatch.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ .Release.Name }} +subjects: + - kind: ServiceAccount + name: {{ .Release.Name }} + namespace: {{ .Release.Namespace }} \ No newline at end of file diff --git a/deploy/kwatch/templates/server-deployment.yaml b/deploy/kwatch/templates/server-deployment.yaml new file mode 100644 index 00000000..0922aa2f --- /dev/null +++ b/deploy/kwatch/templates/server-deployment.yaml @@ -0,0 +1,143 @@ +{{- $fullName := include "kwatch.fullname" . 
-}} +{{- $releasePrefix := include "kwatch.releasePrefix" . -}} +{{- $_ := set $.Release "Prefix" $releasePrefix }} +{{- if .Capabilities.APIVersions.Has "apps/v1" }} +apiVersion: apps/v1 +{{- else }} +apiVersion: apps/v1beta2 +{{- end }} +kind: Deployment +metadata: + name: {{ include "kwatch.fullname" . }} + labels: + app: {{ include "kwatch.name" . }} + chart: {{ include "kwatch.chart" . }} + release: {{ .Release.Name }} + helm.sh/chart: {{ include "kwatch.chart" . }} + app.kubernetes.io/name: {{ include "kwatch.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/component: server +spec: + replicas: {{ .Values.server.replicaCount }} + selector: + matchLabels: + app.kubernetes.io/name: {{ include "kwatch.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: server +{{- with .Values.server.updateStrategy }} + strategy: + {{- toYaml . | nindent 4 }} +{{- end }} + template: + metadata: + labels: + app: {{ include "kwatch.name" . }} + chart: {{ include "kwatch.chart" . }} + release: {{ .Release.Name }} + helm.sh/chart: {{ include "kwatch.chart" . }} + app.kubernetes.io/name: {{ include "kwatch.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: server + {{- with .Values.server.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.server.podAnnotations }} + annotations: + {{- range $key, $value := . }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} + spec: + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- with .Values.server.command }} + command: + {{- . | toYaml | nindent 12 }} + {{- end }} + {{- with .Values.server.args }} + args: + {{- . | toYaml | nindent 12 }} + {{- end }} + {{- with .Values.server.resources }} + resources: + {{- toYaml . 
| nindent 12 }} + {{- end }} + {{- with .Values.server.health.livenessProbe }} + livenessProbe: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.server.health.readinessProbe }} + readinessProbe: + {{- toYaml . | nindent 12 }} + {{- end }} + env: + {{- range $key, $value := .Values.server.env }} + - name: {{ $key }} + value: {{ tpl $value $ | quote }} + {{- end }} + {{- if .Values.server.envVarSource }} + {{- range $key, $value := .Values.server.envVarSource }} + - name: {{ $key }} + valueFrom: {{ tpl $value $ }} + {{- end }} + {{- end }} + {{- if .Values.global }} + {{- range $key, $value := .Values.global.env }} + - name: {{ $key }} + value: {{ tpl $value $ | quote }} + {{- end }} + {{- end }} + {{- if .Values.server.existSecretNames }} + envFrom: + {{- range $secretname := .Values.server.existSecretNames }} + - secretRef: + name: {{ tpl $secretname $ | quote }} + {{- end }} + {{- end }} + {{- with .Values.server.securityContext }} + securityContext: + {{ toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.server.containerVolumeMounts }} + volumeMounts: + {{- tpl (toYaml . ) $ | nindent 12 }} + {{- end }} + {{- if .Values.server.antiAffinity }} + affinity: + {{- if eq .Values.server.antiAffinity "hard" }} + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: "kubernetes.io/hostname" + labelSelector: + matchLabels: + app.kubernetes.io/name: {{ include "kwatch.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: server + {{- else if eq .Values.server.antiAffinity "soft" }} + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + app.kubernetes.io/name: {{ include "kwatch.name" . 
}}
+                    app.kubernetes.io/instance: {{ .Release.Name }}
+                    app.kubernetes.io/component: server
+        {{- end }}
+      {{- end }}
+      {{- with .Values.server.nodeSelector }}
+      nodeSelector:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with .Values.server.tolerations }}
+      tolerations:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with .Values.server.volumes }}
+      volumes:
+        {{- tpl (toYaml . ) $ | nindent 8 }}
+      {{- end }}
diff --git a/deploy/kwatch/templates/serviceaccount.yaml b/deploy/kwatch/templates/serviceaccount.yaml
new file mode 100644
index 00000000..4a3e24e2
--- /dev/null
+++ b/deploy/kwatch/templates/serviceaccount.yaml
@@ -0,0 +1,16 @@
+{{- if .Values.serviceAccount.enabled -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  # must match the ServiceAccount subject bound by rbac.yaml ({{ .Release.Name }})
+  name: {{ .Release.Name }}
+  namespace: {{ .Release.Namespace }}
+  labels:
+    app: {{ include "kwatch.name" . }}
+    chart: {{ include "kwatch.chart" . }}
+    release: {{ .Release.Name }}
+    helm.sh/chart: {{ include "kwatch.chart" . }}
+    app.kubernetes.io/name: {{ include "kwatch.name" . }}
+    app.kubernetes.io/instance: {{ .Release.Name }}
+    app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end }}
diff --git a/deploy/kwatch/templates/servicemonitor.yaml b/deploy/kwatch/templates/servicemonitor.yaml
new file mode 100644
index 00000000..3b163cfc
--- /dev/null
+++ b/deploy/kwatch/templates/servicemonitor.yaml
@@ -0,0 +1,28 @@
+{{- if .Values.serviceMonitor.enabled -}}
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  name: {{ include "kwatch.fullname" . }}-sm
+  namespace: {{ $.Values.serviceMonitor.namespace }}
+  labels:
+    app: {{ include "kwatch.name" . }}
+    chart: {{ include "kwatch.chart" . }}
+    release: {{ .Release.Name }}
+    helm.sh/chart: {{ include "kwatch.chart" . }}
+    app.kubernetes.io/name: {{ include "kwatch.name" . }}
+    app.kubernetes.io/instance: {{ .Release.Name }}
+    app.kubernetes.io/managed-by: {{ .Release.Service }}
+spec:
+  selector:
+    matchLabels:
+      app: {{ include "kwatch.name" . 
}} + release: {{ .Release.Name }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + endpoints: + - port: http + interval: 10s + path: /prometheus + honorLabels: true +{{- end }} diff --git a/deploy/kwatch/values.yaml b/deploy/kwatch/values.yaml new file mode 100644 index 00000000..0dd5f292 --- /dev/null +++ b/deploy/kwatch/values.yaml @@ -0,0 +1,207 @@ +# Values for kwatch should update image version +image: + repository: ghcr.io/abahmed/kwatch + tag: v0.8.0 + pullPolicy: Always + +# nameOverride: "" +# fullnameOverride: "" + +# Configuration for server deployment +server: + # Number of replicas (Required) + replicaCount: 1 + + # How kubernetes determines when the server is ready and if it's still alive (Suggested) + health: {} + # readinessProbe: + # httpGet: + # path: /health/readiness + # port: 8080 + # initialDelaySeconds: 60 + # periodSeconds: 60 + # livenessProbe: + # httpGet: + # path: /health/liveness + # port: 8080 + # initialDelaySeconds: 60 + # periodSeconds: 60 + # Determing processes securityContext. + + # https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#podsecuritycontext-v1-core ( podSecurityContext [ Optioanl ]) + podSecurityContext: + fsGroup: 2000 + + # Requested resources and limits for the server (Suggested) + resources: {} + # requests: + # cpu: 100m + # memory: 128Mi + # limits: + # cpu: 100m + # memory: 128Mi + + # Pass custom command. Defaults to docker image ENTRYPOINT (Optional) + command: [] + + # Pass custom args. Defaults to docker image CMD (Optional) + args: [] + + # Constraints how pods should be spread across nodes + # valid values: + # - "" (no constraints) + # - "soft" (tries to spread the pods if possible) + # - "hard" (forces the pods to be spread) + antiAffinity: "" + + # The deployment strategy to use to replace existing pods with new ones. (Optional) + updateStrategy: {} + # rollingUpdate: + # maxUnavailable: 1 + # type: RollingUpdate + + # Annotations to add to server pods. 
(Optional) + podAnnotations: {} + # fluentbit.io/parser: nginx-ingress + + # Labels to add to server pods. (Optional) + podLabels: {} + # stack: node + + # Which nodes should the server run on. (Suggested) + nodeSelector: {} + # environment: dev + + # https://kubernetes.io/docs/concepts/configuration/taint-and-toleration. (Optional) + tolerations: [] + # - key: "instancegroup" + # operation: "Equal" + # value: "dev" + + # Server environment variables. (Optional) + # server also inherits variables from envVarSource, secretEnv and existSecretNames but env has higher priority. + # values can contain go templates + env: + # KEY: val + # URL: value-{{ .Release.Prefix }} + CONFIG_FILE: "/config/config.yaml" + + # Server environment variables from a source/reference for the value. (Optional) + # Sources Like: + # fieldRef: Selects a field of the pod + # metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + # Examaples: https://raw.githubusercontent.com/kubernetes/website/main/content/en/examples/pods/inject/dapi-envars-pod.yaml + # resourceFieldRef: Selects a resource of the container + # Note: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + # Examples: https://kubernetes.io/docs/tasks/inject-data-application/_print/#use-container-fields-as-values-for-environment-variables + # configMapKeyRef: Selects a key of a ConfigMap. 
+ # secretKeyRef: Selects a key of a secret in the pod's namespaceource for the value of an Env + envVarSource: {} + # KEY: "{ : { } }" + # POD_IP: "{ fieldRef: { fieldPath: status.podIP } }" + + # Will add custom configuration options & Create Configmap in k8s cluster with config + # Should Update Configmap config data + config: + config.yaml: | + # maxRecentLogLines: + # ignoreFailedGracefulShutdown: + # alert: + # slack: + # webhook: + # pagerduty: + # integrationKey: + # discord: + # webhook: + # telegram: + # token: + # chatId: + # email: + # form: + # to: + # password: + # host: + # port: + # teams: + # webhook: + # rocketchat: + # webhook: + # mattermost: + # webhook: + # opsgenie: + # apiKey: + # namespaces: + # - + # reasons: + # - + # ignoreContainerNames: + # - + + + # to use existing secret in deployment file + existSecretNames: [] + # the names of existing secrets names + # - existsecret + # - secondsecret + + # add containerVolumeMounts to container + containerVolumeMounts: + - name: config-volume + mountPath: /config + readOnly: true + + # add the volumes to pod + volumes: + - name: config-volume + configMap: + name: {{ .Release.Name }} + +# To Create ServiceAccount +serviceAccount: + enabled: true + +# Configuration for Prometheus monintoring +serviceMonitor: + enabled: false + namespace: monitoring + +# Configuration for HorizontalPodAutoscaler +hpa: + # add if true hpa.yaml is applied + enabled: false + + # minimum number of replicas created + minpods: 2 + + # maximum number of replicas created + maxpods: 5 + + # Scaling policies + scaleDown: + # restrict the flapping of replicas count when the metrics used for scaling keep fluctuating + stabilizationWindowSeconds: {} + # the Scaling down policy Configurations: + type: {} + periodSeconds: {} + value: {} + + scaleUp: + # restrict the flapping of replicas count when the metrics used for scaling keep fluctuating + stabilizationWindowSeconds: {} + # the Scaling down policy Configurations: + type: {} + 
periodSeconds: {}
+    value: {}
+
+  # Memory metric configuration: if a value is given, a memory metric is created and scaling uses memoryAverageUtilization
+  memoryAverageUtilization: {}
+
+  # CPU metric configuration: if a value is given, a cpu metric is created and scaling uses cpuAverageUtilization
+  cpuAverageUtilization: 50
+
+
+# add your custom metrics configurations here
+  customMetrics: {}
+
+# add your custom behavior configurations here
+  customBehavior: {}