diff --git a/README.md b/README.md
index 28f3620..d9f06f9 100644
--- a/README.md
+++ b/README.md
@@ -44,6 +44,40 @@ helm upgrade -i rabbitmq-crm1 rabbitmq/ --values local-values/rabbitmq/crm1.yam
 ```
 
+## redis
+
+```
+cd redis && helm dependency update && cd ..
+
+helm upgrade -i redis-crm1 redis/ --values local-values/redis/crm1.yaml -n crm1
+
+```
+
+
+
+## mongodb
+
+```
+cd mongodb && helm dependency update && cd ..
+
+helm upgrade -i mongodb-crm1 mongodb/ --values local-values/mongodb/crm1.yaml -n crm1
+
+```
+
+
+
+## elasticsearch
+
+```
+cd elasticsearch && helm dependency update && cd ..
+
+helm upgrade -i elasticsearch-crm1 elasticsearch/ --values local-values/es/crm1.yaml -n crm1
+
+```
+
+
+
+
 ## nacos
 
 https://hub.fastgit.org/nacos-group/nacos-k8s.git
 
diff --git a/elasticsearch/.helmignore b/elasticsearch/.helmignore
new file mode 100644
index 0000000..f0c1319
--- /dev/null
+++ b/elasticsearch/.helmignore
@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
diff --git a/elasticsearch/Chart.lock b/elasticsearch/Chart.lock
new file mode 100644
index 0000000..41dd8c3
--- /dev/null
+++ b/elasticsearch/Chart.lock
@@ -0,0 +1,9 @@
+dependencies:
+- name: common
+  repository: https://charts.bitnami.com/bitnami
+  version: 1.10.3
+- name: kibana
+  repository: https://charts.bitnami.com/bitnami
+  version: 9.1.6
+digest: sha256:041be74b9823cc010a01721868fb21c97a05e6e1f5bf72df50d85fc7a7bdec4c
+generated: "2021-12-19T20:26:11.528367846Z"
diff --git a/elasticsearch/Chart.yaml b/elasticsearch/Chart.yaml
new file mode 100644
index 0000000..937770a
--- /dev/null
+++ b/elasticsearch/Chart.yaml
@@ -0,0 +1,28 @@
+annotations:
+  category: Analytics
+apiVersion: v2
+appVersion: 7.16.2
+dependencies:
+  - name: common
+    repository: https://charts.bitnami.com/bitnami
+    tags:
+      - bitnami-common
+    version: 1.x.x
+  - condition: global.kibanaEnabled
+    name: kibana
+    repository: https://charts.bitnami.com/bitnami
+    version: 9.x.x
+description: A highly scalable open-source full-text search and analytics engine
+engine: gotpl
+home: https://github.com/bitnami/charts/tree/master/bitnami/elasticsearch
+icon: https://bitnami.com/assets/stacks/elasticsearch/img/elasticsearch-stack-220x234.png
+keywords:
+  - elasticsearch
+maintainers:
+  - email: containers@bitnami.com
+    name: Bitnami
+name: elasticsearch
+sources:
+  - https://github.com/bitnami/bitnami-docker-elasticsearch
+  - https://www.elastic.co/products/elasticsearch
+version: 17.5.5
diff --git a/elasticsearch/README.md b/elasticsearch/README.md
new file mode 100644
index 0000000..9b06c9f
--- /dev/null
+++ b/elasticsearch/README.md
@@ -0,0 +1,822 @@
+# Elasticsearch
+
+[Elasticsearch](https://www.elastic.co/products/elasticsearch) is a highly scalable open-source full-text search and analytics engine. It allows you to store, search, and analyze big volumes of data quickly and in near real time.
+ +## TL;DR + +```console +$ helm repo add bitnami https://charts.bitnami.com/bitnami +$ helm install my-release bitnami/elasticsearch +``` + +## Introduction + +This chart bootstraps a [Elasticsearch](https://github.com/bitnami/bitnami-docker-elasticsearch) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications. + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 3.1.0 +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +$ helm repo add bitnami https://charts.bitnami.com/bitnami +$ helm install my-release bitnami/elasticsearch +``` + +These commands deploy Elasticsearch on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` release: + +```console +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. Remove also the chart using `--purge` option: + +```console +$ helm delete --purge my-release +``` + +## Parameters + +### Global parameters + +| Name | Description | Value | +| -------------------------- | ------------------------------------------------------------------ | ------------------- | +| `global.imageRegistry` | Global Docker image registry | `""` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` | +| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` | +| `global.coordinating.name` | Coordinating name to be used in the Kibana subchart (service name) | `coordinating-only` | +| `global.kibanaEnabled` | Whether or not to enable Kibana | `false` | + + +### Common parameters + +| Name | Description | Value | +| ------------------------ | -------------------------------------------------------------------------------------------- | --------------- | +| `nameOverride` | String to partially override common.names.fullname template (will maintain the release name) | `""` | +| `fullnameOverride` | String to fully override common.names.fullname template | `""` | +| `clusterDomain` | Kubernetes cluster domain | `cluster.local` | +| `extraDeploy` | Array of extra objects to deploy with the release | `[]` | +| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` | +| `diagnosticMode.command` | Command to override all containers in the deployment | `["sleep"]` | +| `diagnosticMode.args` | Args to override all containers in the deployment | `["infinity"]` | + + +### Elasticsearch parameters + +| Name | Description | Value | +| ------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------ | +| `image.registry` | Elasticsearch image registry | `docker.io` | +| `image.repository` | Elasticsearch image repository | 
`bitnami/elasticsearch` | +| `image.tag` | Elasticsearch image tag (immutable tags are recommended) | `7.15.2-debian-10-r10` | +| `image.pullPolicy` | Elasticsearch image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Elasticsearch image pull secrets | `[]` | +| `image.debug` | Enable image debug mode | `false` | +| `security.enabled` | Enable X-Pack Security settings | `false` | +| `security.elasticPassword` | Password for 'elastic' user | `""` | +| `security.existingSecret` | Name of the existing secret containing the Elasticsearch password | `""` | +| `security.fipsMode` | Configure elasticsearch with FIPS 140 compliant mode | `false` | +| `security.tls.restEncryption` | Enable SSL/TLS encryption for Elasticsearch REST API. | `true` | +| `security.tls.autoGenerated` | Create self-signed TLS certificates. | `false` | +| `security.tls.verificationMode` | Verification mode for SSL communications. | `full` | +| `security.tls.master.existingSecret` | Existing secret containing the certificates for the master nodes | `""` | +| `security.tls.data.existingSecret` | Existing secret containing the certificates for the data nodes | `""` | +| `security.tls.ingest.existingSecret` | Existing secret containing the certificates for the ingest nodes | `""` | +| `security.tls.coordinating.existingSecret` | Existing secret containing the certificates for the coordinating nodes | `""` | +| `security.tls.keystorePassword` | Password to access the JKS/PKCS12 keystore or PEM key when they are password-protected. | `""` | +| `security.tls.truststorePassword` | Password to access the JKS/PKCS12 truststore when they are password-protected. | `""` | +| `security.tls.keystoreFilename` | Name of the keystore file | `elasticsearch.keystore.jks` | +| `security.tls.truststoreFilename` | Name of the truststore | `elasticsearch.truststore.jks` | +| `security.tls.usePemCerts` | Use this variable if your secrets contain PEM certificates instead of JKS/PKCS12 | `false` | +| `security.tls.keyPassword` | Password to access the PEM key when they are password-protected. | `""` | +| `name` | Elasticsearch cluster name | `elastic` | +| `plugins` | Comma, semi-colon or space separated list of plugins to install at initialization | `""` | +| `snapshotRepoPath` | File System snapshot repository path | `""` | +| `config` | Override elasticsearch configuration | `{}` | +| `extraConfig` | Append extra configuration to the elasticsearch node configuration | `{}` | +| `extraVolumes` | A list of volumes to be added to the pod | `[]` | +| `extraVolumeMounts` | A list of volume mounts to be added to the pod | `[]` | +| `initScripts` | Dictionary of init scripts. Evaluated as a template. | `{}` | +| `initScriptsCM` | ConfigMap with the init scripts. Evaluated as a template. | `""` | +| `initScriptsSecret` | Secret containing `/docker-entrypoint-initdb.d` scripts to be executed at initialization time that contain sensitive data. Evaluated as a template. 
| `""` | +| `extraEnvVars` | Array containing extra env vars to be added to all pods (evaluated as a template) | `[]` | +| `extraEnvVarsConfigMap` | ConfigMap containing extra env vars to be added to all pods (evaluated as a template) | `""` | +| `extraEnvVarsSecret` | Secret containing extra env vars to be added to all pods (evaluated as a template) | `""` | + + +### Master parameters + +| Name | Description | Value | +| ------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------- | +| `master.name` | Master-eligible node pod name | `master` | +| `master.fullnameOverride` | String to fully override elasticsearch.master.fullname template with a string | `""` | +| `master.replicas` | Desired number of Elasticsearch master-eligible nodes. Consider using an odd number of master nodes to prevent "split brain" situation. See: https://www.elastic.co/guide/en/elasticsearch/reference/7.x/modules-discovery-voting.html | `3` | +| `master.updateStrategy.type` | Update strategy for Master statefulset | `RollingUpdate` | +| `master.hostAliases` | Add deployment host aliases | `[]` | +| `master.schedulerName` | Name of the k8s scheduler (other than default) | `""` | +| `master.heapSize` | Master-eligible node heap size | `128m` | +| `master.podAnnotations` | Annotations for master-eligible pods. | `{}` | +| `master.podLabels` | Extra labels to add to Pod | `{}` | +| `master.securityContext.enabled` | Enable security context for master-eligible pods | `true` | +| `master.securityContext.fsGroup` | Group ID for the container for master-eligible pods | `1001` | +| `master.securityContext.runAsUser` | User ID for the container for master-eligible pods | `1001` | +| `master.podSecurityContext.enabled` | Enable security context for master-eligible pods | `false` | +| `master.podSecurityContext.fsGroup` | Group ID for the container for master-eligible pods | `1001` | +| `master.containerSecurityContext.enabled` | Enable security context for master-eligible pods | `false` | +| `master.containerSecurityContext.runAsUser` | User ID for the container for master-eligible pods | `1001` | +| `master.podAffinityPreset` | Master-eligible Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `master.podAntiAffinityPreset` | Master-eligible Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `master.nodeAffinityPreset.type` | Master-eligible Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `master.nodeAffinityPreset.key` | Master-eligible Node label key to match Ignored if `affinity` is set. | `""` | +| `master.nodeAffinityPreset.values` | Master-eligible Node label values to match. Ignored if `affinity` is set. | `[]` | +| `master.affinity` | Master-eligible Affinity for pod assignment | `{}` | +| `master.priorityClassName` | Master pods Priority Class Name | `""` | +| `master.nodeSelector` | Master-eligible Node labels for pod assignment | `{}` | +| `master.tolerations` | Master-eligible Tolerations for pod assignment | `[]` | +| `master.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
Evaluated as a template | `[]` | +| `master.resources.limits` | The resources limits for the container | `{}` | +| `master.resources.requests` | The requested resources for the container | `{}` | +| `master.startupProbe.enabled` | Enable/disable the startup probe (master nodes pod) | `false` | +| `master.startupProbe.initialDelaySeconds` | Delay before startup probe is initiated (master nodes pod) | `90` | +| `master.startupProbe.periodSeconds` | How often to perform the probe (master nodes pod) | `10` | +| `master.startupProbe.timeoutSeconds` | When the probe times out (master nodes pod) | `5` | +| `master.startupProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (master nodes pod) | `1` | +| `master.startupProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` | +| `master.livenessProbe.enabled` | Enable/disable the liveness probe (master-eligible nodes pod) | `true` | +| `master.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (master-eligible nodes pod) | `90` | +| `master.livenessProbe.periodSeconds` | How often to perform the probe (master-eligible nodes pod) | `10` | +| `master.livenessProbe.timeoutSeconds` | When the probe times out (master-eligible nodes pod) | `5` | +| `master.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (master-eligible nodes pod) | `1` | +| `master.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` | +| `master.readinessProbe.enabled` | Enable/disable the readiness probe (master-eligible nodes pod) | `true` | +| `master.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (master-eligible nodes pod) | `90` | +| `master.readinessProbe.periodSeconds` | How often to perform the probe (master-eligible nodes pod) | `10` | +| `master.readinessProbe.timeoutSeconds` | When the probe times out (master-eligible nodes pod) | `5` | +| `master.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (master-eligible nodes pod) | `1` | +| `master.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` | +| `master.customStartupProbe` | Override default startup probe | `{}` | +| `master.customLivenessProbe` | Override default liveness probe | `{}` | +| `master.customReadinessProbe` | Override default readiness probe | `{}` | +| `master.initContainers` | Extra init containers to add to the Elasticsearch master-eligible pod(s) | `[]` | +| `master.sidecars` | Extra sidecar containers to add to the Elasticsearch master-eligible pod(s) | `[]` | +| `master.persistence.enabled` | Enable persistence using a `PersistentVolumeClaim` | `true` | +| `master.persistence.storageClass` | Persistent Volume Storage Class | `""` | +| `master.persistence.existingClaim` | Existing Persistent Volume Claim | `""` | +| `master.persistence.existingVolume` | Existing Persistent Volume for use as volume match label selector to the `volumeClaimTemplate`. Ignored when `master.persistence.selector` is set. | `""` | +| `master.persistence.selector` | Configure custom selector for existing Persistent Volume. 
Overwrites `master.persistence.existingVolume` | `{}` | +| `master.persistence.annotations` | Persistent Volume Claim annotations | `{}` | +| `master.persistence.accessModes` | Persistent Volume Access Modes | `["ReadWriteOnce"]` | +| `master.persistence.size` | Persistent Volume Size | `8Gi` | +| `master.service.type` | Kubernetes Service type (master-eligible nodes) | `ClusterIP` | +| `master.service.port` | Kubernetes Service port for Elasticsearch transport port (master-eligible nodes) | `9300` | +| `master.service.nodePort` | Kubernetes Service nodePort (master-eligible nodes) | `""` | +| `master.service.annotations` | Annotations for master-eligible nodes service | `{}` | +| `master.service.loadBalancerIP` | loadBalancerIP if master-eligible nodes service type is `LoadBalancer` | `""` | +| `master.serviceAccount.create` | Enable creation of ServiceAccount for the master node | `false` | +| `master.serviceAccount.name` | Name of the created serviceAccount | `""` | +| `master.autoscaling.enabled` | Enable autoscaling for master replicas | `false` | +| `master.autoscaling.minReplicas` | Minimum number of master replicas | `2` | +| `master.autoscaling.maxReplicas` | Maximum number of master replicas | `11` | +| `master.autoscaling.targetCPU` | Target CPU utilization percentage for master replica autoscaling | `""` | +| `master.autoscaling.targetMemory` | Target Memory utilization percentage for master replica autoscaling | `""` | + + +### Coordinating parameters + +| Name | Description | Value | +| ------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------- | --------------- | +| `coordinating.fullnameOverride` | String to fully override elasticsearch.coordinating.fullname template with a string | `""` | +| `coordinating.replicas` | Desired number of Elasticsearch coordinating-only nodes | `2` | +| `coordinating.hostAliases` | Add deployment host aliases | `[]` | +| `coordinating.schedulerName` | Name of the k8s scheduler (other than default) | `""` | +| `coordinating.updateStrategy.type` | Update strategy for Coordinating Statefulset | `RollingUpdate` | +| `coordinating.heapSize` | Coordinating-only node heap size | `128m` | +| `coordinating.podAnnotations` | Annotations for coordinating pods. | `{}` | +| `coordinating.podLabels` | Extra labels to add to Pod | `{}` | +| `coordinating.securityContext.enabled` | Enable security context for coordinating-only pods | `true` | +| `coordinating.securityContext.fsGroup` | Group ID for the container for coordinating-only pods | `1001` | +| `coordinating.securityContext.runAsUser` | User ID for the container for coordinating-only pods | `1001` | +| `coordinating.podSecurityContext.enabled` | Enable security context for coordinating pods | `false` | +| `coordinating.podSecurityContext.fsGroup` | Group ID for the container for coordinating pods | `1001` | +| `coordinating.containerSecurityContext.enabled` | Enable security context for coordinating pods | `false` | +| `coordinating.containerSecurityContext.runAsUser` | User ID for the container for coordinating pods | `1001` | +| `coordinating.podAffinityPreset` | Coordinating Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `coordinating.podAntiAffinityPreset` | Coordinating Pod anti-affinity preset. Ignored if `affinity` is set. 
Allowed values: `soft` or `hard` | `""` | +| `coordinating.nodeAffinityPreset.type` | Coordinating Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `coordinating.nodeAffinityPreset.key` | Coordinating Node label key to match Ignored if `affinity` is set. | `""` | +| `coordinating.nodeAffinityPreset.values` | Coordinating Node label values to match. Ignored if `affinity` is set. | `[]` | +| `coordinating.affinity` | Coordinating Affinity for pod assignment | `{}` | +| `coordinating.priorityClassName` | Coordinating pods Priority Class Name | `""` | +| `coordinating.nodeSelector` | Coordinating Node labels for pod assignment | `{}` | +| `coordinating.tolerations` | Coordinating Tolerations for pod assignment | `[]` | +| `coordinating.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` | +| `coordinating.resources.limits` | The resources limits for the container | `{}` | +| `coordinating.resources.requests` | The requested resources for the container | `{}` | +| `coordinating.startupProbe.enabled` | Enable/disable the startup probe (coordinating nodes pod) | `false` | +| `coordinating.startupProbe.initialDelaySeconds` | Delay before startup probe is initiated (coordinating nodes pod) | `90` | +| `coordinating.startupProbe.periodSeconds` | How often to perform the probe (coordinating nodes pod) | `10` | +| `coordinating.startupProbe.timeoutSeconds` | When the probe times out (coordinating nodes pod) | `5` | +| `coordinating.startupProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` | +| `coordinating.startupProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (coordinating nodes pod) | `1` | +| `coordinating.livenessProbe.enabled` | Enable/disable the liveness probe (coordinating-only nodes pod) | `true` | +| `coordinating.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (coordinating-only nodes pod) | `90` | +| `coordinating.livenessProbe.periodSeconds` | How often to perform the probe (coordinating-only nodes pod) | `10` | +| `coordinating.livenessProbe.timeoutSeconds` | When the probe times out (coordinating-only nodes pod) | `5` | +| `coordinating.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` | +| `coordinating.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (coordinating-only nodes pod) | `1` | +| `coordinating.readinessProbe.enabled` | Enable/disable the readiness probe (coordinating-only nodes pod) | `true` | +| `coordinating.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (coordinating-only nodes pod) | `90` | +| `coordinating.readinessProbe.periodSeconds` | How often to perform the probe (coordinating-only nodes pod) | `10` | +| `coordinating.readinessProbe.timeoutSeconds` | When the probe times out (coordinating-only nodes pod) | `5` | +| `coordinating.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` | +| `coordinating.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (coordinating-only nodes pod) | `1` | +| 
`coordinating.customStartupProbe` | Override default startup probe | `{}` | +| `coordinating.customLivenessProbe` | Override default liveness probe | `{}` | +| `coordinating.customReadinessProbe` | Override default readiness probe | `{}` | +| `coordinating.initContainers` | Extra init containers to add to the Elasticsearch coordinating-only pod(s) | `[]` | +| `coordinating.sidecars` | Extra sidecar containers to add to the Elasticsearch coordinating-only pod(s) | `[]` | +| `coordinating.service.type` | Kubernetes Service type (coordinating-only nodes) | `ClusterIP` | +| `coordinating.service.port` | Kubernetes Service port for REST API (coordinating-only nodes) | `9200` | +| `coordinating.service.nodePort` | Kubernetes Service nodePort (coordinating-only nodes) | `""` | +| `coordinating.service.annotations` | Annotations for coordinating-only nodes service | `{}` | +| `coordinating.service.loadBalancerIP` | loadBalancerIP if coordinating-only nodes service type is `LoadBalancer` | `""` | +| `coordinating.service.externalTrafficPolicy` | Set `externalTrafficPolicy` to `Local` to enable client source IP preservation | `Cluster` | +| `coordinating.serviceAccount.create` | Enable creation of ServiceAccount for the coordinating-only node | `false` | +| `coordinating.serviceAccount.name` | Name of the created serviceAccount | `""` | +| `coordinating.autoscaling.enabled` | Enable autoscaling for coordinating replicas | `false` | +| `coordinating.autoscaling.minReplicas` | Minimum number of coordinating replicas | `2` | +| `coordinating.autoscaling.maxReplicas` | Maximum number of coordinating replicas | `11` | +| `coordinating.autoscaling.targetCPU` | Target CPU utilization percentage for coordinating replica autoscaling | `""` | +| `coordinating.autoscaling.targetMemory` | Target Memory utilization percentage for coordinating replica autoscaling | `""` | + + +### Data parameters + +| Name | Description | Value | +| -------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------- | +| `data.name` | Data node pod name | `data` | +| `data.fullnameOverride` | String to fully override elasticsearch.data.fullname template with a string | `""` | +| `data.replicas` | Desired number of Elasticsearch data nodes | `2` | +| `data.hostAliases` | Add deployment host aliases | `[]` | +| `data.schedulerName` | Name of the k8s scheduler (other than default) | `""` | +| `data.updateStrategy.type` | Update strategy for Data statefulset | `RollingUpdate` | +| `data.updateStrategy.rollingUpdatePartition` | Partition update strategy for Data statefulset | `""` | +| `data.heapSize` | Data node heap size | `1024m` | +| `data.podAnnotations` | Annotations for data pods. 
| `{}` | +| `data.podLabels` | Extra labels to add to Pod | `{}` | +| `data.securityContext.enabled` | Enable security context for data pods | `true` | +| `data.securityContext.fsGroup` | Group ID for the container for data pods | `1001` | +| `data.securityContext.runAsUser` | User ID for the container for data pods | `1001` | +| `data.podSecurityContext.enabled` | Enable security context for data pods | `false` | +| `data.podSecurityContext.fsGroup` | Group ID for the container for data pods | `1001` | +| `data.containerSecurityContext.enabled` | Enable security context for data pods | `false` | +| `data.containerSecurityContext.runAsUser` | User ID for the container for data pods | `1001` | +| `data.podAffinityPreset` | Data Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `data.podAntiAffinityPreset` | Data Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `data.nodeAffinityPreset.type` | Data Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `data.nodeAffinityPreset.key` | Data Node label key to match Ignored if `affinity` is set. | `""` | +| `data.nodeAffinityPreset.values` | Data Node label values to match. Ignored if `affinity` is set. | `[]` | +| `data.affinity` | Data Affinity for pod assignment | `{}` | +| `data.priorityClassName` | Data pods Priority Class Name | `""` | +| `data.nodeSelector` | Data Node labels for pod assignment | `{}` | +| `data.tolerations` | Data Tolerations for pod assignment | `[]` | +| `data.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` | +| `data.resources.limits` | The resources limits for the container | `{}` | +| `data.resources.requests` | The requested resources for the container | `{}` | +| `data.startupProbe.enabled` | Enable/disable the startup probe (data nodes pod) | `false` | +| `data.startupProbe.initialDelaySeconds` | Delay before startup probe is initiated (data nodes pod) | `90` | +| `data.startupProbe.periodSeconds` | How often to perform the probe (data nodes pod) | `10` | +| `data.startupProbe.timeoutSeconds` | When the probe times out (data nodes pod) | `5` | +| `data.startupProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` | +| `data.startupProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (data nodes pod) | `1` | +| `data.livenessProbe.enabled` | Enable/disable the liveness probe (data nodes pod) | `true` | +| `data.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (data nodes pod) | `90` | +| `data.livenessProbe.periodSeconds` | How often to perform the probe (data nodes pod) | `10` | +| `data.livenessProbe.timeoutSeconds` | When the probe times out (data nodes pod) | `5` | +| `data.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` | +| `data.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (data nodes pod) | `1` | +| `data.readinessProbe.enabled` | Enable/disable the readiness probe (data nodes pod) | `true` | +| `data.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (data nodes pod) | `90` | +| 
`data.readinessProbe.periodSeconds` | How often to perform the probe (data nodes pod) | `10` | +| `data.readinessProbe.timeoutSeconds` | When the probe times out (data nodes pod) | `5` | +| `data.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` | +| `data.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (data nodes pod) | `1` | +| `data.customStartupProbe` | Override default startup probe | `{}` | +| `data.customLivenessProbe` | Override default liveness probe | `{}` | +| `data.customReadinessProbe` | Override default readiness probe | `{}` | +| `data.initContainers` | Extra init containers to add to the Elasticsearch data pod(s) | `[]` | +| `data.sidecars` | Extra sidecar containers to add to the Elasticsearch data pod(s) | `[]` | +| `data.persistence.enabled` | Enable persistence using a `PersistentVolumeClaim` | `true` | +| `data.persistence.storageClass` | Persistent Volume Storage Class | `""` | +| `data.persistence.existingClaim` | Existing Persistent Volume Claim | `""` | +| `data.persistence.existingVolume` | Existing Persistent Volume for use as volume match label selector to the `volumeClaimTemplate`. Ignored when `data.persistence.selector` ist set. | `""` | +| `data.persistence.selector` | Configure custom selector for existing Persistent Volume. Overwrites `data.persistence.existingVolume` | `{}` | +| `data.persistence.annotations` | Persistent Volume Claim annotations | `{}` | +| `data.persistence.accessModes` | Persistent Volume Access Modes | `["ReadWriteOnce"]` | +| `data.persistence.size` | Persistent Volume Size | `8Gi` | +| `data.serviceAccount.create` | Enable creation of ServiceAccount for the data node | `false` | +| `data.serviceAccount.name` | Name of the created serviceAccount | `""` | +| `data.autoscaling.enabled` | Enable autoscaling for data replicas | `false` | +| `data.autoscaling.minReplicas` | Minimum number of data replicas | `2` | +| `data.autoscaling.maxReplicas` | Maximum number of data replicas | `11` | +| `data.autoscaling.targetCPU` | Target CPU utilization percentage for data replica autoscaling | `""` | +| `data.autoscaling.targetMemory` | Target Memory utilization percentage for data replica autoscaling | `""` | + + +### Ingest parameters + +| Name | Description | Value | +| ------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------ | --------------- | +| `ingest.enabled` | Enable ingest nodes | `false` | +| `ingest.name` | Ingest node pod name | `ingest` | +| `ingest.fullnameOverride` | String to fully override elasticsearch.ingest.fullname template with a string | `""` | +| `ingest.replicas` | Desired number of Elasticsearch ingest nodes | `2` | +| `ingest.updateStrategy.type` | Update strategy for Ingest statefulset | `RollingUpdate` | +| `ingest.heapSize` | Ingest node heap size | `128m` | +| `ingest.podAnnotations` | Annotations for ingest pods. 
| `{}` | +| `ingest.hostAliases` | Add deployment host aliases | `[]` | +| `ingest.schedulerName` | Name of the k8s scheduler (other than default) | `""` | +| `ingest.podLabels` | Extra labels to add to Pod | `{}` | +| `ingest.securityContext.enabled` | Enable security context for ingest pods | `true` | +| `ingest.securityContext.fsGroup` | Group ID for the container for ingest pods | `1001` | +| `ingest.securityContext.runAsUser` | User ID for the container for ingest pods | `1001` | +| `ingest.podSecurityContext.enabled` | Enable security context for ingest pods | `false` | +| `ingest.podSecurityContext.fsGroup` | Group ID for the container for ingest pods | `1001` | +| `ingest.containerSecurityContext.enabled` | Enable security context for data pods | `false` | +| `ingest.containerSecurityContext.runAsUser` | User ID for the container for data pods | `1001` | +| `ingest.podAffinityPreset` | Ingest Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `ingest.podAntiAffinityPreset` | Ingest Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `ingest.nodeAffinityPreset.type` | Ingest Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `ingest.nodeAffinityPreset.key` | Ingest Node label key to match Ignored if `affinity` is set. | `""` | +| `ingest.nodeAffinityPreset.values` | Ingest Node label values to match. Ignored if `affinity` is set. | `[]` | +| `ingest.affinity` | Ingest Affinity for pod assignment | `{}` | +| `ingest.priorityClassName` | Ingest pods Priority Class Name | `""` | +| `ingest.nodeSelector` | Ingest Node labels for pod assignment | `{}` | +| `ingest.tolerations` | Ingest Tolerations for pod assignment | `[]` | +| `ingest.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
Evaluated as a template | `[]` | +| `ingest.resources.limits` | The resources limits for the container | `{}` | +| `ingest.resources.requests` | The requested resources for the container | `{}` | +| `ingest.startupProbe.enabled` | Enable/disable the startup probe (ingest nodes pod) | `false` | +| `ingest.startupProbe.initialDelaySeconds` | Delay before startup probe is initiated (ingest nodes pod) | `90` | +| `ingest.startupProbe.periodSeconds` | How often to perform the probe (ingest nodes pod) | `10` | +| `ingest.startupProbe.timeoutSeconds` | When the probe times out (ingest nodes pod) | `5` | +| `ingest.startupProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` | +| `ingest.startupProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (ingest nodes pod) | `1` | +| `ingest.livenessProbe.enabled` | Enable/disable the liveness probe (ingest nodes pod) | `true` | +| `ingest.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (ingest nodes pod) | `90` | +| `ingest.livenessProbe.periodSeconds` | How often to perform the probe (ingest nodes pod) | `10` | +| `ingest.livenessProbe.timeoutSeconds` | When the probe times out (ingest nodes pod) | `5` | +| `ingest.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` | +| `ingest.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (ingest nodes pod) | `1` | +| `ingest.readinessProbe.enabled` | Enable/disable the readiness probe (ingest nodes pod) | `true` | +| `ingest.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (ingest nodes pod) | `90` | +| `ingest.readinessProbe.periodSeconds` | How often to perform the probe (ingest nodes pod) | `10` | +| `ingest.readinessProbe.timeoutSeconds` | When the probe times out (ingest nodes pod) | `5` | +| `ingest.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` | +| `ingest.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (ingest nodes pod) | `1` | +| `ingest.customStartupProbe` | Override default startup probe | `{}` | +| `ingest.customLivenessProbe` | Override default liveness probe | `{}` | +| `ingest.customReadinessProbe` | Override default readiness probe | `{}` | +| `ingest.initContainers` | Extra init containers to add to the Elasticsearch ingest pod(s) | `[]` | +| `ingest.sidecars` | Extra sidecar containers to add to the Elasticsearch ingest pod(s) | `[]` | +| `ingest.service.type` | Kubernetes Service type (ingest nodes) | `ClusterIP` | +| `ingest.service.port` | Kubernetes Service port Elasticsearch transport port (ingest nodes) | `9300` | +| `ingest.service.nodePort` | Kubernetes Service nodePort (ingest nodes) | `""` | +| `ingest.service.annotations` | Annotations for ingest nodes service | `{}` | +| `ingest.service.loadBalancerIP` | loadBalancerIP if ingest nodes service type is `LoadBalancer` | `""` | +| `ingest.serviceAccount.create` | Create a default serviceaccount for elasticsearch curator | `false` | +| `ingest.serviceAccount.name` | Name of the created serviceAccount | `""` | + + +### Curator parameters + +| Name | Description | Value | +| -------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------- | +| `curator.enabled` | Enable Elasticsearch Curator cron job | `false` | +| `curator.name` | Elasticsearch Curator pod name | `curator` | +| `curator.image.registry` | Elasticsearch Curator image registry | `docker.io` | +| `curator.image.repository` | Elasticsearch Curator image repository | `bitnami/elasticsearch-curator` | +| `curator.image.tag` | Elasticsearch Curator image tag | `5.8.4-debian-10-r190` | +| `curator.image.pullPolicy` | Elasticsearch Curator image pull policy | `IfNotPresent` | +| `curator.image.pullSecrets` | Elasticsearch Curator image pull secrets | `[]` | +| `curator.cronjob.schedule` | Schedule for the CronJob | `0 1 * * *` | +| `curator.cronjob.annotations` | Annotations to add to the cronjob | `{}` | +| `curator.cronjob.concurrencyPolicy` | `Allow,Forbid,Replace` concurrent jobs | `""` | +| `curator.cronjob.failedJobsHistoryLimit` | Specify the number of failed Jobs to keep | `""` | +| `curator.cronjob.successfulJobsHistoryLimit` | Specify the number of completed Jobs to keep | `""` | +| `curator.cronjob.jobRestartPolicy` | Control the Job restartPolicy | `Never` | +| `curator.schedulerName` | Name of the k8s scheduler (other than default) | `""` | +| `curator.podAnnotations` | Annotations to add to the pod | `{}` | +| `curator.podLabels` | Extra labels to add to Pod | `{}` | +| `curator.podAffinityPreset` | Curator Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `curator.podAntiAffinityPreset` | Curator Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `curator.nodeAffinityPreset.type` | Curator Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `curator.nodeAffinityPreset.key` | Curator Node label key to match Ignored if `affinity` is set. | `""` | +| `curator.nodeAffinityPreset.values` | Curator Node label values to match. Ignored if `affinity` is set. | `[]` | +| `curator.initContainers` | Extra init containers to add to the Elasticsearch coordinating-only pod(s) | `[]` | +| `curator.sidecars` | Extra sidecar containers to add to the Elasticsearch ingest pod(s) | `[]` | +| `curator.affinity` | Curator Affinity for pod assignment | `{}` | +| `curator.nodeSelector` | Curator Node labels for pod assignment | `{}` | +| `curator.tolerations` | Curator Tolerations for pod assignment | `[]` | +| `curator.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` | +| `curator.rbac.enabled` | Enable RBAC resources | `false` | +| `curator.serviceAccount.create` | Create a default serviceaccount for elasticsearch curator | `true` | +| `curator.serviceAccount.name` | Name for elasticsearch curator serviceaccount | `""` | +| `curator.psp.create` | Whether to create a PodSecurityPolicy. 
WARNING: PodSecurityPolicy is deprecated in Kubernetes v1.21 or later, unavailable in v1.25 or later | `false` | +| `curator.hooks` | Whether to run job on selected hooks | `{}` | +| `curator.dryrun` | Run Curator in dry-run mode | `false` | +| `curator.command` | Command to execute | `["curator"]` | +| `curator.env` | Environment variables to add to the cronjob container | `{}` | +| `curator.configMaps.action_file_yml` | Contents of the Curator action_file.yml | `""` | +| `curator.configMaps.config_yml` | Contents of the Curator config.yml (overrides config) | `""` | +| `curator.resources.limits` | The resources limits for the container | `{}` | +| `curator.resources.requests` | The requested resources for the container | `{}` | +| `curator.priorityClassName` | Curator Pods Priority Class Name | `""` | +| `curator.extraVolumes` | Extra volumes | `[]` | +| `curator.extraVolumeMounts` | Mount extra volume(s) | `[]` | +| `curator.extraInitContainers` | DEPRECATED. Use `curator.initContainers` instead. Init containers to add to the cronjob container | `[]` | + + +### Metrics parameters + +| Name | Description | Value | +| -------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------ | -------------------------------- | +| `metrics.enabled` | Enable prometheus exporter | `false` | +| `metrics.name` | Metrics pod name | `metrics` | +| `metrics.image.registry` | Metrics exporter image registry | `docker.io` | +| `metrics.image.repository` | Metrics exporter image repository | `bitnami/elasticsearch-exporter` | +| `metrics.image.tag` | Metrics exporter image tag | `1.3.0-debian-10-r31` | +| `metrics.image.pullPolicy` | Metrics exporter image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Metrics exporter image pull secrets | `[]` | +| `metrics.extraArgs` | Extra arguments to add to the default exporter command | `[]` | +| `metrics.hostAliases` | Add deployment host aliases | `[]` | +| `metrics.schedulerName` | Name of the k8s scheduler (other than default) | `""` | +| `metrics.service.type` | Metrics exporter endpoint service type | `ClusterIP` | +| `metrics.service.annotations` | Provide any additional annotations which may be required. | `{}` | +| `metrics.podAffinityPreset` | Metrics Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `metrics.podAntiAffinityPreset` | Metrics Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `metrics.nodeAffinityPreset.type` | Metrics Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `metrics.nodeAffinityPreset.key` | Metrics Node label key to match Ignored if `affinity` is set. | `""` | +| `metrics.nodeAffinityPreset.values` | Metrics Node label values to match. Ignored if `affinity` is set. | `[]` | +| `metrics.affinity` | Metrics Affinity for pod assignment | `{}` | +| `metrics.nodeSelector` | Metrics Node labels for pod assignment | `{}` | +| `metrics.tolerations` | Metrics Tolerations for pod assignment | `[]` | +| `metrics.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
Evaluated as a template | `[]` | +| `metrics.resources.limits` | The resources limits for the container | `{}` | +| `metrics.resources.requests` | The requested resources for the container | `{}` | +| `metrics.livenessProbe.enabled` | Enable/disable the liveness probe (metrics pod) | `true` | +| `metrics.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (metrics pod) | `60` | +| `metrics.livenessProbe.periodSeconds` | How often to perform the probe (metrics pod) | `10` | +| `metrics.livenessProbe.timeoutSeconds` | When the probe times out (metrics pod) | `5` | +| `metrics.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` | +| `metrics.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (metrics pod) | `1` | +| `metrics.readinessProbe.enabled` | Enable/disable the readiness probe (metrics pod) | `true` | +| `metrics.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (metrics pod) | `5` | +| `metrics.readinessProbe.periodSeconds` | How often to perform the probe (metrics pod) | `10` | +| `metrics.readinessProbe.timeoutSeconds` | When the probe times out (metrics pod) | `1` | +| `metrics.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` | +| `metrics.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (metrics pod) | `1` | +| `metrics.podAnnotations` | Metrics exporter pod Annotation and Labels | `{}` | +| `metrics.podLabels` | Extra labels to add to Pod | `{}` | +| `metrics.serviceMonitor.enabled` | if `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`) | `false` | +| `metrics.serviceMonitor.namespace` | Namespace in which Prometheus is running | `""` | +| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped. 
| `""` | +| `metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `""` | +| `metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` | + + +### Sysctl Image parameters + +| Name | Description | Value | +| -------------------------------- | ------------------------------------------- | ----------------------- | +| `sysctlImage.enabled` | Enable kernel settings modifier image | `true` | +| `sysctlImage.registry` | Kernel settings modifier image registry | `docker.io` | +| `sysctlImage.repository` | Kernel settings modifier image repository | `bitnami/bitnami-shell` | +| `sysctlImage.tag` | Kernel settings modifier image tag | `10-debian-10-r259` | +| `sysctlImage.pullPolicy` | Kernel settings modifier image pull policy | `IfNotPresent` | +| `sysctlImage.pullSecrets` | Kernel settings modifier image pull secrets | `[]` | +| `sysctlImage.resources.limits` | The resources limits for the container | `{}` | +| `sysctlImage.resources.requests` | The requested resources for the container | `{}` | + + +### VolumePermissions parameters + +| Name | Description | Value | +| -------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------- | +| `volumePermissions.enabled` | Enable init container that changes volume permissions in the data directory (for cases where the default k8s `runAsUser` and `fsUser` values do not work) | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/bitnami-shell` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag | `10-debian-10-r259` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` | +| `volumePermissions.image.pullSecrets` | Init container volume-permissions image pull secrets | `[]` | +| `volumePermissions.resources.limits` | The resources limits for the container | `{}` | +| `volumePermissions.resources.requests` | The requested resources for the container | `{}` | + + +### Kibana Parameters + +| Name | Description | Value | +| ---------------------------- | ------------------------------------------------------------------------- | ------ | +| `kibana.elasticsearch.hosts` | Array containing hostnames for the ES instances. Used to generate the URL | `[]` | +| `kibana.elasticsearch.port` | Port to connect Kibana and ES instance. Used to generate the URL | `9200` | + + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```console +$ helm install my-release \ + --set name=my-elastic,client.service.port=8080 \ + bitnami/elasticsearch +``` + +The above command sets the Elasticsearch cluster name to `my-elastic` and REST port number to `8080`. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```console +$ helm install my-release -f values.yaml bitnami/elasticsearch +``` + +> **Tip**: You can use the default [values.yaml](values.yaml). + +## Configuration and installation details + +### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. 
This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Change ElasticSearch version + +To modify the ElasticSearch version used in this chart you can specify a [valid image tag](https://hub.docker.com/r/bitnami/elasticsearch/tags/) using the `image.tag` parameter. For example, `image.tag=X.Y.Z`. This approach is also applicable to other images like exporters. + +### Default kernel settings + +Currently, Elasticsearch requires some changes in the kernel of the host machine to work as expected. If those values are not set in the underlying operating system, the ES containers fail to boot with ERROR messages. More information about these requirements can be found in the links below: + +- [File Descriptor requirements](https://www.elastic.co/guide/en/elasticsearch/reference/current/file-descriptors.html) +- [Virtual memory requirements](https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html) + +This chart uses a **privileged** initContainer to change those settings in the Kernel by running: `sysctl -w vm.max_map_count=262144 && sysctl -w fs.file-max=65536`. +You can disable the initContainer using the `sysctlImage.enabled=false` parameter. + +### Enable bundled Kibana + +This Elasticsearch chart contains Kibana as subchart, you can enable it just setting the `global.kibanaEnabled=true` parameter. +To see the notes with some operational instructions from the Kibana chart, please use the `--render-subchart-notes` as part of your `helm install` command, in this way you can see the Kibana and ES notes in your terminal. + +When enabling the bundled kibana subchart, there are a few gotchas that you should be aware of listed below. + +#### Elasticsearch rest Encryption + +When enabling elasticsearch' rest endpoint encryption you will also need to set `kibana.elasticsearch.security.tls.enabled` to the SAME value along with some additional values shown below for an "out of the box experience": + +```yaml +security: + enabled: true + # PASSWORD must be the same value passed to elasticsearch to get an "out of the box" experience + elasticPassword: "" + tls: + # AutoGenerate TLS certs for elastic + autoGenerated: true + +kibana: + elasticsearch: + security: + auth: + enabled: true + # default in the elasticsearch chart is elastic + kibanaUsername: "" + kibanaPassword: "" + tls: + # Instruct kibana to connect to elastic over https + enabled: true + # Bit of a catch 22, as you will need to know the name upfront of your release + existingSecret: RELEASENAME-elasticsearch-coordinating-only-crt + # As the certs are auto-generated, they are pemCerts so set to true + usePemCerts: true +``` + +At a bare-minimum, when working with kibana and elasticsearch together the following values MUST be the same, otherwise things will fail: + +```yaml +security: + tls: + restEncryption: true + +# assumes global.kibanaEnabled=true +kibana: + elasticsearch: + security: + tls: + enabled: true +``` + +### Adding extra environment variables + +In case you want to add extra environment variables (useful for advanced operations like custom init scripts), you can use the `extraEnvVars` property. + +```yaml +extraEnvVars: + - name: ELASTICSEARCH_VERSION + value: 7.0 +``` + +Alternatively, you can use a ConfigMap or a Secret with the environment variables. 
To do so, use the `extraEnvVarsConfigMap` or the `extraEnvVarsSecret` values.
+
+### Using custom init scripts
+
+For advanced operations, the Bitnami Elasticsearch chart allows using custom init scripts that will be mounted inside `/docker-entrypoint-initdb.d`. You can include the file directly in your `values.yaml` with `initScripts`, or use a ConfigMap or a Secret (in case of sensitive data) for mounting these extra scripts. In this case, use the `initScriptsCM` and `initScriptsSecret` values.
+
+```console
+initScriptsCM=special-scripts
+initScriptsSecret=special-scripts-sensitive
+```
+
+### Snapshot and restore operations
+
+As described in the [official documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/snapshots-register-repository.html#snapshots-filesystem-repository), it is necessary to register a snapshot repository before you can perform snapshot and restore operations.
+
+This chart allows you to configure Elasticsearch to use a shared file system to store snapshots. To do so, you need to mount an RWX volume on every Elasticsearch node, and set the parameter `snapshotRepoPath` with the path where the volume is mounted. In the example below, you can find the values to set when using an NFS Persistent Volume:
+
+```yaml
+extraVolumes:
+  - name: snapshot-repository
+    nfs:
+      server: nfs.example.com # Please change this to your NFS server
+      path: /share1
+extraVolumeMounts:
+  - name: snapshot-repository
+    mountPath: /snapshots
+snapshotRepoPath: "/snapshots"
+```
+
+### Sidecars and Init Containers
+
+If you need additional containers to run within the same pod as the Elasticsearch components (e.g. an additional metrics or logging exporter), you can do so via the `XXX.sidecars` parameter(s), where XXX is a placeholder you need to replace with the actual component(s). Simply define your container according to the Kubernetes container spec.
+
+```yaml
+sidecars:
+  - name: your-image-name
+    image: your-image
+    imagePullPolicy: Always
+    ports:
+      - name: portname
+        containerPort: 1234
+```
+
+Similarly, you can add extra init containers using the `initContainers` parameter.
+
+```yaml
+initContainers:
+  - name: your-image-name
+    image: your-image
+    imagePullPolicy: Always
+    ports:
+      - name: portname
+```
+
+### Setting Pod's affinity
+
+This chart allows you to set your custom affinity using the `XXX.affinity` parameter(s). Find more information about Pod affinity in the [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity).
+
+As an alternative, you can use one of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/master/bitnami/common#affinities) chart. To do so, set the `XXX.podAffinityPreset`, `XXX.podAntiAffinityPreset`, or `XXX.nodeAffinityPreset` parameters.
+
+## Persistence
+
+The [Bitnami Elasticsearch](https://github.com/bitnami/bitnami-docker-elasticsearch) image stores the Elasticsearch data at the `/bitnami/elasticsearch/data` path of the container.
+
+By default, the chart mounts a [Persistent Volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) at this location. The volume is created using dynamic volume provisioning. See the [Parameters](#parameters) section to configure the PVC.
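+
+For example, a minimal values snippet to size the volumes and pin the data nodes to an explicit StorageClass might look like the following (parameter names are taken from the tables above; the `standard` StorageClass is only an assumption, so use a class that exists in your cluster):
+
+```yaml
+# Hypothetical sizing example for master and data node persistence
+master:
+  persistence:
+    size: 8Gi
+data:
+  persistence:
+    enabled: true
+    storageClass: "standard"  # assumed StorageClass, adjust to your cluster
+    size: 16Gi
+```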
+
+### Adjust permissions of persistent volume mountpoint
+
+As the image runs as non-root by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data to it.
+
+By default, the chart is configured to use Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions.
+As an alternative, this chart supports using an initContainer to change the ownership of the volume before mounting it in the final destination.
+
+You can enable this initContainer by setting `volumePermissions.enabled` to `true`.
+
+## Troubleshooting
+
+Find more information about how to deal with common errors related to Bitnami’s Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues).
+
+## Upgrading
+
+### To 17.0.0
+
+This version bumps the major version of the Kibana Helm Chart bundled as a dependency; the changes implemented in that Kibana major version are described [here](https://github.com/bitnami/charts/tree/master/bitnami/kibana#to-900).
+
+### To 16.0.0
+
+This version replaces the Ingest and Coordinating Deployments with StatefulSets. This change is required so Coordinating and Ingest nodes have their own associated services, which are required for TLS hostname verification.
+
+We haven't encountered any issues during our upgrade tests, but we recommend creating volume backups before upgrading this major version, especially for users with additional volumes and custom configurations.
+
+Additionally, this version adds support for X-Pack Security features such as TLS/SSL encryption and basic authentication.
+
+### To 15.0.0
+
+From this version onwards, Elasticsearch container components are licensed under the [Elastic License](https://www.elastic.co/licensing/elastic-license), which is not currently accepted as an Open Source license by the Open Source Initiative (OSI).
+
+Also, from now on, the Helm Chart includes the X-Pack plugin installed by default.
+
+A regular upgrade from previous versions is compatible.
+
+### To 14.0.0
+
+This version standardizes the way of defining Ingress rules in the Kibana subchart. When configuring a single hostname for the Ingress rule, set the `kibana.ingress.hostname` value. When defining more than one, set the `kibana.ingress.extraHosts` array (see the sketch below). Apart from this case, no issues are expected to appear when upgrading.
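+
+A minimal sketch of the new layout (the hostnames are placeholders, and the `name`/`path` fields of the `extraHosts` entries are assumptions; check the bundled Kibana chart documentation for the exact structure):
+
+```yaml
+kibana:
+  ingress:
+    enabled: true
+    # Main hostname for the Ingress rule
+    hostname: kibana.local
+    # Any additional hostnames (placeholder values)
+    extraHosts:
+      - name: kibana.example.com
+        path: /
+```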
+
+### To 13.0.0
+
+[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project); this major version is the result of the changes required for the Helm Chart to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.
+
+**What changes were introduced in this major version?**
+
+- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3); this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). You can find more information about the `apiVersion` field [here](https://helm.sh/docs/topics/charts/#the-apiversion-field).
+- Dependency information was moved from *requirements.yaml* to *Chart.yaml*
+- After running `helm dependency update`, a *Chart.lock* file is generated containing the same structure used in the previous *requirements.lock*
+- The different fields present in the *Chart.yaml* file have been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts
+
+**Considerations when upgrading to this version**
+
+- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues
+- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore
+- If you installed the previous version with Helm v2 and want to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3
+
+**Useful links**
+
+- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/
+- https://helm.sh/docs/topics/v2_v3_migration/
+- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/
+
+### To 12.0.0
+
+Several changes were introduced that break backwards compatibility:
+
+- Port names were prefixed with the protocol to comply with Istio (see https://istio.io/docs/ops/deployment/requirements/).
+- Labels were adapted to follow the Helm chart best practices.
+- Elasticsearch data pods are now deployed in parallel in order to bootstrap the cluster and be discovered.
+
+### To 11.0.0
+
+Elasticsearch master pods are now deployed in parallel in order to bootstrap the cluster and be discovered.
+
+The field `podManagementPolicy` can't be updated in a StatefulSet, so you need to destroy it before you upgrade the chart to this version.
+
+```console
+$ kubectl delete statefulset elasticsearch-master
+$ helm upgrade <release-name> bitnami/elasticsearch
+```
+
+### To 10.0.0
+
+In this version, Kibana was added as a dependent chart. More info about how to enable and work with this bundled Kibana can be found in the ["Enable bundled Kibana"](#enable-bundled-kibana) section.
+
+### To 9.0.0
+
+Elasticsearch master nodes store the cluster status at `/bitnami/elasticsearch/data`. Among other things, this includes the UUID of the Elasticsearch cluster. Without a persistent data store for this data, the UUID of a cluster could change if the k8s node(s) hosting the ES master pods go down and the pods are rescheduled onto other nodes. In that event, the data nodes will no longer be able to join the cluster, as the UUID changed, resulting in a broken cluster.
+
+To resolve such issues, PVCs are now attached for master node data persistence.
+
+---
+
+Helm performs a lookup for the object based on its group (apps), version (v1), and kind (Deployment), also known as its GroupVersionKind, or GVK. Changing the GVK is considered a compatibility breaker from Kubernetes' point of view, so you cannot "upgrade" those objects to the new GVK in-place. Earlier versions of Helm 3 did not perform the lookup correctly, which has since been fixed to match the spec.
+
+In [4dfac075aacf74405e31ae5b27df4369e84eb0b0](https://github.com/bitnami/charts/commit/4dfac075aacf74405e31ae5b27df4369e84eb0b0) the `apiVersion` of the deployment resources was updated to `apps/v1` in line with the deprecation of the older APIs, resulting in compatibility breakage.
+
+### To 7.4.0
+
+This version also introduces `bitnami/common`, a [library chart](https://helm.sh/docs/topics/library_charts/#helm), as a dependency. More documentation about this new utility can be found [here](https://github.com/bitnami/charts/tree/master/bitnami/common#bitnami-common-library-chart). Please make sure that you have updated the chart dependencies before executing any upgrade, as sketched below.
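+
+For example, a minimal sketch when upgrading from a local checkout of this chart (the release name `my-release` and the chart path `./elasticsearch` are assumptions; adapt them to your setup):
+
+```console
+$ helm dependency update ./elasticsearch
+$ helm upgrade my-release ./elasticsearch
+```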
+
+### To 7.0.0
+
+This version enables, by default, the initContainer that modifies some kernel settings to meet the Elasticsearch requirements. More info in the ["Default kernel settings"](#default-kernel-settings) section.
+You can disable the initContainer using the `sysctlImage.enabled=false` parameter.
+
+### To 3.0.0
+
+Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments.
+Use the workaround below to upgrade from versions previous to 3.0.0. The following example assumes that the release name is elasticsearch:
+
+```console
+$ kubectl patch deployment elasticsearch-coordinating --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]'
+$ kubectl patch deployment elasticsearch-ingest --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]'
+$ kubectl patch deployment elasticsearch-master --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]'
+$ kubectl patch deployment elasticsearch-metrics --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]'
+$ kubectl delete statefulset elasticsearch-data --cascade=false
+```
diff --git a/elasticsearch/ci/ct-values.yaml b/elasticsearch/ci/ct-values.yaml
new file mode 100644
index 0000000..bb6c5dd
--- /dev/null
+++ b/elasticsearch/ci/ct-values.yaml
@@ -0,0 +1,6 @@
+master:
+  replicas: 1
+data:
+  replicas: 1
+coordinating:
+  replicas: 1
diff --git a/elasticsearch/templates/NOTES.txt b/elasticsearch/templates/NOTES.txt
new file mode 100644
index 0000000..61657c2
--- /dev/null
+++ b/elasticsearch/templates/NOTES.txt
@@ -0,0 +1,132 @@
+CHART NAME: {{ .Chart.Name }}
+CHART VERSION: {{ .Chart.Version }}
+APP VERSION: {{ .Chart.AppVersion }}
+
+{{- if contains .Values.coordinating.service.type "LoadBalancer" }}
+
+-------------------------------------------------------------------------------
+ WARNING
+
+  By specifying "coordinating.service.type=LoadBalancer" you have most likely
+  exposed the Elasticsearch service externally.
+
+  Please note that Elasticsearch does not implement an authentication
+  mechanism to secure your cluster. For security reasons, we strongly
+  suggest that you switch to "ClusterIP" or "NodePort".
+-------------------------------------------------------------------------------
+{{- end }}
+{{- if not .Values.sysctlImage.enabled }}
+
+-------------------------------------------------------------------------------
+ WARNING
+
+  Elasticsearch requires some changes in the kernel of the host machine to
+  work as expected. If those values are not set in the underlying operating
+  system, the ES containers fail to boot with ERROR messages.
+
+  To check whether the host machine meets the requirements, run the command
+  below:
+
+    kubectl logs --namespace {{ .Release.Namespace }} $(kubectl get --namespace {{ .Release.Namespace }} \
+      pods -l app={{ template "common.names.name" .
}},role=master -o jsonpath='{.items[0].metadata.name}') \ + elasticsearch + + You can adapt the Kernel parameters on you cluster as described in the + official documentation: + + https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster + + As an alternative, you can specify "sysctlImage.enabled=true" to use a + privileged initContainer to change those settings in the Kernel: + + helm upgrade --namespace {{ .Release.Namespace }} {{ .Release.Name }} bitnami/elasticsearch --set sysctlImage.enabled=true + + Note that this requires the ability to run privileged containers, which is likely not + the case on many secure clusters. To cover this use case, you can also set some parameters + in the config file to customize the default settings: + + https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-store.html + https://www.elastic.co/guide/en/cloud-on-k8s/master/k8s-virtual-memory.html + + For that, you can place the desired parameters by using the "config" block present in the values.yaml + +{{- else if .Values.sysctlImage.enabled }} + +------------------------------------------------------------------------------- + WARNING + + Elasticsearch requires some changes in the kernel of the host machine to + work as expected. If those values are not set in the underlying operating + system, the ES containers fail to boot with ERROR messages. + + More information about these requirements can be found in the links below: + + https://www.elastic.co/guide/en/elasticsearch/reference/current/file-descriptors.html + https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html + + This chart uses a privileged initContainer to change those settings in the Kernel + by running: sysctl -w vm.max_map_count=262144 && sysctl -w fs.file-max=65536 + +{{- end }} + +** Please be patient while the chart is being deployed ** + +{{- if .Values.diagnosticMode.enabled }} +The chart has been deployed in diagnostic mode. All probes have been disabled and the command has been overwritten with: + + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 4 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 4 }} + +Get the list of pods by executing: + + kubectl get pods --namespace {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }} + +Access the pod you want to debug by executing + + kubectl exec --namespace {{ .Release.Namespace }} -ti -- bash + +In order to replicate the container startup scripts execute this command: + + /opt/bitnami/scripts/elasticsearch/entrypoint.sh /opt/bitnami/scripts/elasticsearch/run.sh + +{{- else }} + +{{- if .Values.curator.enabled }} + + A CronJob will run with schedule {{ .Values.curator.cronjob.schedule }}. + + The Jobs will not be removed automagically when deleting this Helm chart. + To remove these jobs, run the following: + + kubectl --namespace {{ .Release.Namespace }} delete job -l app={{ template "common.names.name" . }},role=curator + +{{- end }} + + Elasticsearch can be accessed within the cluster on port {{ .Values.coordinating.service.port }} at {{ template "elasticsearch.coordinating.fullname" . 
}}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + + To access from outside the cluster execute the following commands: + +{{- if contains "NodePort" .Values.coordinating.service.type }} + + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "elasticsearch.coordinating.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + curl http://$NODE_IP:$NODE_PORT/ +{{- else if contains "LoadBalancer" .Values.coordinating.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "elasticsearch.coordinating.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "elasticsearch.coordinating.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + curl http://$SERVICE_IP:{{ .Values.coordinating.service.port }}/ +{{- else if contains "ClusterIP" .Values.coordinating.service.type }} + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "elasticsearch.coordinating.fullname" . }} {{ .Values.coordinating.service.port }}:9200 & + curl http://127.0.0.1:9200/ +{{- end }} + +{{- include "common.warnings.rollingTag" .Values.image }} +{{- include "common.warnings.rollingTag" .Values.volumePermissions.image }} +{{- include "common.warnings.rollingTag" .Values.sysctlImage }} + +{{- end }} +{{ include "elasticsearch.validateValues" . }} diff --git a/elasticsearch/templates/_helpers.tpl b/elasticsearch/templates/_helpers.tpl new file mode 100644 index 0000000..79cdd62 --- /dev/null +++ b/elasticsearch/templates/_helpers.tpl @@ -0,0 +1,490 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the proper ES image name +*/}} +{{- define "elasticsearch.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }} +{{- end -}} + + +{{/* +Create a default fully qualified master name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "elasticsearch.master.fullname" -}} +{{- if .Values.master.fullnameOverride -}} +{{- .Values.master.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" (include "common.names.fullname" .) .Values.master.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified ingest name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "elasticsearch.ingest.fullname" -}} +{{- if .Values.ingest.fullnameOverride -}} +{{- .Values.ingest.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" (include "common.names.fullname" .) .Values.ingest.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified coordinating name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
+*/}} +{{- define "elasticsearch.coordinating.fullname" -}} +{{- if .Values.global.kibanaEnabled -}} +{{- printf "%s-%s" .Release.Name .Values.global.coordinating.name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- if .Values.coordinating -}} +{{- if .Values.coordinating.fullnameOverride -}} +{{- .Values.coordinating.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" (include "common.names.fullname" .) .Values.global.coordinating.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Return the hostname of every ElasticSearch seed node +*/}} +{{- define "elasticsearch.hosts" -}} +{{- $clusterDomain := .Values.clusterDomain }} +{{- $releaseNamespace := .Release.Namespace }} +{{- $masterFullname := include "elasticsearch.master.fullname" . }} +{{- $coordinatingFullname := include "elasticsearch.coordinating.fullname" . }} +{{- $dataFullname := include "elasticsearch.data.fullname" . }} +{{- $ingestFullname := include "elasticsearch.ingest.fullname" . }} +{{- $masterFullname }}.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}, +{{- $coordinatingFullname }}.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}, +{{- $dataFullname }}.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}, +{{- if .Values.ingest.enabled }} +{{- $ingestFullname }}.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}, +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified data name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "elasticsearch.data.fullname" -}} +{{- if .Values.data.fullnameOverride -}} +{{- .Values.data.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" (include "common.names.fullname" .) .Values.data.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{ template "elasticsearch.initScriptsSecret" . }} +{{/* +Get the initialization scripts volume name. +*/}} +{{- define "elasticsearch.initScripts" -}} +{{- printf "%s-init-scripts" (include "common.names.fullname" .) -}} +{{- end -}} + +{{ template "elasticsearch.initScriptsCM" . }} +{{/* +Get the initialization scripts ConfigMap name. +*/}} +{{- define "elasticsearch.initScriptsCM" -}} +{{- printf "%s" .Values.initScriptsCM -}} +{{- end -}} + +{{ template "elasticsearch.initScriptsSecret" . }} +{{/* +Get the initialization scripts Secret name. +*/}} +{{- define "elasticsearch.initScriptsSecret" -}} +{{- printf "%s" .Values.initScriptsSecret -}} +{{- end -}} + +{{/* + Create the name of the master service account to use + */}} +{{- define "elasticsearch.master.serviceAccountName" -}} +{{- if .Values.master.serviceAccount.create -}} + {{ default (include "elasticsearch.master.fullname" .) .Values.master.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.master.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* + Create the name of the coordinating-only service account to use + */}} +{{- define "elasticsearch.coordinating.serviceAccountName" -}} +{{- if .Values.coordinating.serviceAccount.create -}} + {{ default (include "elasticsearch.coordinating.fullname" .) 
.Values.coordinating.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.coordinating.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* + Create the name of the data service account to use + */}} +{{- define "elasticsearch.data.serviceAccountName" -}} +{{- if .Values.data.serviceAccount.create -}} + {{ default (include "elasticsearch.data.fullname" .) .Values.data.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.data.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* + Create the name of the ingest service account to use + */}} +{{- define "elasticsearch.ingest.serviceAccountName" -}} +{{- if .Values.ingest.serviceAccount.create -}} + {{ default (include "elasticsearch.ingest.fullname" .) .Values.ingest.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.ingest.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified metrics name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "elasticsearch.metrics.fullname" -}} +{{- printf "%s-%s" (include "common.names.fullname" .) .Values.metrics.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper ES exporter image name +*/}} +{{- define "elasticsearch.metrics.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.metrics.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper sysctl image name +*/}} +{{- define "elasticsearch.sysctl.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.sysctlImage "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "elasticsearch.imagePullSecrets" -}} +{{ include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.metrics.image .Values.curator.image .Values.sysctlImage .Values.volumePermissions.image) "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "elasticsearch.volumePermissions.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.volumePermissions.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper Storage Class +Usage: +{{ include "elasticsearch.storageClass" (dict "global" .Values.global "local" .Values.master) }} +*/}} +{{- define "elasticsearch.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +*/}} +{{- if .global -}} + {{- if .global.storageClass -}} + {{- if (eq "-" .global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .local.persistence.storageClass -}} + {{- if (eq "-" .local.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .local.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .local.persistence.storageClass -}} + {{- if (eq "-" .local.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .local.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for cronjob APIs. 
+*/}} +{{- define "cronjob.apiVersion" -}} +{{- if semverCompare "< 1.8-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "batch/v2alpha1" }} +{{- else if and (semverCompare ">=1.8-0" .Capabilities.KubeVersion.GitVersion) (semverCompare "< 1.21-0" .Capabilities.KubeVersion.GitVersion) -}} +{{- print "batch/v1beta1" }} +{{- else if semverCompare ">=1.21-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "batch/v1" }} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "elasticsearch.curator.fullname" -}} +{{- printf "%s-%s" (include "common.names.fullname" .) .Values.curator.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "elasticsearch.curator.serviceAccountName" -}} +{{- if .Values.curator.serviceAccount.create -}} + {{ default (include "elasticsearch.curator.fullname" .) .Values.curator.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.curator.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Return the proper ES curator image name +*/}} +{{- define "elasticsearch.curator.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.curator.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the elasticsearch TLS credentials secret for master nodes. +*/}} +{{- define "elasticsearch.master.tlsSecretName" -}} +{{- $secretName := .Values.security.tls.master.existingSecret -}} +{{- if $secretName -}} + {{- printf "%s" (tpl $secretName $) -}} +{{- else -}} + {{- printf "%s-crt" (include "elasticsearch.master.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return the elasticsearch TLS credentials secret for data nodes. +*/}} +{{- define "elasticsearch.data.tlsSecretName" -}} +{{- $secretName := .Values.security.tls.data.existingSecret -}} +{{- if $secretName -}} + {{- printf "%s" (tpl $secretName $) -}} +{{- else -}} + {{- printf "%s-crt" (include "elasticsearch.data.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return the elasticsearch TLS credentials secret for ingest nodes. +*/}} +{{- define "elasticsearch.ingest.tlsSecretName" -}} +{{- $secretName := .Values.security.tls.ingest.existingSecret -}} +{{- if $secretName -}} + {{- printf "%s" (tpl $secretName $) -}} +{{- else -}} + {{- printf "%s-crt" (include "elasticsearch.ingest.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return the elasticsearch TLS credentials secret for coordinating-only nodes. +*/}} +{{- define "elasticsearch.coordinating.tlsSecretName" -}} +{{- $secretName := .Values.security.tls.coordinating.existingSecret -}} +{{- if $secretName -}} + {{- printf "%s" (tpl $secretName $) -}} +{{- else -}} + {{- printf "%s-crt" (include "elasticsearch.coordinating.fullname" .) 
-}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a TLS credentials secret object should be created +*/}} +{{- define "elasticsearch.createTlsSecret" -}} +{{- if and .Values.security.enabled .Values.security.tls.autoGenerated (not (include "elasticsearch.security.tlsSecretsProvided" .)) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if an authentication credentials secret object should be created +*/}} +{{- define "elasticsearch.createSecret" -}} +{{- if and .Values.security.enabled (not .Values.security.existingSecret) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the Elasticsearch authentication credentials secret name +*/}} +{{- define "elasticsearch.secretName" -}} +{{- coalesce .Values.security.existingSecret (include "common.names.fullname" .) -}} +{{- end -}} + +{{/* +Return true if a TLS password secret object should be created +*/}} +{{- define "elasticsearch.createTlsPasswordsSecret" -}} +{{- if and .Values.security.enabled (not .Values.security.tls.passwordsSecret) (or .Values.security.tls.keystorePassword .Values.security.tls.truststorePassword .Values.security.tls.keyPassword ) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the Elasticsearch TLS password secret name +*/}} +{{- define "elasticsearch.tlsPasswordsSecret" -}} +{{- coalesce .Values.security.tls.passwordsSecret (printf "%s-tls-pass" (include "common.names.fullname" .)) -}} +{{- end -}} + +{{/* +Add environment variables to configure database values +*/}} +{{- define "elasticsearch.configure.security" -}} +- name: ELASTICSEARCH_ENABLE_SECURITY + value: "true" +- name: ELASTICSEARCH_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "elasticsearch.secretName" . }} + key: elasticsearch-password +- name: ELASTICSEARCH_ENABLE_FIPS_MODE + value: {{ .Values.security.fipsMode | quote }} +- name: ELASTICSEARCH_TLS_VERIFICATION_MODE + value: {{ .Values.security.tls.verificationMode | quote }} +- name: ELASTICSEARCH_ENABLE_REST_TLS + value: {{ ternary "true" "false" .Values.security.tls.restEncryption | quote }} +{{- if or (include "elasticsearch.createTlsSecret" .) .Values.security.tls.usePemCerts }} +- name: ELASTICSEARCH_TLS_USE_PEM + value: "true" +{{- else }} +- name: ELASTICSEARCH_KEYSTORE_LOCATION + value: "/opt/bitnami/elasticsearch/config/certs/{{ .Values.security.tls.keystoreFilename }}" +- name: ELASTICSEARCH_TRUSTSTORE_LOCATION + value: "/opt/bitnami/elasticsearch/config/certs/{{ .Values.security.tls.truststoreFilename }}" +{{- end }} +{{- if and (not .Values.security.tls.usePemCerts) (or .Values.security.tls.keystorePassword .Values.security.tls.passwordsSecret) }} +- name: ELASTICSEARCH_KEYSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "elasticsearch.tlsPasswordsSecret" . }} + key: keystore-password +{{- end }} +{{- if and (not .Values.security.tls.usePemCerts) (or .Values.security.tls.truststorePassword .Values.security.tls.passwordsSecret) }} +- name: ELASTICSEARCH_TRUSTSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "elasticsearch.tlsPasswordsSecret" . 
}} + key: truststore-password +{{- end }} +{{- if and .Values.security.tls.usePemCerts (or .Values.security.tls.keyPassword .Values.security.tls.passwordsSecret) }} +- name: ELASTICSEARCH_KEY_PASSWORD + value: {{ .Values.security.tls.keyPassword | quote }} +{{- end }} +{{- end -}} + +{{/* +Returns true if at least 1 existing secret was provided +*/}} +{{- define "elasticsearch.security.tlsSecretsProvided" -}} +{{- $masterSecret :=.Values.security.tls.master.existingSecret -}} +{{- $dataSecret :=.Values.security.tls.data.existingSecret -}} +{{- $coordSecret :=.Values.security.tls.coordinating.existingSecret -}} +{{- $ingestSecret :=.Values.security.tls.ingest.existingSecret -}} +{{- $ingestEnabled := .Values.ingest.enabled -}} +{{- if or $masterSecret $dataSecret $coordSecret (and $ingestEnabled $ingestSecret) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* Validate values of Elasticsearch - Existing secret not provided for master nodes */}} +{{- define "elasticsearch.validateValues.security.missingTlsSecrets.master" -}} +{{- if and .Values.security.enabled (include "elasticsearch.security.tlsSecretsProvided" .) (not .Values.security.tls.master.existingSecret) -}} +elasticsearch: security.tls.master.existingSecret + Missing secret containing the TLS certificates for the Elasticsearch master nodes. + Provide the certificates using --set .Values.security.tls.master.existingSecret="my-secret". +{{- end -}} +{{- end -}} + +{{/* Validate values of Elasticsearch - Existing secret not provided for data nodes */}} +{{- define "elasticsearch.validateValues.security.missingTlsSecrets.data" -}} +{{- if and .Values.security.enabled (include "elasticsearch.security.tlsSecretsProvided" .) (not .Values.security.tls.data.existingSecret) -}} +elasticsearch: security.tls.data.existingSecret + Missing secret containing the TLS certificates for the Elasticsearch data nodes. + Provide the certificates using --set .Values.security.tls.data.existingSecret="my-secret". +{{- end -}} +{{- end -}} + +{{/* Validate values of Elasticsearch - Existing secret not provided for coordinating-only nodes */}} +{{- define "elasticsearch.validateValues.security.missingTlsSecrets.coordinating" -}} +{{- if and .Values.security.enabled (include "elasticsearch.security.tlsSecretsProvided" .) (not .Values.security.tls.coordinating.existingSecret) -}} +elasticsearch: security.tls.coordinating.existingSecret + Missing secret containing the TLS certificates for the Elasticsearch coordinating-only nodes. + Provide the certificates using --set .Values.security.tls.coordinating.existingSecret="my-secret". +{{- end -}} +{{- end -}} + +{{/* Validate values of Elasticsearch - Existing secret not provided for ingest nodes */}} +{{- define "elasticsearch.validateValues.security.missingTlsSecrets.ingest" -}} +{{- if and .Values.security.enabled .Values.ingest.enabled (include "elasticsearch.security.tlsSecretsProvided" .) (not .Values.security.tls.ingest.existingSecret) -}} +elasticsearch: security.tls.ingest.existingSecret + Missing secret containing the TLS certificates for the Elasticsearch ingest nodes. + Provide the certificates using --set .Values.security.tls.ingest.existingSecret="my-secret". 
+{{- end -}} +{{- end -}} + +{{/* Validate values of Elasticsearch - TLS enabled but no certificates provided */}} +{{- define "elasticsearch.validateValues.security.tls" -}} +{{- if and .Values.security.enabled (not .Values.security.tls.autoGenerated) (not (include "elasticsearch.security.tlsSecretsProvided" .)) -}} +elasticsearch: security.tls + In order to enable X-Pack Security, it is necessary to configure TLS. + Three different mechanisms can be used: + - Provide an existing secret containing the Keystore and Truststore for each role + - Provide an existing secret containing the PEM certificates for each role and enable `security.tls.usePemCerts=true` + - Enable using auto-generated certificates with `security.tls.autoGenerated=true` + Existing secrets containing either JKS/PKCS12 or PEM certificates can be provided using --set Values.security.tls.master.existingSecret=master-certs, + --set Values.security.tls.data.existingSecret=data-certs, --set Values.security.tls.coordinating.existingSecret=coordinating-certs, --set Values.security.tls.ingest.existingSecret=ingest-certs +{{- end -}} +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. +*/}} +{{- define "elasticsearch.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "elasticsearch.validateValues.security.tls" .) -}} +{{- $messages := append $messages (include "elasticsearch.validateValues.security.missingTlsSecrets.master" .) -}} +{{- $messages := append $messages (include "elasticsearch.validateValues.security.missingTlsSecrets.data" .) -}} +{{- $messages := append $messages (include "elasticsearch.validateValues.security.missingTlsSecrets.coordinating" .) -}} +{{- $messages := append $messages (include "elasticsearch.validateValues.security.missingTlsSecrets.ingest" .) -}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} +{{- end -}} +{{- end -}} + +{{/* +Sysctl set if less then +*/}} +{{- define "elasticsearch.sysctlIfLess" -}} +CURRENT=`sysctl -n {{ .key }}`; +DESIRED="{{ .value }}"; +if [ "$DESIRED" -gt "$CURRENT" ]; then + sysctl -w {{ .key }}={{ .value }}; +fi; +{{- end -}} \ No newline at end of file diff --git a/elasticsearch/templates/configmap-curator.yaml b/elasticsearch/templates/configmap-curator.yaml new file mode 100644 index 0000000..14ee395 --- /dev/null +++ b/elasticsearch/templates/configmap-curator.yaml @@ -0,0 +1,11 @@ +{{- if .Values.curator.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "elasticsearch.curator.fullname" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: curator +data: + action_file.yml: {{ required "A valid .Values.curator.configMaps.action_file_yml entry is required!" (toYaml .Values.curator.configMaps.action_file_yml | indent 2) }} + config.yml: {{ required "A valid .Values.curator.configMaps.config_yml entry is required!" (tpl (toYaml .Values.curator.configMaps.config_yml | indent 2) $) }} +{{- end }} diff --git a/elasticsearch/templates/configmap-es.yaml b/elasticsearch/templates/configmap-es.yaml new file mode 100644 index 0000000..912d980 --- /dev/null +++ b/elasticsearch/templates/configmap-es.yaml @@ -0,0 +1,16 @@ +{{- if or .Values.config .Values.extraConfig }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} +data: + {{- if .Values.config }} + elasticsearch.yml: |- + {{- toYaml .Values.config | nindent 4 }} + {{- end}} + {{- if .Values.extraConfig }} + my_elasticsearch.yml: |- + {{- toYaml .Values.extraConfig | nindent 4 }} + {{- end }} +{{- end }} diff --git a/elasticsearch/templates/configmap-initscripts.yaml b/elasticsearch/templates/configmap-initscripts.yaml new file mode 100644 index 0000000..e764d3c --- /dev/null +++ b/elasticsearch/templates/configmap-initscripts.yaml @@ -0,0 +1,12 @@ +{{- if .Values.initScripts }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "elasticsearch.initScripts" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + component: master +data: +{{- with .Values.initScripts }} +{{ toYaml . | indent 2 }} +{{- end }} +{{ end }} diff --git a/elasticsearch/templates/coordinating-hpa.yaml b/elasticsearch/templates/coordinating-hpa.yaml new file mode 100644 index 0000000..59e330c --- /dev/null +++ b/elasticsearch/templates/coordinating-hpa.yaml @@ -0,0 +1,35 @@ +{{- if .Values.coordinating.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "elasticsearch.coordinating.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: coordinating-only + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: {{ template "common.capabilities.deployment.apiVersion" . }} + kind: StatefulSet + name: {{ include "elasticsearch.coordinating.fullname" . }} + minReplicas: {{ .Values.coordinating.autoscaling.minReplicas }} + maxReplicas: {{ .Values.coordinating.autoscaling.maxReplicas }} + metrics: + {{- if .Values.coordinating.autoscaling.targetCPU }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.coordinating.autoscaling.targetCPU }} + {{- end }} + {{- if .Values.coordinating.autoscaling.targetMemory }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.coordinating.autoscaling.targetMemory }} + {{- end }} +{{- end }} diff --git a/elasticsearch/templates/coordinating-statefulset.yaml b/elasticsearch/templates/coordinating-statefulset.yaml new file mode 100644 index 0000000..79e0169 --- /dev/null +++ b/elasticsearch/templates/coordinating-statefulset.yaml @@ -0,0 +1,280 @@ +apiVersion: {{ template "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ include "elasticsearch.coordinating.fullname" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: coordinating-only + ## Istio Labels: https://istio.io/docs/ops/deployment/requirements/ + app: coordinating-only +spec: + updateStrategy: + type: {{ .Values.coordinating.updateStrategy.type }} + {{- if (eq "Recreate" .Values.coordinating.updateStrategy.type) }} + rollingUpdate: null + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . 
| nindent 6 }} + app.kubernetes.io/component: coordinating-only + podManagementPolicy: Parallel + {{- if not .Values.coordinating.autoscaling.enabled }} + replicas: {{ .Values.coordinating.replicas }} + {{- end }} + serviceName: {{ template "elasticsearch.coordinating.fullname" . }} + template: + metadata: + labels: {{- include "common.labels.standard" . | nindent 8 }} + app.kubernetes.io/component: coordinating-only + ## Istio Labels: https://istio.io/docs/ops/deployment/requirements/ + app: coordinating-only + {{- if .Values.coordinating.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.podLabels "context" $) | nindent 8 }} + {{- end }} + annotations: + {{- if and (include "elasticsearch.createTlsSecret" .) (not .Values.security.tls.coordinating.existingSecret) }} + checksum/tls: {{ include (print $.Template.BasePath "/tls-secret.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.coordinating.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.podAnnotations "context" $) | nindent 8 }} + {{- end }} + spec: + {{- include "elasticsearch.imagePullSecrets" . | nindent 6 }} + {{- if .Values.coordinating.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.coordinating.schedulerName }} + schedulerName: {{ .Values.coordinating.schedulerName }} + {{- end }} + {{- if .Values.coordinating.priorityClassName }} + priorityClassName: {{ .Values.coordinating.priorityClassName | quote }} + {{- end }} + {{- if .Values.coordinating.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.coordinating.podAffinityPreset "component" "coordinating-only" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.coordinating.podAntiAffinityPreset "component" "coordinating-only" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.coordinating.nodeAffinityPreset.type "key" .Values.coordinating.nodeAffinityPreset.key "values" .Values.coordinating.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.coordinating.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.coordinating.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.coordinating.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.topologySpreadConstraints "context" $) | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "elasticsearch.coordinating.serviceAccountName" . 
}} + {{- if or .Values.coordinating.podSecurityContext.enabled .Values.coordinating.securityContext.enabled }} + securityContext: + {{- if .Values.coordinating.podSecurityContext.enabled }} + {{- omit .Values.coordinating.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- else }} + fsGroup: {{ .Values.coordinating.securityContext.fsGroup }} + {{- end }} + {{- end }} + {{- if or .Values.coordinating.initContainers .Values.sysctlImage.enabled }} + initContainers: + {{- if .Values.sysctlImage.enabled }} + ## Image that performs the sysctl operation to modify Kernel settings (needed sometimes to avoid boot errors) + - name: sysctl + image: {{ include "elasticsearch.sysctl.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + command: + - /bin/bash + - -ec + - | + {{- include "elasticsearch.sysctlIfLess" (dict "key" "vm.max_map_count" "value" "262144") | nindent 14 }} + {{- include "elasticsearch.sysctlIfLess" (dict "key" "fs.file-max" "value" "65536") | nindent 14 }} + securityContext: + privileged: true + {{- if .Values.sysctlImage.resources }} + resources: {{- toYaml .Values.sysctlImage.resources | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.coordinating.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.initContainers "context" $) | nindent 8 }} + {{- end }} + {{- end }} + containers: + - name: elasticsearch + image: {{ include "elasticsearch.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if or .Values.coordinating.containerSecurityContext.enabled .Values.coordinating.securityContext.enabled }} + securityContext: + {{- if .Values.coordinating.containerSecurityContext.enabled }} + {{- omit .Values.coordinating.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- else }} + runAsUser: {{ .Values.coordinating.securityContext.runAsUser }} + {{- end }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: ELASTICSEARCH_CLUSTER_NAME + value: {{ .Values.name | quote }} + - name: ELASTICSEARCH_CLUSTER_HOSTS + value: {{ include "elasticsearch.hosts" . | quote }} + - name: ELASTICSEARCH_TOTAL_NODES + value: {{ add (ternary .Values.master.autoscaling.minReplicas .Values.master.replicas .Values.master.autoscaling.enabled) (ternary .Values.data.autoscaling.minReplicas .Values.data.replicas .Values.data.autoscaling.enabled) | quote }} + - name: ELASTICSEARCH_CLUSTER_MASTER_HOSTS + {{- $elasticsearchMasterFullname := include "elasticsearch.master.fullname" . }} + {{- $replicas := int (ternary .Values.master.autoscaling.minReplicas .Values.master.replicas .Values.master.autoscaling.enabled) }} + value: {{range $i, $e := until $replicas }}{{ $elasticsearchMasterFullname }}-{{ $e }} {{ end }} + - name: ELASTICSEARCH_MINIMUM_MASTER_NODES + value: {{ add (div (ternary .Values.master.autoscaling.minReplicas .Values.master.replicas .Values.master.autoscaling.enabled) 2) 1 | quote }} + - name: ELASTICSEARCH_ADVERTISED_HOSTNAME + value: "$(MY_POD_NAME).{{ include "elasticsearch.coordinating.fullname" . 
}}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}" + {{- if .Values.plugins }} + - name: ELASTICSEARCH_PLUGINS + value: {{ .Values.plugins | quote }} + {{- end }} + - name: ELASTICSEARCH_HEAP_SIZE + value: {{ .Values.coordinating.heapSize | quote }} + - name: ELASTICSEARCH_IS_DEDICATED_NODE + value: "yes" + - name: ELASTICSEARCH_NODE_TYPE + value: "coordinating" + {{- if .Values.security.enabled }} + {{- include "elasticsearch.configure.security" . | nindent 12 }} + {{- end }} + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.extraEnvVars "context" $ ) | nindent 12 }} + {{- end }} + {{- if or .Values.extraEnvVarsConfigMap .Values.extraEnvVarsSecret }} + envFrom: + {{- if .Values.extraEnvVarsConfigMap }} + - configMapRef: + name: {{ include "common.tplvalues.render" ( dict "value" .Values.extraEnvVarsConfigMap "context" $ ) }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" ( dict "value" .Values.extraEnvVarsSecret "context" $ ) }} + {{- end }} + {{- end }} + ports: + - name: http + containerPort: 9200 + - name: transport + containerPort: 9300 + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.coordinating.startupProbe.enabled }} + startupProbe: + initialDelaySeconds: {{ .Values.coordinating.startupProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.coordinating.startupProbe.periodSeconds }} + timeoutSeconds: {{ .Values.coordinating.startupProbe.timeoutSeconds }} + successThreshold: {{ .Values.coordinating.startupProbe.successThreshold }} + failureThreshold: {{ .Values.coordinating.startupProbe.failureThreshold }} + exec: + command: + - /opt/bitnami/scripts/elasticsearch/healthcheck.sh + {{- else if .Values.coordinating.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.customStartupProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.coordinating.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.coordinating.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.coordinating.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.coordinating.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.coordinating.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.coordinating.livenessProbe.failureThreshold }} + exec: + command: + - /opt/bitnami/scripts/elasticsearch/healthcheck.sh + {{- else if .Values.coordinating.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.customLivenessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.coordinating.readinessProbe.enabled}} + readinessProbe: + initialDelaySeconds: {{ .Values.coordinating.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.coordinating.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.coordinating.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.coordinating.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.coordinating.readinessProbe.failureThreshold }} + exec: + command: + - /opt/bitnami/scripts/elasticsearch/healthcheck.sh + {{- else if .Values.coordinating.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.customReadinessProbe "context" $) | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.coordinating.resources }} + resources: {{- toYaml 
.Values.coordinating.resources | nindent 12 }} + {{- end}} + volumeMounts: + {{- if .Values.config }} + - mountPath: /opt/bitnami/elasticsearch/config/elasticsearch.yml + name: config + subPath: elasticsearch.yml + {{- end }} + {{- if .Values.extraConfig }} + - mountPath: /opt/bitnami/elasticsearch/config/my_elasticsearch.yml + name: config + subPath: my_elasticsearch.yml + {{- end }} + - name: data + mountPath: /bitnami/elasticsearch/data + {{- if .Values.security.enabled }} + - name: elasticsearch-certificates + mountPath: /opt/bitnami/elasticsearch/config/certs + readOnly: true + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- toYaml .Values.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- if .Values.initScripts }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d/init-scripts + {{- end }} + {{- if .Values.initScriptsCM }} + - name: custom-init-scripts-cm + mountPath: /docker-entrypoint-initdb.d/init-scripts-cm + {{- end }} + {{- if .Values.initScriptsSecret }} + - name: custom-init-scripts-secret + mountPath: /docker-entrypoint-initdb.d/init-scripts-secret + {{- end }} + {{- if .Values.coordinating.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.coordinating.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + {{- if or .Values.config .Values.extraConfig }} + - name: config + configMap: + name: {{ include "common.names.fullname" . }} + {{- end }} + {{- if .Values.security.enabled }} + - name: elasticsearch-certificates + secret: + secretName: {{ include "elasticsearch.coordinating.tlsSecretName" . }} + defaultMode: 256 + {{- end }} + {{- if .Values.extraVolumes }} + {{- toYaml .Values.extraVolumes | nindent 8 }} + {{- end }} + {{- if .Values.initScripts }} + - name: custom-init-scripts + configMap: + name: {{ template "elasticsearch.initScripts" . }} + {{- end }} + {{- if .Values.initScriptsCM }} + - name: custom-init-scripts-cm + configMap: + name: {{ template "elasticsearch.initScriptsCM" . }} + {{- end }} + {{- if .Values.initScriptsSecret }} + - name: custom-init-scripts-secret + secret: + secretName: {{ template "elasticsearch.initScriptsSecret" . }} + defaultMode: 0755 + {{- end }} + - name: "data" + emptyDir: {} diff --git a/elasticsearch/templates/coordinating-svc.yaml b/elasticsearch/templates/coordinating-svc.yaml new file mode 100644 index 0000000..3c8f79b --- /dev/null +++ b/elasticsearch/templates/coordinating-svc.yaml @@ -0,0 +1,27 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "elasticsearch.coordinating.fullname" . }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: coordinating-only + annotations: {{ include "common.tplvalues.render" ( dict "value" .Values.coordinating.service.annotations "context" $) | nindent 4 }} +spec: + type: {{ .Values.coordinating.service.type | quote }} + {{- if and (eq .Values.coordinating.service.type "LoadBalancer") (not (empty .Values.coordinating.service.loadBalancerIP)) }} + loadBalancerIP: {{ .Values.coordinating.service.loadBalancerIP }} + externalTrafficPolicy: {{ .Values.coordinating.service.externalTrafficPolicy | quote }} + {{- end }} + publishNotReadyAddresses: true + ports: + - name: http + port: {{ .Values.coordinating.service.port }} + targetPort: http + {{- if and (or (eq .Values.coordinating.service.type "NodePort") (eq .Values.coordinating.service.type "LoadBalancer")) (not (empty .Values.coordinating.service.nodePort)) }} + nodePort: {{ .Values.coordinating.service.nodePort }} + {{- else if eq .Values.coordinating.service.type "ClusterIP" }} + nodePort: null + {{- end }} + - name: tcp-transport + port: 9300 + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: coordinating-only diff --git a/elasticsearch/templates/cronjob.yaml b/elasticsearch/templates/cronjob.yaml new file mode 100644 index 0000000..8f6cb23 --- /dev/null +++ b/elasticsearch/templates/cronjob.yaml @@ -0,0 +1,130 @@ +{{- if .Values.curator.enabled }} +apiVersion: {{ template "cronjob.apiVersion" . }} +kind: CronJob +metadata: + name: {{ template "elasticsearch.curator.fullname" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: curator + ## Istio Labels: https://istio.io/docs/ops/deployment/requirements/ + app: curator + {{- if .Values.curator.cronjob.annotations }} + annotations: {{- toYaml .Values.curator.cronjob.annotations | nindent 4 }} + {{- end }} +spec: + schedule: "{{ .Values.curator.cronjob.schedule }}" + {{- with .Values.curator.cronjob.concurrencyPolicy }} + concurrencyPolicy: {{ . }} + {{- end }} + {{- with .Values.curator.cronjob.failedJobsHistoryLimit }} + failedJobsHistoryLimit: {{ . }} + {{- end }} + {{- with .Values.curator.cronjob.successfulJobsHistoryLimit }} + successfulJobsHistoryLimit: {{ . }} + {{- end }} + jobTemplate: + metadata: + labels: {{- include "common.labels.standard" . | nindent 8 }} + app.kubernetes.io/component: curator + ## Istio Labels: https://istio.io/docs/ops/deployment/requirements/ + app: curator + {{- if .Values.curator.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.curator.podLabels "context" $) | nindent 8 }} + {{- end }} + spec: + template: + metadata: + labels: {{- include "common.labels.standard" . | nindent 12 }} + app.kubernetes.io/component: curator + ## Istio Labels: https://istio.io/docs/ops/deployment/requirements/ + app: curator + {{- if .Values.curator.podAnnotations }} + annotations: {{- toYaml .Values.curator.podAnnotations | nindent 12 }} + {{- end }} + spec: + volumes: + - name: config-volume + configMap: + name: {{ template "elasticsearch.curator.fullname" . }} + {{- if .Values.curator.extraVolumes }} + {{- toYaml .Values.curator.extraVolumes | nindent 12 }} + {{- end }} + restartPolicy: {{ .Values.curator.cronjob.jobRestartPolicy }} + {{- if .Values.curator.priorityClassName }} + priorityClassName: {{ .Values.curator.priorityClassName | quote }} + {{- end }} +{{- include "elasticsearch.imagePullSecrets" . 
| indent 10 }} + {{- $initContainers := coalesce .Values.curator.initContainers .Values.curator.extraInitContainers -}} + {{- if $initContainers }} + initContainers: {{- include "common.tplvalues.render" (dict "value" $initContainers "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.curator.schedulerName }} + schedulerName: {{ .Values.curator.schedulerName }} + {{- end }} + {{- if .Values.curator.rbac.enabled }} + serviceAccountName: {{ include "elasticsearch.curator.serviceAccountName" . }} + {{- end }} + {{- if .Values.curator.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.curator.affinity "context" $) | nindent 12 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.curator.podAffinityPreset "component" "curator" "context" $) | nindent 14 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.curator.podAntiAffinityPreset "component" "curator" "context" $) | nindent 14 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.curator.nodeAffinityPreset.type "key" .Values.curator.nodeAffinityPreset.key "values" .Values.curator.nodeAffinityPreset.values) | nindent 14 }} + {{- end }} + {{- if .Values.curator.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.curator.nodeSelector "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.curator.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.curator.tolerations "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.curator.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.curator.topologySpreadConstraints "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.curator.securityContext }} + securityContext: {{- toYaml .Values.curator.securityContext | nindent 12 }} + {{- end }} + containers: + - name: {{ template "elasticsearch.curator.fullname" . }} + image: {{ template "elasticsearch.curator.image" . 
}} + imagePullPolicy: {{ .Values.curator.image.pullPolicy | quote }} + volumeMounts: + - name: config-volume + mountPath: /etc/es-curator + {{- if .Values.curator.extraVolumeMounts }} + {{- toYaml .Values.curator.extraVolumeMounts | nindent 16 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 16 }} + {{ else if .Values.curator.command }} + command: {{ toYaml .Values.curator.command | nindent 16 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 16 }} + {{- else if .Values.curator.dryrun }} + args: [ "--dry-run", "--config", "/etc/es-curator/config.yml", "/etc/es-curator/action_file.yml" ] + {{- else }} + args: [ "--config", "/etc/es-curator/config.yml", "/etc/es-curator/action_file.yml" ] + {{- end }} + env: + {{- if .Values.curator.env }} + {{- range $key,$value := .Values.curator.env }} + - name: {{ $key | upper | quote}} + value: {{ $value | quote}} + {{- end }} + {{- end }} + {{- if .Values.curator.envFromSecrets }} + {{- range $key,$value := .Values.curator.envFromSecrets }} + - name: {{ $key | upper | quote}} + valueFrom: + secretKeyRef: + name: {{ $value.from.secret | quote}} + key: {{ $value.from.key | quote}} + {{- end }} + {{- end }} + {{- if .Values.curator.resources }} + resources: {{- toYaml .Values.curator.resources | nindent 16 }} + {{- end }} + {{- if .Values.curator.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.curator.sidecars "context" $) | nindent 12 }} + {{- end }} +{{- end }} diff --git a/elasticsearch/templates/data-hpa.yaml b/elasticsearch/templates/data-hpa.yaml new file mode 100644 index 0000000..67bc687 --- /dev/null +++ b/elasticsearch/templates/data-hpa.yaml @@ -0,0 +1,35 @@ +{{- if .Values.data.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "elasticsearch.data.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: data + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: {{ template "common.capabilities.statefulset.apiVersion" . }} + kind: StatefulSet + name: {{ include "elasticsearch.data.fullname" . }} + minReplicas: {{ .Values.data.autoscaling.minReplicas }} + maxReplicas: {{ .Values.data.autoscaling.maxReplicas }} + metrics: + {{- if .Values.data.autoscaling.targetCPU }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.data.autoscaling.targetCPU }} + {{- end }} + {{- if .Values.data.autoscaling.targetMemory }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.data.autoscaling.targetMemory }} + {{- end }} +{{- end }} diff --git a/elasticsearch/templates/data-statefulset.yaml b/elasticsearch/templates/data-statefulset.yaml new file mode 100644 index 0000000..b71ed43 --- /dev/null +++ b/elasticsearch/templates/data-statefulset.yaml @@ -0,0 +1,326 @@ +apiVersion: {{ template "common.capabilities.statefulset.apiVersion" . 
}} +kind: StatefulSet +metadata: + name: {{ include "elasticsearch.data.fullname" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: data + ## Istio Labels: https://istio.io/docs/ops/deployment/requirements/ + app: data +spec: + updateStrategy: + type: {{ .Values.data.updateStrategy.type }} + {{- if (eq "OnDelete" .Values.data.updateStrategy.type) }} + rollingUpdate: null + {{- else if .Values.data.updateStrategy.rollingUpdatePartition }} + rollingUpdate: + partition: {{ .Values.data.updateStrategy.rollingUpdatePartition }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: data + podManagementPolicy: Parallel + {{- if not .Values.data.autoscaling.enabled }} + replicas: {{ .Values.data.replicas }} + {{- end }} + serviceName: {{ template "elasticsearch.data.fullname" . }} + template: + metadata: + labels: {{- include "common.labels.standard" . | nindent 8 }} + app.kubernetes.io/component: data + ## Istio Labels: https://istio.io/docs/ops/deployment/requirements/ + app: data + {{- if .Values.data.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.data.podLabels "context" $) | nindent 8 }} + {{- end }} + annotations: + {{- if and (include "elasticsearch.createTlsSecret" .) (not .Values.security.tls.data.existingSecret) }} + checksum/tls: {{ include (print $.Template.BasePath "/tls-secret.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.data.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.data.podAnnotations "context" $) | nindent 8 }} + {{- end }} + spec: + {{- include "elasticsearch.imagePullSecrets" . | nindent 6 }} + {{- if .Values.data.priorityClassName }} + priorityClassName: {{ .Values.data.priorityClassName | quote }} + {{- end }} + {{- if .Values.data.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.data.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.data.podAffinityPreset "component" "data" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.data.podAntiAffinityPreset "component" "data" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.data.nodeAffinityPreset.type "key" .Values.data.nodeAffinityPreset.key "values" .Values.data.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.data.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.data.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.data.schedulerName }} + schedulerName: {{ .Values.data.schedulerName }} + {{- end }} + {{- if .Values.data.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.data.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.data.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.data.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.data.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.data.topologySpreadConstraints "context" $) | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "elasticsearch.data.serviceAccountName" . 
}} + {{- if or .Values.data.podSecurityContext.enabled .Values.data.securityContext.enabled }} + securityContext: + {{- if .Values.data.podSecurityContext.enabled }} + {{- omit .Values.data.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- else }} + fsGroup: {{ .Values.data.securityContext.fsGroup }} + {{- end }} + {{- end }} + {{- if or .Values.data.initContainers .Values.sysctlImage.enabled (and .Values.volumePermissions.enabled .Values.data.persistence.enabled) }} + initContainers: + {{- if .Values.sysctlImage.enabled }} + ## Image that performs the sysctl operation to modify Kernel settings (needed sometimes to avoid boot errors) + - name: sysctl + image: {{ include "elasticsearch.sysctl.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + command: + - /bin/bash + - -ec + - | + {{- include "elasticsearch.sysctlIfLess" (dict "key" "vm.max_map_count" "value" "262144") | nindent 14 }} + {{- include "elasticsearch.sysctlIfLess" (dict "key" "fs.file-max" "value" "65536") | nindent 14 }} + securityContext: + privileged: true + {{- if .Values.sysctlImage.resources }} + resources: {{- toYaml .Values.sysctlImage.resources | nindent 12 }} + {{- end }} + {{- end }} + {{- if and .Values.volumePermissions.enabled .Values.data.persistence.enabled }} + - name: volume-permissions + image: {{ include "elasticsearch.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + - -ec + - | + chown -R {{ .Values.data.securityContext.runAsUser }}:{{ .Values.data.securityContext.fsGroup }} //bitnami/elasticsearch/data + securityContext: + runAsUser: 0 + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: "/bitnami/elasticsearch/data" + {{- end }} + {{- if .Values.data.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.data.initContainers "context" $) | nindent 8 }} + {{- end }} + {{- end }} + containers: + - name: elasticsearch + image: {{ include "elasticsearch.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if or .Values.data.containerSecurityContext.enabled .Values.data.securityContext.enabled }} + securityContext: + {{- if .Values.data.containerSecurityContext.enabled }} + {{- omit .Values.data.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- else }} + runAsUser: {{ .Values.data.securityContext.runAsUser }} + {{- end }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: ELASTICSEARCH_CLUSTER_NAME + value: {{ .Values.name | quote }} + - name: ELASTICSEARCH_CLUSTER_HOSTS + value: {{ include "elasticsearch.hosts" . 
| quote }} + - name: ELASTICSEARCH_TOTAL_NODES + value: {{ add (ternary .Values.master.autoscaling.minReplicas .Values.master.replicas .Values.master.autoscaling.enabled) (ternary .Values.data.autoscaling.minReplicas .Values.data.replicas .Values.data.autoscaling.enabled) | quote }} + {{- if .Values.plugins }} + - name: ELASTICSEARCH_PLUGINS + value: {{ .Values.plugins | quote }} + {{- end }} + {{- if .Values.snapshotRepoPath }} + - name: ELASTICSEARCH_FS_SNAPSHOT_REPO_PATH + value: {{ .Values.snapshotRepoPath | quote }} + {{- end }} + - name: ELASTICSEARCH_HEAP_SIZE + value: {{ .Values.data.heapSize | quote }} + - name: ELASTICSEARCH_IS_DEDICATED_NODE + value: "yes" + - name: ELASTICSEARCH_NODE_TYPE + value: "data" + - name: ELASTICSEARCH_ADVERTISED_HOSTNAME + value: "$(MY_POD_NAME).{{ include "elasticsearch.data.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}" + {{- if .Values.security.enabled }} + {{- include "elasticsearch.configure.security" . | nindent 12 }} + {{- end }} + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.extraEnvVars "context" $ ) | nindent 12 }} + {{- end }} + {{- if or .Values.extraEnvVarsConfigMap .Values.extraEnvVarsSecret }} + envFrom: + {{- if .Values.extraEnvVarsConfigMap }} + - configMapRef: + name: {{ include "common.tplvalues.render" ( dict "value" .Values.extraEnvVarsConfigMap "context" $ ) }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" ( dict "value" .Values.extraEnvVarsSecret "context" $ ) }} + {{- end }} + {{- end }} + ports: + - name: http + containerPort: 9200 + - name: transport + containerPort: 9300 + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.data.startupProbe.enabled }} + startupProbe: + initialDelaySeconds: {{ .Values.data.startupProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.data.startupProbe.periodSeconds }} + timeoutSeconds: {{ .Values.data.startupProbe.timeoutSeconds }} + successThreshold: {{ .Values.data.startupProbe.successThreshold }} + failureThreshold: {{ .Values.data.startupProbe.failureThreshold }} + exec: + command: + - /opt/bitnami/scripts/elasticsearch/healthcheck.sh + {{- else if .Values.data.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.data.customStartupProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.data.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.data.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.data.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.data.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.data.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.data.livenessProbe.failureThreshold }} + exec: + command: + - /opt/bitnami/scripts/elasticsearch/healthcheck.sh + {{- else if .Values.data.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.data.customLivenessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.data.readinessProbe.enabled }} + readinessProbe: + initialDelaySeconds: {{ .Values.data.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.data.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.data.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.data.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.data.readinessProbe.failureThreshold }} + exec: + command: + - 
/opt/bitnami/scripts/elasticsearch/healthcheck.sh + {{- else if .Values.data.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.data.customReadinessProbe "context" $) | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.data.resources }} + resources: {{- toYaml .Values.data.resources | nindent 12 }} + {{- end }} + volumeMounts: + {{- if .Values.config }} + - mountPath: /opt/bitnami/elasticsearch/config/elasticsearch.yml + name: "config" + subPath: elasticsearch.yml + {{- end }} + {{- if .Values.extraConfig }} + - mountPath: /opt/bitnami/elasticsearch/config/my_elasticsearch.yml + name: config + subPath: my_elasticsearch.yml + {{- end }} + - name: "data" + mountPath: "/bitnami/elasticsearch/data" + {{- if .Values.security.enabled }} + - name: elasticsearch-certificates + mountPath: /opt/bitnami/elasticsearch/config/certs + readOnly: true + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- toYaml .Values.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- if .Values.initScripts }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d/init-scripts + {{- end }} + {{- if .Values.initScriptsCM }} + - name: custom-init-scripts-cm + mountPath: /docker-entrypoint-initdb.d/init-scripts-cm + {{- end }} + {{- if .Values.initScriptsSecret }} + - name: custom-init-scripts-secret + mountPath: /docker-entrypoint-initdb.d/init-scripts-secret + {{- end }} + {{- if .Values.data.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.data.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + {{- if or .Values.config .Values.extraConfig }} + - name: "config" + configMap: + name: {{ template "common.names.fullname" . }} + {{- end }} + {{- if .Values.security.enabled }} + - name: elasticsearch-certificates + secret: + secretName: {{ include "elasticsearch.data.tlsSecretName" . }} + defaultMode: 256 + {{- end }} + {{- if .Values.extraVolumes }} + {{- toYaml .Values.extraVolumes | nindent 8 }} + {{- end }} + {{- if .Values.initScripts }} + - name: custom-init-scripts + configMap: + name: {{ template "elasticsearch.initScripts" . }} + {{- end }} + {{- if .Values.initScriptsCM }} + - name: custom-init-scripts-cm + configMap: + name: {{ template "elasticsearch.initScriptsCM" . }} + {{- end }} + {{- if .Values.initScriptsSecret }} + - name: custom-init-scripts-secret + secret: + secretName: {{ template "elasticsearch.initScriptsSecret" . 
}} + defaultMode: 0755 + {{- end }} +{{- if not .Values.data.persistence.enabled }} + - name: "data" + emptyDir: {} +{{- else if .Values.data.persistence.existingClaim }} + - name: "data" + persistentVolumeClaim: + claimName: {{ .Values.data.persistence.existingClaim }} +{{- else }} + volumeClaimTemplates: + - metadata: + name: "data" + {{- if .Values.data.persistence.annotations }} + annotations: {{- toYaml .Values.data.persistence.annotations | nindent 10 }} + {{- end }} + spec: + accessModes: {{- toYaml .Values.data.persistence.accessModes | nindent 10 }} + {{ $storage := dict "global" .Values.global "local" .Values.data }} + {{ include "elasticsearch.storageClass" $storage }} + resources: + requests: + storage: {{ .Values.data.persistence.size | quote }} + {{- if .Values.data.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.data.persistence.selector "context" $) | nindent 10 }} + {{- else if .Values.data.persistence.existingVolume }} + selector: + matchLabels: + volume: {{ .Values.data.persistence.existingVolume }} + {{- end }} +{{- end }} diff --git a/elasticsearch/templates/data-svc.yaml b/elasticsearch/templates/data-svc.yaml new file mode 100644 index 0000000..b26530d --- /dev/null +++ b/elasticsearch/templates/data-svc.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "elasticsearch.data.fullname" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: data +spec: + type: ClusterIP + publishNotReadyAddresses: true + ports: + - name: http + port: 9200 + targetPort: http + - name: tcp-transport + port: 9300 + targetPort: transport + nodePort: null + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: data diff --git a/elasticsearch/templates/extra-list.yaml b/elasticsearch/templates/extra-list.yaml new file mode 100644 index 0000000..9ac65f9 --- /dev/null +++ b/elasticsearch/templates/extra-list.yaml @@ -0,0 +1,4 @@ +{{- range .Values.extraDeploy }} +--- +{{ include "common.tplvalues.render" (dict "value" . "context" $) }} +{{- end }} diff --git a/elasticsearch/templates/hooks/job.install.yaml b/elasticsearch/templates/hooks/job.install.yaml new file mode 100644 index 0000000..e655537 --- /dev/null +++ b/elasticsearch/templates/hooks/job.install.yaml @@ -0,0 +1,73 @@ +{{- if .Values.curator.enabled }} +{{- range $kind, $enabled := .Values.curator.hooks }} +{{- if $enabled }} +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ template "elasticsearch.curator.fullname" . }}-curator-on-{{ $kind }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: curator + ## Istio Labels: https://istio.io/docs/ops/deployment/requirements/ + app: curator + annotations: + "helm.sh/hook": post-{{ $kind }} + "helm.sh/hook-weight": "1" + {{- if $.Values.cronjob.annotations }} + {{- toYaml $.Values.cronjob.annotations | nindent 4 }} + {{- end }} +spec: + template: + metadata: + labels: {{- include "common.labels.standard" . 
| nindent 10 }} + app.kubernetes.io/component: curator + ## Istio Labels: https://istio.io/docs/ops/deployment/requirements/ + app: curator + {{- if $.Values.podAnnotations }} + annotations: {{- toYaml $.Values.podAnnotations | nindent 8 }} + {{- end }} + spec: + restartPolicy: Never + {{- if $.Values.curator.priorityClassName }} + priorityClassName: {{ $.Values.curator.priorityClassName | quote }} + {{- end }} + {{- if .Values.curator.schedulerName }} + schedulerName: {{ .Values.curator.schedulerName }} + {{- end }} + {{- if $.Values.curator.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" $.Values.curator.affinity "context" $) | nindent 8 }} + {{- end }} + {{- if $.Values.curator.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" $.Values.curator.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if $.Values.curator.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" $.Values.curator.tolerations "context" $) | nindent 8 }} + {{- end }} + containers: + - name: {{ template "elasticsearch.curator.fullname" . }} + image: {{ template "elasticsearch.curator.image" . }} + imagePullPolicy: {{ .Values.curator.image.pullPolicy | quote }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else }} + command: [ "curator" ] + args: [ "--config", "/etc/es-curator/config.yml", "/etc/es-curator/action_file.yml" ] + {{- end }} + resources: {{- toYaml $.Values.curator.resources | nindent 12 }} + volumeMounts: + - name: config-volume + mountPath: /etc/es-curator + {{- if $.Values.curator.extraVolumeMounts }} + {{- toYaml $.Values.curator.extraVolumeMounts | nindent 12 }} + {{- end }} + volumes: + - name: config-volume + configMap: + name: {{ template "elasticsearch.curator.fullname" . }} + {{- if $.Values.curator.extraVolumes }} + {{- toYaml $.Values.curator.extraVolumes | nindent 8 }} + {{- end }} +{{- end }} +{{- end }} +{{- end }} diff --git a/elasticsearch/templates/ingest-statefulset.yaml b/elasticsearch/templates/ingest-statefulset.yaml new file mode 100644 index 0000000..41c1fb5 --- /dev/null +++ b/elasticsearch/templates/ingest-statefulset.yaml @@ -0,0 +1,280 @@ +{{- if .Values.ingest.enabled }} +apiVersion: {{ template "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ include "elasticsearch.ingest.fullname" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: ingest + ## Istio Labels: https://istio.io/docs/ops/deployment/requirements/ + app: ingest +spec: + updateStrategy: + type: {{ .Values.ingest.updateStrategy.type }} + {{- if (eq "OnDelete" .Values.ingest.updateStrategy.type) }} + rollingUpdate: null + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: ingest + podManagementPolicy: Parallel + replicas: {{ .Values.ingest.replicas }} + serviceName: {{ template "elasticsearch.ingest.fullname" . }} + template: + metadata: + labels: {{- include "common.labels.standard" . 
| nindent 8 }} + app.kubernetes.io/component: ingest + ## Istio Labels: https://istio.io/docs/ops/deployment/requirements/ + app: ingest + {{- if .Values.ingest.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.ingest.podLabels "context" $) | nindent 8 }} + {{- end }} + annotations: + {{- if and (include "elasticsearch.createTlsSecret" .) (not .Values.security.tls.ingest.existingSecret) }} + checksum/tls: {{ include (print $.Template.BasePath "/tls-secret.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.ingest.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.ingest.podAnnotations "context" $) | nindent 8 }} + {{- end }} + spec: + {{- include "elasticsearch.imagePullSecrets" . | nindent 6 }} + {{- if .Values.ingest.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.ingest.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.ingest.schedulerName }} + schedulerName: {{ .Values.ingest.schedulerName }} + {{- end }} + {{- if .Values.ingest.priorityClassName }} + priorityClassName: {{ .Values.ingest.priorityClassName | quote }} + {{- end }} + {{- if .Values.ingest.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.ingest.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.ingest.podAffinityPreset "component" "ingest" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.ingest.podAntiAffinityPreset "component" "ingest" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.ingest.nodeAffinityPreset.type "key" .Values.ingest.nodeAffinityPreset.key "values" .Values.ingest.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.ingest.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.ingest.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.ingest.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.ingest.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.ingest.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.ingest.topologySpreadConstraints "context" $) | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "elasticsearch.ingest.serviceAccountName" . }} + {{- if or .Values.ingest.podSecurityContext.enabled .Values.ingest.securityContext.enabled }} + securityContext: + {{- if .Values.ingest.podSecurityContext.enabled }} + {{- omit .Values.ingest.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- else }} + fsGroup: {{ .Values.ingest.securityContext.fsGroup }} + {{- end }} + {{- end }} + {{- if or .Values.ingest.initContainers .Values.sysctlImage.enabled }} + initContainers: + {{- if .Values.sysctlImage.enabled }} + ## Image that performs the sysctl operation to modify Kernel settings (needed sometimes to avoid boot errors) + - name: sysctl + image: {{ include "elasticsearch.sysctl.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + command: + - /bin/bash + - -ec + - | + {{- include "elasticsearch.sysctlIfLess" (dict "key" "vm.max_map_count" "value" "262144") | nindent 14 }} + {{- include "elasticsearch.sysctlIfLess" (dict "key" "fs.file-max" "value" "65536") | nindent 14 }} + securityContext: + privileged: true + {{- if .Values.sysctlImage.resources }} + resources: {{- toYaml .Values.sysctlImage.resources | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.ingest.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.ingest.initContainers "context" $) | nindent 8 }} + {{- end }} + {{- end }} + containers: + - name: elasticsearch + image: {{ include "elasticsearch.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if or .Values.ingest.containerSecurityContext.enabled .Values.ingest.securityContext.enabled }} + securityContext: + {{- if .Values.ingest.containerSecurityContext.enabled }} + {{- omit .Values.ingest.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- else }} + runAsUser: {{ .Values.ingest.securityContext.runAsUser }} + {{- end }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: ELASTICSEARCH_CLUSTER_NAME + value: {{ .Values.name | quote }} + - name: ELASTICSEARCH_CLUSTER_HOSTS + value: {{ include "elasticsearch.hosts" . | quote }} + - name: ELASTICSEARCH_TOTAL_NODES + value: {{ add (ternary .Values.master.autoscaling.minReplicas .Values.master.replicas (eq .Values.master.autoscaling.enabled true)) (ternary .Values.data.autoscaling.minReplicas .Values.data.replicas (eq .Values.data.autoscaling.enabled true)) | quote }} + - name: ELASTICSEARCH_CLUSTER_MASTER_HOSTS + {{- $elasticsearchMasterFullname := include "elasticsearch.master.fullname" . }} + {{- $replicas := int (ternary .Values.master.autoscaling.minReplicas .Values.master.replicas (eq .Values.master.autoscaling.enabled true)) }} + value: {{range $i, $e := until $replicas }}{{ $elasticsearchMasterFullname }}-{{ $e }} {{ end }} + - name: ELASTICSEARCH_MINIMUM_MASTER_NODES + value: {{ add (div (ternary .Values.master.autoscaling.minReplicas .Values.master.replicas (eq .Values.master.autoscaling.enabled true)) 2) 1 | quote }} + - name: ELASTICSEARCH_ADVERTISED_HOSTNAME + value: "$(MY_POD_NAME).{{ include "elasticsearch.ingest.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}" + {{- if .Values.plugins }} + - name: ELASTICSEARCH_PLUGINS + value: {{ .Values.plugins | quote }} + {{- end }} + - name: ELASTICSEARCH_HEAP_SIZE + value: {{ .Values.ingest.heapSize | quote }} + - name: ELASTICSEARCH_IS_DEDICATED_NODE + value: "yes" + - name: ELASTICSEARCH_NODE_TYPE + value: "ingest" + {{- if .Values.security.enabled }} + {{- include "elasticsearch.configure.security" . 
| nindent 12 }} + {{- end }} + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.extraEnvVars "context" $ ) | nindent 12 }} + {{- end }} + {{- if or .Values.extraEnvVarsConfigMap .Values.extraEnvVarsSecret }} + envFrom: + {{- if .Values.extraEnvVarsConfigMap }} + - configMapRef: + name: {{ include "common.tplvalues.render" ( dict "value" .Values.extraEnvVarsConfigMap "context" $ ) }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" ( dict "value" .Values.extraEnvVarsSecret "context" $ ) }} + {{- end }} + {{- end }} + ports: + - name: http + containerPort: 9200 + - name: transport + containerPort: 9300 + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.ingest.startupProbe.enabled }} + startupProbe: + initialDelaySeconds: {{ .Values.ingest.startupProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.ingest.startupProbe.periodSeconds }} + timeoutSeconds: {{ .Values.ingest.startupProbe.timeoutSeconds }} + successThreshold: {{ .Values.ingest.startupProbe.successThreshold }} + failureThreshold: {{ .Values.ingest.startupProbe.failureThreshold }} + exec: + command: + - /opt/bitnami/scripts/elasticsearch/healthcheck.sh + {{- else if .Values.ingest.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.ingest.customStartupProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.ingest.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.ingest.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.ingest.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.ingest.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.ingest.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.ingest.livenessProbe.failureThreshold }} + exec: + command: + - /opt/bitnami/scripts/elasticsearch/healthcheck.sh + {{- else if .Values.ingest.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.ingest.customLivenessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.ingest.readinessProbe.enabled }} + readinessProbe: + initialDelaySeconds: {{ .Values.ingest.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.ingest.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.ingest.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.ingest.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.ingest.readinessProbe.failureThreshold }} + exec: + command: + - /opt/bitnami/scripts/elasticsearch/healthcheck.sh + {{- else if .Values.ingest.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.ingest.customReadinessProbe "context" $) | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.ingest.resources }} + resources: {{- toYaml .Values.ingest.resources | nindent 12 }} + {{- end }} + volumeMounts: + {{- if .Values.config }} + - mountPath: /opt/bitnami/elasticsearch/config/elasticsearch.yml + name: config + subPath: elasticsearch.yml + {{- end }} + - name: data + mountPath: /bitnami/elasticsearch/data + {{- if .Values.security.enabled }} + - name: elasticsearch-certificates + mountPath: /opt/bitnami/elasticsearch/config/certs + readOnly: true + {{- end }} + {{- if .Values.extraConfig }} + - mountPath: /opt/bitnami/elasticsearch/config/my_elasticsearch.yml + name: config + subPath: my_elasticsearch.yml + {{- end }} + {{- if .Values.extraVolumeMounts }} + 
{{- toYaml .Values.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- if .Values.initScripts }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d/init-scripts + {{- end }} + {{- if .Values.initScriptsCM }} + - name: custom-init-scripts-cm + mountPath: /docker-entrypoint-initdb.d/init-scripts-cm + {{- end }} + {{- if .Values.initScriptsSecret }} + - name: custom-init-scripts-secret + mountPath: /docker-entrypoint-initdb.d/init-scripts-secret + {{- end }} + {{- if .Values.ingest.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.ingest.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + {{- if or .Values.config .Values.extraConfig }} + - name: config + configMap: + name: {{ include "common.names.fullname" . }} + {{- end }} + {{- if .Values.security.enabled }} + - name: elasticsearch-certificates + secret: + secretName: {{ include "elasticsearch.ingest.tlsSecretName" . }} + defaultMode: 256 + {{- end }} + {{- if .Values.extraVolumes }} + {{- toYaml .Values.extraVolumes | nindent 8 }} + {{- end }} + {{- if .Values.initScripts }} + - name: custom-init-scripts + configMap: + name: {{ template "elasticsearch.initScripts" . }} + {{- end }} + {{- if .Values.initScriptsCM }} + - name: custom-init-scripts-cm + configMap: + name: {{ template "elasticsearch.initScriptsCM" . }} + {{- end }} + {{- if .Values.initScriptsSecret }} + - name: custom-init-scripts-secret + secret: + secretName: {{ template "elasticsearch.initScriptsSecret" . }} + defaultMode: 0755 + {{- end }} + - name: "data" + emptyDir: {} +{{- end }} diff --git a/elasticsearch/templates/ingest-svc.yaml b/elasticsearch/templates/ingest-svc.yaml new file mode 100644 index 0000000..87b366e --- /dev/null +++ b/elasticsearch/templates/ingest-svc.yaml @@ -0,0 +1,29 @@ +{{- if .Values.ingest.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "elasticsearch.ingest.fullname" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: ingest + annotations: {{ include "common.tplvalues.render" ( dict "value" .Values.ingest.service.annotations "context" $) | nindent 4 }} +spec: + type: {{ .Values.ingest.service.type | quote }} + {{- if and (eq .Values.ingest.service.type "LoadBalancer") (not (empty .Values.ingest.service.loadBalancerIP)) }} + loadBalancerIP: {{ .Values.ingest.service.loadBalancerIP }} + {{- end }} + publishNotReadyAddresses: true + ports: + - name: http + port: 9200 + targetPort: http + - name: tcp-transport + port: {{ .Values.ingest.service.port }} + targetPort: transport + {{- if and (or (eq .Values.ingest.service.type "NodePort") (eq .Values.ingest.service.type "LoadBalancer")) (not (empty .Values.ingest.service.nodePort)) }} + nodePort: {{ .Values.ingest.service.nodePort }} + {{- else if eq .Values.ingest.service.type "ClusterIP" }} + nodePort: null + {{- end }} + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: ingest +{{- end }} diff --git a/elasticsearch/templates/master-hpa.yaml b/elasticsearch/templates/master-hpa.yaml new file mode 100644 index 0000000..d542337 --- /dev/null +++ b/elasticsearch/templates/master-hpa.yaml @@ -0,0 +1,35 @@ +{{- if .Values.master.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "elasticsearch.master.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: master + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: {{ template "common.capabilities.statefulset.apiVersion" . }} + kind: StatefulSet + name: {{ include "elasticsearch.master.fullname" . }} + minReplicas: {{ .Values.master.autoscaling.minReplicas }} + maxReplicas: {{ .Values.master.autoscaling.maxReplicas }} + metrics: + {{- if .Values.master.autoscaling.targetCPU }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.master.autoscaling.targetCPU }} + {{- end }} + {{- if .Values.master.autoscaling.targetMemory }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.master.autoscaling.targetMemory }} + {{- end }} +{{- end }} diff --git a/elasticsearch/templates/master-statefulset.yaml b/elasticsearch/templates/master-statefulset.yaml new file mode 100644 index 0000000..a045ade --- /dev/null +++ b/elasticsearch/templates/master-statefulset.yaml @@ -0,0 +1,329 @@ +apiVersion: {{ template "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ include "elasticsearch.master.fullname" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: master + ## Istio Labels: https://istio.io/docs/ops/deployment/requirements/ + app: master +spec: + updateStrategy: + type: {{ .Values.master.updateStrategy.type }} + {{- if (eq "OnDelete" .Values.master.updateStrategy.type) }} + rollingUpdate: null + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: master + podManagementPolicy: Parallel + {{- if not .Values.master.autoscaling.enabled }} + replicas: {{ .Values.master.replicas }} + {{- end }} + serviceName: {{ template "elasticsearch.master.fullname" . }} + template: + metadata: + labels: {{- include "common.labels.standard" . | nindent 8 }} + app.kubernetes.io/component: master + ## Istio Labels: https://istio.io/docs/ops/deployment/requirements/ + app: master + {{- if .Values.master.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.master.podLabels "context" $) | nindent 8 }} + {{- end }} + annotations: + {{- if and (include "elasticsearch.createTlsSecret" .) (not .Values.security.tls.master.existingSecret) }} + checksum/tls: {{ include (print $.Template.BasePath "/tls-secret.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.master.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.master.podAnnotations "context" $) | nindent 8 }} + {{- end }} + spec: + {{- include "elasticsearch.imagePullSecrets" . 
| nindent 6 }} + {{- if .Values.master.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.master.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.master.schedulerName }} + schedulerName: {{ .Values.master.schedulerName }} + {{- end }} + {{- if .Values.master.priorityClassName }} + priorityClassName: {{ .Values.master.priorityClassName | quote }} + {{- end }} + {{- if .Values.master.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.master.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.master.podAffinityPreset "component" "master" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.master.podAntiAffinityPreset "component" "master" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.master.nodeAffinityPreset.type "key" .Values.master.nodeAffinityPreset.key "values" .Values.master.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.master.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.master.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.master.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.master.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.master.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.master.topologySpreadConstraints "context" $) | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "elasticsearch.master.serviceAccountName" . }} + {{- if or .Values.master.podSecurityContext.enabled .Values.master.securityContext.enabled }} + securityContext: + {{- if .Values.master.podSecurityContext.enabled }} + {{- omit .Values.master.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- else }} + fsGroup: {{ .Values.master.securityContext.fsGroup }} + {{- end }} + {{- end }} + {{- if or .Values.master.initContainers .Values.sysctlImage.enabled (and .Values.volumePermissions.enabled .Values.master.persistence.enabled) }} + initContainers: + {{- if .Values.sysctlImage.enabled }} + ## Image that performs the sysctl operation to modify Kernel settings (needed sometimes to avoid boot errors) + - name: sysctl + image: {{ include "elasticsearch.sysctl.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + command: + - /bin/bash + - -ec + - | + {{- include "elasticsearch.sysctlIfLess" (dict "key" "vm.max_map_count" "value" "262144") | nindent 14 }} + {{- include "elasticsearch.sysctlIfLess" (dict "key" "fs.file-max" "value" "65536") | nindent 14 }} + securityContext: + privileged: true + {{- if .Values.sysctlImage.resources }} + resources: {{- toYaml .Values.sysctlImage.resources | nindent 12 }} + {{- end }} + {{- end }} + {{- if and .Values.volumePermissions.enabled .Values.master.persistence.enabled }} + - name: volume-permissions + image: {{ include "elasticsearch.volumePermissions.image" . 
}} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + - -ec + - | + chown -R {{ .Values.master.securityContext.runAsUser }}:{{ .Values.master.securityContext.fsGroup }} //bitnami/elasticsearch/data + securityContext: + runAsUser: 0 + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: "/bitnami/elasticsearch/data" + {{- end }} + {{- if .Values.master.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.master.initContainers "context" $) | nindent 8 }} + {{- end }} + {{- end }} + containers: + - name: elasticsearch + image: {{ include "elasticsearch.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if or .Values.master.containerSecurityContext.enabled .Values.master.securityContext.enabled }} + securityContext: + {{- if .Values.master.containerSecurityContext.enabled }} + {{- omit .Values.master.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{ else }} + runAsUser: {{ .Values.master.securityContext.runAsUser }} + {{- end }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: ELASTICSEARCH_CLUSTER_NAME + value: {{ .Values.name | quote }} + - name: ELASTICSEARCH_CLUSTER_HOSTS + value: {{ include "elasticsearch.hosts" . | quote }} + - name: ELASTICSEARCH_TOTAL_NODES + value: {{ add (ternary .Values.master.autoscaling.minReplicas .Values.master.replicas .Values.master.autoscaling.enabled) (ternary .Values.data.autoscaling.minReplicas .Values.data.replicas .Values.data.autoscaling.enabled) | quote }} + - name: ELASTICSEARCH_CLUSTER_MASTER_HOSTS + {{- $elasticsearchMasterFullname := include "elasticsearch.master.fullname" . }} + {{- $replicas := int (ternary .Values.master.autoscaling.minReplicas .Values.master.replicas .Values.master.autoscaling.enabled) }} + value: {{range $i, $e := until $replicas }}{{ $elasticsearchMasterFullname }}-{{ $e }} {{ end }} + - name: ELASTICSEARCH_MINIMUM_MASTER_NODES + value: {{ add (div (ternary .Values.master.autoscaling.minReplicas .Values.master.replicas .Values.master.autoscaling.enabled) 2) 1 | quote }} + - name: ELASTICSEARCH_ADVERTISED_HOSTNAME + value: "$(MY_POD_NAME).{{ include "elasticsearch.master.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}" + {{- if .Values.plugins }} + - name: ELASTICSEARCH_PLUGINS + value: {{ .Values.plugins | quote }} + {{- end }} + {{- if .Values.snapshotRepoPath }} + - name: ELASTICSEARCH_FS_SNAPSHOT_REPO_PATH + value: {{ .Values.snapshotRepoPath | quote }} + {{- end }} + - name: ELASTICSEARCH_HEAP_SIZE + value: {{ .Values.master.heapSize | quote }} + - name: ELASTICSEARCH_IS_DEDICATED_NODE + value: "yes" + - name: ELASTICSEARCH_NODE_TYPE + value: "master" + {{- if .Values.security.enabled }} + {{- include "elasticsearch.configure.security" . 
| nindent 12 }} + {{- end }} + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.extraEnvVars "context" $ ) | nindent 12 }} + {{- end }} + {{- if or .Values.extraEnvVarsConfigMap .Values.extraEnvVarsSecret }} + envFrom: + {{- if .Values.extraEnvVarsConfigMap }} + - configMapRef: + name: {{ include "common.tplvalues.render" ( dict "value" .Values.extraEnvVarsConfigMap "context" $ ) }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" ( dict "value" .Values.extraEnvVarsSecret "context" $ ) }} + {{- end }} + {{- end }} + ports: + - name: http + containerPort: 9200 + - name: transport + containerPort: 9300 + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.master.startupProbe.enabled }} + startupProbe: + initialDelaySeconds: {{ .Values.master.startupProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.master.startupProbe.periodSeconds }} + timeoutSeconds: {{ .Values.master.startupProbe.timeoutSeconds }} + successThreshold: {{ .Values.master.startupProbe.successThreshold }} + failureThreshold: {{ .Values.master.startupProbe.failureThreshold }} + exec: + command: + - /opt/bitnami/scripts/elasticsearch/healthcheck.sh + {{- else if .Values.master.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.master.customStartupProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.master.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.master.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.master.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.master.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.master.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.master.livenessProbe.failureThreshold }} + exec: + command: + - /opt/bitnami/scripts/elasticsearch/healthcheck.sh + {{- else if .Values.master.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.master.customLivenessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.master.readinessProbe.enabled }} + readinessProbe: + initialDelaySeconds: {{ .Values.master.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.master.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.master.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.master.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.master.readinessProbe.failureThreshold }} + exec: + command: + - /opt/bitnami/scripts/elasticsearch/healthcheck.sh + {{- else if .Values.master.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.master.customReadinessProbe "context" $) | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.master.resources }} + resources: {{- toYaml .Values.master.resources | nindent 12 }} + {{- end }} + volumeMounts: + {{- if .Values.config }} + - mountPath: /opt/bitnami/elasticsearch/config/elasticsearch.yml + name: config + subPath: elasticsearch.yml + {{- end }} + {{- if .Values.extraConfig }} + - mountPath: /opt/bitnami/elasticsearch/config/my_elasticsearch.yml + name: config + subPath: my_elasticsearch.yml + {{- end }} + - name: data + mountPath: /bitnami/elasticsearch/data + {{- if .Values.security.enabled }} + - name: elasticsearch-certificates + mountPath: /opt/bitnami/elasticsearch/config/certs + readOnly: true + {{- end }} + {{- if .Values.extraVolumeMounts }} + 
{{- toYaml .Values.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- if .Values.initScripts }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d/init-scripts + {{- end }} + {{- if .Values.initScriptsCM }} + - name: custom-init-scripts-cm + mountPath: /docker-entrypoint-initdb.d/init-scripts-cm + {{- end }} + {{- if .Values.initScriptsSecret }} + - name: custom-init-scripts-secret + mountPath: /docker-entrypoint-initdb.d/init-scripts-secret + {{- end }} + {{- if .Values.master.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.master.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + {{- if or .Values.config .Values.extraConfig }} + - name: config + configMap: + name: {{ include "common.names.fullname" . }} + {{- end }} + {{- if .Values.security.enabled }} + - name: elasticsearch-certificates + secret: + secretName: {{ include "elasticsearch.master.tlsSecretName" . }} + defaultMode: 256 + {{- end }} + {{- if .Values.extraVolumes }} + {{- toYaml .Values.extraVolumes | nindent 8 }} + {{- end }} + {{- if .Values.initScripts }} + - name: custom-init-scripts + configMap: + name: {{ template "elasticsearch.initScripts" . }} + {{- end }} + {{- if .Values.initScriptsCM }} + - name: custom-init-scripts-cm + configMap: + name: {{ template "elasticsearch.initScriptsCM" . }} + {{- end }} + {{- if .Values.initScriptsSecret }} + - name: custom-init-scripts-secret + secret: + secretName: {{ template "elasticsearch.initScriptsSecret" . }} + defaultMode: 0755 + {{- end }} +{{- if not .Values.master.persistence.enabled }} + - name: "data" + emptyDir: {} +{{- else if .Values.master.persistence.existingClaim }} + - name: "data" + persistentVolumeClaim: + claimName: {{ .Values.master.persistence.existingClaim }} +{{- else }} + volumeClaimTemplates: + - metadata: + name: "data" + {{- if .Values.master.persistence.annotations }} + annotations: {{- toYaml .Values.master.persistence.annotations | nindent 10 }} + {{- end }} + spec: + accessModes: {{- toYaml .Values.master.persistence.accessModes | nindent 10 }} + {{ $storage := dict "global" .Values.global "local" .Values.master }} + {{ include "elasticsearch.storageClass" $storage }} + resources: + requests: + storage: {{ .Values.master.persistence.size | quote }} + {{- if .Values.master.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.master.persistence.selector "context" $) | nindent 10 }} + {{- else if .Values.master.persistence.existingVolume }} + selector: + matchLabels: + volume: {{ .Values.master.persistence.existingVolume }} + {{- end }} +{{- end }} diff --git a/elasticsearch/templates/master-svc.yaml b/elasticsearch/templates/master-svc.yaml new file mode 100644 index 0000000..5884c1c --- /dev/null +++ b/elasticsearch/templates/master-svc.yaml @@ -0,0 +1,27 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "elasticsearch.master.fullname" . }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: master + annotations: {{ include "common.tplvalues.render" ( dict "value" .Values.master.service.annotations "context" $) | nindent 4 }} +spec: + type: {{ .Values.master.service.type | quote }} + {{- if and (eq .Values.master.service.type "LoadBalancer") (not (empty .Values.master.service.loadBalancerIP)) }} + loadBalancerIP: {{ .Values.master.service.loadBalancerIP }} + {{- end }} + publishNotReadyAddresses: true + ports: + - name: http + port: 9200 + targetPort: http + - name: tcp-transport + port: {{ .Values.master.service.port }} + targetPort: transport + {{- if and (or (eq .Values.master.service.type "NodePort") (eq .Values.master.service.type "LoadBalancer")) (not (empty .Values.master.service.nodePort)) }} + nodePort: {{ .Values.master.service.nodePort }} + {{- else if eq .Values.master.service.type "ClusterIP" }} + nodePort: null + {{- end }} + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: master diff --git a/elasticsearch/templates/metrics-deploy.yaml b/elasticsearch/templates/metrics-deploy.yaml new file mode 100644 index 0000000..a4f367c --- /dev/null +++ b/elasticsearch/templates/metrics-deploy.yaml @@ -0,0 +1,107 @@ +{{- if .Values.metrics.enabled }} +apiVersion: {{ template "common.capabilities.deployment.apiVersion" . }} +kind: Deployment +metadata: + name: {{ include "elasticsearch.metrics.fullname" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: metrics + ## Istio Labels: https://istio.io/docs/ops/deployment/requirements/ + app: metrics +spec: + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: metrics + replicas: 1 + template: + metadata: + labels: {{- include "common.labels.standard" . | nindent 8 }} + app.kubernetes.io/component: metrics + ## Istio Labels: https://istio.io/docs/ops/deployment/requirements/ + app: metrics + {{- if .Values.metrics.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.podLabels "context" $) | nindent 8 }} + {{- end }} + {{- with .Values.metrics.podAnnotations }} + annotations: {{ toYaml . | nindent 8 }} + {{- end }} + spec: + {{- include "elasticsearch.imagePullSecrets" . | nindent 6 }} + {{- if .Values.metrics.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.schedulerName }} + schedulerName: {{ .Values.metrics.schedulerName }} + {{- end }} + containers: + - name: metrics + image: {{ include "elasticsearch.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else }} + args: + {{- if gt (int .Values.coordinating.replicas) 0 }} + # Prefer coordinating only nodes to do the initial metrics query + - --es.uri=http://{{- if .Values.security.enabled }}elastic:{{ .Values.security.elasticPassword}}@{{- end}}{{ template "elasticsearch.coordinating.fullname" . 
}}:{{ .Values.coordinating.service.port }} + {{- else }} + # Using master nodes as there are no coordinating only nodes + - --es.uri=http://{{- if .Values.security.enabled }}elastic:{{ .Values.security.elasticPassword}}@{{- end}}{{ include "elasticsearch.master.fullname" . }}:{{ .Values.master.service.port }} + {{- end }} + - --es.all + {{- if .Values.metrics.extraArgs }} + {{- toYaml .Values.metrics.extraArgs | nindent 12 }} + {{- end }} + {{- end }} + ports: + - name: metrics + containerPort: 9114 + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.metrics.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.metrics.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.metrics.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.metrics.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.metrics.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.metrics.livenessProbe.failureThreshold }} + httpGet: + path: /metrics + port: metrics + {{- else if .Values.metrics.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customLivenessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.metrics.readinessProbe.enabled }} + readinessProbe: + initialDelaySeconds: {{ .Values.metrics.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.metrics.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.metrics.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.metrics.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.metrics.readinessProbe.failureThreshold }} + httpGet: + path: /metrics + port: metrics + {{- else if .Values.metrics.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customReadinessProbe "context" $) | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.metrics.resources }} + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + {{- if .Values.metrics.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.metrics.podAffinityPreset "component" "metrics" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.metrics.podAntiAffinityPreset "component" "metrics" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.metrics.nodeAffinityPreset.type "key" .Values.metrics.nodeAffinityPreset.key "values" .Values.metrics.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.metrics.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.topologySpreadConstraints "context" $) | nindent 8 }} + {{- end }} +{{- end }} diff --git a/elasticsearch/templates/metrics-svc.yaml b/elasticsearch/templates/metrics-svc.yaml new file mode 100644 index 0000000..6e49a71 --- /dev/null +++ b/elasticsearch/templates/metrics-svc.yaml @@ -0,0 
+1,17 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "elasticsearch.metrics.fullname" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: metrics + annotations: {{ include "common.tplvalues.render" ( dict "value" .Values.metrics.service.annotations "context" $) | nindent 4 }} +spec: + type: {{ .Values.metrics.service.type }} + ports: + - name: http-metrics + port: 9114 + targetPort: metrics + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: metrics +{{- end }} diff --git a/elasticsearch/templates/podsecuritypolicy.yaml b/elasticsearch/templates/podsecuritypolicy.yaml new file mode 100644 index 0000000..2d0a3ff --- /dev/null +++ b/elasticsearch/templates/podsecuritypolicy.yaml @@ -0,0 +1,34 @@ +{{- $pspAvailable := (semverCompare "<1.25-0" (include "common.capabilities.kubeVersion" .)) -}} +{{- if and $pspAvailable .Values.curator.enabled .Values.curator.psp.create }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ include "elasticsearch.curator.fullname" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + role: curator +spec: + privileged: true + #requiredDropCapabilities: + volumes: + - 'configMap' + - 'secret' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + # Require the container to run without root privileges. + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false +{{- end }} diff --git a/elasticsearch/templates/role.yaml b/elasticsearch/templates/role.yaml new file mode 100644 index 0000000..9f34b99 --- /dev/null +++ b/elasticsearch/templates/role.yaml @@ -0,0 +1,21 @@ +{{- if and .Values.curator.enabled .Values.curator.rbac.enabled }} +kind: Role +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +metadata: + name: {{ include "elasticsearch.curator.fullname" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + role: curator + component: elasticsearch-curator-configmap +rules: + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["update", "patch"] + {{- $pspAvailable := (semverCompare "<1.25-0" (include "common.capabilities.kubeVersion" .)) -}} + {{- if and $pspAvailable .Values.curator.psp.create }} + - apiGroups: ["extensions"] + resources: ["podsecuritypolicies"] + verbs: ["use"] + resourceNames: + - {{ include "elasticsearch.curator.fullname" . }} + {{- end }} +{{- end }} diff --git a/elasticsearch/templates/rolebinding.yaml b/elasticsearch/templates/rolebinding.yaml new file mode 100644 index 0000000..658e028 --- /dev/null +++ b/elasticsearch/templates/rolebinding.yaml @@ -0,0 +1,17 @@ +{{- if and .Values.curator.enabled .Values.curator.rbac.enabled }} +kind: RoleBinding +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +metadata: + name: {{ include "elasticsearch.curator.fullname" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + role: curator + component: elasticsearch-curator-configmap +roleRef: + kind: Role + name: {{ template "elasticsearch.curator.fullname" . }} + apiGroup: rbac.authorization.k8s.io +subjects: + - kind: ServiceAccount + name: {{ include "elasticsearch.curator.serviceAccountName" . 
}} + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/elasticsearch/templates/secrets.yaml b/elasticsearch/templates/secrets.yaml new file mode 100644 index 0000000..140e3e8 --- /dev/null +++ b/elasticsearch/templates/secrets.yaml @@ -0,0 +1,54 @@ +{{- if (include "elasticsearch.createSecret" . ) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "common.names.fullname" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + {{- if .Values.security.elasticPassword }} + elasticsearch-password: {{ default "" .Values.security.elasticPassword | b64enc | quote }} + {{- else }} + elasticsearch-password: {{ randAlphaNum 14 | b64enc | quote }} + {{- end }} + {{- if .Values.security.tls.keystorePassword }} + keystore-password: {{ default "" .Values.security.tls.keystorePassword | b64enc | quote }} + {{- end }} + {{- if .Values.security.tls.truststorePassword }} + truststore-password: {{ default "" .Values.security.tls.truststorePassword | b64enc | quote }} + {{- end }} + {{- if .Values.security.tls.keyPassword }} + key-password: {{ default "" .Values.security.tls.keyPassword | b64enc | quote }} + {{- end }} +--- +{{- end }} +{{- if (include "elasticsearch.createTlsPasswordsSecret" . ) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "common.names.fullname" . }}-tls-pass + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + {{- if .Values.security.tls.keystorePassword }} + truststore-password: {{ default "" .Values.elasticPassword | b64enc | quote }} + {{- end }} + {{- if .Values.security.tls.truststorePassword }} + keystore-password: {{ default "" .Values.elasticPassword | b64enc | quote }} + {{- end }} + {{- if .Values.security.tls.elasticPassword }} + key-password: {{ default "" .Values.elasticPassword | b64enc | quote }} + {{- end }} +{{- end }} diff --git a/elasticsearch/templates/serviceaccount.yaml b/elasticsearch/templates/serviceaccount.yaml new file mode 100644 index 0000000..60da547 --- /dev/null +++ b/elasticsearch/templates/serviceaccount.yaml @@ -0,0 +1,44 @@ +{{- if and .Values.curator.enabled .Values.curator.serviceAccount.create .Values.curator.rbac.enabled }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "elasticsearch.curator.serviceAccountName" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + role: curator +{{- end }} +{{- if .Values.data.serviceAccount.create }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "elasticsearch.data.serviceAccountName" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + role: data +{{- end }} +{{- if .Values.master.serviceAccount.create }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "elasticsearch.master.serviceAccountName" . }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + role: master +{{- end }} +{{- if .Values.coordinating.serviceAccount.create }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "elasticsearch.coordinating.serviceAccountName" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + role: coordinating-only +{{- end }} +{{- if and .Values.ingest.enabled .Values.ingest.serviceAccount.create }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "elasticsearch.ingest.serviceAccountName" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + role: ingest +{{- end }} diff --git a/elasticsearch/templates/servicemonitor.yaml b/elasticsearch/templates/servicemonitor.yaml new file mode 100644 index 0000000..b4a9fe6 --- /dev/null +++ b/elasticsearch/templates/servicemonitor.yaml @@ -0,0 +1,29 @@ +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "elasticsearch.metrics.fullname" . }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- end }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: metrics +spec: + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + {{- if .Values.metrics.serviceMonitor.selector }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.selector "context" $) | nindent 6 }} + {{- end }} + app.kubernetes.io/component: metrics + endpoints: + - port: http-metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} +{{- end }} diff --git a/elasticsearch/templates/tls-secret.yaml b/elasticsearch/templates/tls-secret.yaml new file mode 100644 index 0000000..f4f157a --- /dev/null +++ b/elasticsearch/templates/tls-secret.yaml @@ -0,0 +1,99 @@ +{{- if (include "elasticsearch.createTlsSecret" .) }} +{{- $ca := genCA "elasticsearch-ca" 365 }} +{{- $releaseNamespace := .Release.Namespace }} +{{- $clusterDomain := .Values.clusterDomain }} + +{{- if not .Values.security.tls.master.existingSecret }} +--- +{{- $fullname := include "elasticsearch.master.fullname" . }} +{{- $serviceName := include "elasticsearch.master.fullname" . }} +{{- $altNames := list (printf "*.%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) $fullname "127.0.0.1" "localhost" }} +{{- $crt := genSignedCert $fullname nil $altNames 365 $ca }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "elasticsearch.master.fullname" . }}-crt + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + app.kubernetes.io/component: master +type: kubernetes.io/tls +data: + ca.crt: {{ $ca.Cert | b64enc | quote }} + tls.crt: {{ $crt.Cert | b64enc | quote }} + tls.key: {{ $crt.Key | b64enc | quote }} +{{- end }} +{{- if not .Values.security.tls.data.existingSecret }} +--- +{{- $fullname := include "elasticsearch.data.fullname" . }} +{{- $serviceName := include "elasticsearch.data.fullname" . 
}} +{{- $altNames := list (printf "*.%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) $fullname "127.0.0.1" "localhost" }} +{{- $crt := genSignedCert $fullname nil $altNames 365 $ca }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "elasticsearch.data.fullname" . }}-crt + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + app.kubernetes.io/component: data + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + ca.crt: {{ $ca.Cert | b64enc | quote }} + tls.crt: {{ $crt.Cert | b64enc | quote }} + tls.key: {{ $crt.Key | b64enc | quote }} +{{- end }} +{{- if not .Values.security.tls.coordinating.existingSecret }} +--- +{{- $fullname := include "elasticsearch.coordinating.fullname" . }} +{{- $serviceName := include "elasticsearch.coordinating.fullname" . }} +{{- $altNames := list (printf "*.%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) $fullname "127.0.0.1" "localhost" }} +{{- $crt := genSignedCert $fullname nil $altNames 365 $ca }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "elasticsearch.coordinating.fullname" . }}-crt + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + app.kubernetes.io/component: coordinating-only + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + ca.crt: {{ $ca.Cert | b64enc | quote }} + tls.crt: {{ $crt.Cert | b64enc | quote }} + tls.key: {{ $crt.Key | b64enc | quote }} +{{- end }} +{{- if and .Values.ingest.enabled (not .Values.security.tls.ingest.existingSecret) }} +--- +{{- $fullname := include "elasticsearch.ingest.fullname" . }} +{{- $serviceName := include "elasticsearch.ingest.fullname" . }} +{{- $altNames := list (printf "*.%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) $fullname "127.0.0.1" "localhost" }} +{{- $crt := genSignedCert $fullname nil $altNames 365 $ca }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "elasticsearch.ingest.fullname" . }}-crt + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + app.kubernetes.io/component: ingest + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + ca.crt: {{ $ca.Cert | b64enc | quote }} + tls.crt: {{ $crt.Cert | b64enc | quote }} + tls.key: {{ $crt.Key | b64enc | quote }} +{{- end }} +{{- end }} diff --git a/elasticsearch/values.yaml b/elasticsearch/values.yaml new file mode 100644 index 0000000..bf3963d --- /dev/null +++ b/elasticsearch/values.yaml @@ -0,0 +1,1783 @@ +## @section Global parameters +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass + +## @param global.imageRegistry Global Docker image registry +## @param global.imagePullSecrets Global Docker registry secret names as an array +## @param global.storageClass Global StorageClass for Persistent Volume(s) +## @param global.coordinating.name Coordinating name to be used in the Kibana subchart (service name) +## @param global.kibanaEnabled Whether or not to enable Kibana +## +global: + imageRegistry: "" + ## E.g. + ## imagePullSecrets: + ## - myRegistryKeySecretName + ## + imagePullSecrets: [] + storageClass: "" + coordinating: + name: coordinating-only + kibanaEnabled: false + +## @section Common parameters + +## @param nameOverride String to partially override common.names.fullname template (will maintain the release name) +## +nameOverride: "" +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: "" +## @param clusterDomain Kubernetes cluster domain +## +clusterDomain: cluster.local +## @param extraDeploy Array of extra objects to deploy with the release +## +extraDeploy: [] + +## Enable diagnostic mode in the deployment +## +diagnosticMode: + ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) + ## + enabled: false + ## @param diagnosticMode.command Command to override all containers in the deployment + ## + command: + - sleep + ## @param diagnosticMode.args Args to override all containers in the deployment + ## + args: + - infinity + +## @section Elasticsearch parameters + +## Bitnami Elasticsearch image version +## ref: https://hub.docker.com/r/bitnami/elasticsearch/tags/ +## @param image.registry Elasticsearch image registry +## @param image.repository Elasticsearch image repository +## @param image.tag Elasticsearch image tag (immutable tags are recommended) +## @param image.pullPolicy Elasticsearch image pull policy +## @param image.pullSecrets Elasticsearch image pull secrets +## @param image.debug Enable image debug mode +## +image: + registry: docker.io + repository: bitnami/elasticsearch + tag: 7.16.2-debian-10-r0 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Set to true if you would like to see extra information on logs + ## + debug: false + +## X-Pack security parameters +## Note: TLS configuration is required in order to configure password authentication +## +security: + ## @param security.enabled Enable X-Pack Security settings + ## + enabled: false + ## @param security.elasticPassword Password for 'elastic' user + ## Ref: https://github.com/bitnami/bitnami-docker-elasticsearch#security + ## + elasticPassword: "" + ## @param security.existingSecret Name of the existing secret containing the Elasticsearch password + ## + existingSecret: "" + ## FIPS mode + ## @param security.fipsMode Configure elasticsearch with FIPS 140 compliant mode + ## Ref: https://www.elastic.co/guide/en/elasticsearch/reference/current/fips-140-compliance.html + ## + fipsMode: false + ## TLS configuration + ## + tls: + ## @param security.tls.restEncryption Enable SSL/TLS encryption for Elasticsearch REST API. + ## + restEncryption: true + ## @param security.tls.autoGenerated Create self-signed TLS certificates. + ## Note: Currently only supports PEM certificates. + ## + autoGenerated: false + ## @param security.tls.verificationMode Verification mode for SSL communications. + ## Supported values: full, certificate, none. + ## Ref: https://www.elastic.co/guide/en/elasticsearch/reference/current/security-settings.html + ## + verificationMode: "full" + ## @param security.tls.master.existingSecret Existing secret containing the certificates for the master nodes + ## @param security.tls.data.existingSecret Existing secret containing the certificates for the data nodes + ## @param security.tls.ingest.existingSecret Existing secret containing the certificates for the ingest nodes + ## @param security.tls.coordinating.existingSecret Existing secret containing the certificates for the coordinating nodes + ## + master: + existingSecret: "" + data: + existingSecret: "" + ingest: + existingSecret: "" + coordinating: + existingSecret: "" + ## @param security.tls.keystorePassword Password to access the JKS/PKCS12 keystore or PEM key when they are password-protected. + ## + keystorePassword: "" + ## @param security.tls.truststorePassword Password to access the JKS/PKCS12 truststore when they are password-protected. + ## + truststorePassword: "" + ## @param security.tls.keystoreFilename Name of the keystore file + ## + keystoreFilename: elasticsearch.keystore.jks + ## @param security.tls.truststoreFilename Name of the truststore + ## + truststoreFilename: elasticsearch.truststore.jks + ## @param security.tls.usePemCerts Use this variable if your secrets contain PEM certificates instead of JKS/PKCS12 + ## Ignored when using autoGenerated certs. + ## + usePemCerts: false + ## @param security.tls.keyPassword Password to access the PEM key when they are password-protected. 
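+  ## An illustrative (non-default) override that enables X-Pack security with
+  ## auto-generated certificates; the password below is only a placeholder, e.g.:
+  ## security:
+  ##   enabled: true
+  ##   elasticPassword: "my-elastic-password"
+  ##   tls:
+  ##     autoGenerated: true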
+ ## + keyPassword: "" + +## Elasticsearch cluster name +## @param name Elasticsearch cluster name +## +name: elastic +## @param plugins Comma, semi-colon or space separated list of plugins to install at initialization +## ref: https://github.com/bitnami/bitnami-docker-elasticsearch#environment-variables +## +plugins: "" +## @param snapshotRepoPath File System snapshot repository path +## ref: https://github.com/bitnami/bitnami-docker-elasticsearch#environment-variables +## +snapshotRepoPath: "" +## @param config Override elasticsearch configuration +## +config: {} +## @param extraConfig Append extra configuration to the elasticsearch node configuration +## Use this instead of `config` to add more configuration +## See below example: +## extraConfig: +## node: +## store: +## allow_mmap: false +## ref: https://www.elastic.co/guide/en/elasticsearch/reference/current/settings.html +## +extraConfig: {} +## @param extraVolumes A list of volumes to be added to the pod +## Example Use Case: mount ssl certificates when elasticsearch has tls enabled +## extraVolumes: +## - name: es-certs +## secret: +## defaultMode: 420 +## secretName: es-certs +extraVolumes: [] +## @param extraVolumeMounts A list of volume mounts to be added to the pod +## extraVolumeMounts: +## - name: es-certs +## mountPath: /certs +## readOnly: true +extraVolumeMounts: [] +## @param initScripts Dictionary of init scripts. Evaluated as a template. +## Specify dictionary of scripts to be run at first boot +## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory +## For example: +## initScripts: +## my_init_script.sh: | +## #!/bin/sh +## echo "Do something." +## +initScripts: {} +## @param initScriptsCM ConfigMap with the init scripts. Evaluated as a template. +## Note: This will override initScripts +## +initScriptsCM: "" +## @param initScriptsSecret Secret containing `/docker-entrypoint-initdb.d` scripts to be executed at initialization time that contain sensitive data. Evaluated as a template. +## +initScriptsSecret: "" +## @param extraEnvVars Array containing extra env vars to be added to all pods (evaluated as a template) +## For example: +## extraEnvVars: +## - name: MY_ENV_VAR +## value: env_var_value +## +extraEnvVars: [] +## @param extraEnvVarsConfigMap ConfigMap containing extra env vars to be added to all pods (evaluated as a template) +## +extraEnvVarsConfigMap: "" +## @param extraEnvVarsSecret Secret containing extra env vars to be added to all pods (evaluated as a template) +## +extraEnvVarsSecret: "" + +## @section Master parameters + +## Elasticsearch master-eligible node parameters +## +master: + ## @param master.name Master-eligible node pod name + ## + name: master + ## @param master.fullnameOverride String to fully override elasticsearch.master.fullname template with a string + ## + fullnameOverride: "" + ## @param master.replicas Desired number of Elasticsearch master-eligible nodes. Consider using an odd number of master nodes to prevent "split brain" situation. 
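+  ## For example, with replicas: 3 a quorum of 2 master-eligible nodes is required,
+  ## so the cluster stays available if a single master-eligible node is lost.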
See: https://www.elastic.co/guide/en/elasticsearch/reference/7.x/modules-discovery-voting.html + ## https://www.elastic.co/guide/en/elasticsearch/reference/7.x/modules-discovery-voting.html#_even_numbers_of_master_eligible_nodes + ## https://www.elastic.co/guide/en/elasticsearch/reference/7.x/modules-discovery-quorums.html#modules-discovery-quorums + ## + replicas: 3 + ## Update strategy for ElasticSearch master statefulset + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## @param master.updateStrategy.type Update strategy for Master statefulset + ## + updateStrategy: + type: RollingUpdate + ## @param master.hostAliases Add deployment host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param master.schedulerName Name of the k8s scheduler (other than default) + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## + ## @param master.heapSize Master-eligible node heap size + ## + heapSize: 128m + ## @param master.podAnnotations Annotations for master-eligible pods. + ## + podAnnotations: {} + ## @param master.podLabels Extra labels to add to Pod + ## + podLabels: {} + ## Pod Security Context for master-eligible pods. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param master.securityContext.enabled Enable security context for master-eligible pods + ## @param master.securityContext.fsGroup Group ID for the container for master-eligible pods + ## @param master.securityContext.runAsUser User ID for the container for master-eligible pods + ## + securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + ## Pod Security Context for master-eligible pods. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param master.podSecurityContext.enabled Enable security context for master-eligible pods + ## @param master.podSecurityContext.fsGroup Group ID for the container for master-eligible pods + ## + podSecurityContext: + enabled: false + fsGroup: 1001 + ## Container Security Context for master-eligible pods. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param master.containerSecurityContext.enabled Enable security context for master-eligible pods + ## @param master.containerSecurityContext.runAsUser User ID for the container for master-eligible pods + ## + containerSecurityContext: + enabled: false + runAsUser: 1001 + ## @param master.podAffinityPreset Master-eligible Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param master.podAntiAffinityPreset Master-eligible Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: "" + ## Node affinity preset. Allowed values: soft, hard + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## @param master.nodeAffinityPreset.type Master-eligible Node affinity preset type. Ignored if `affinity` is set. 
Allowed values: `soft` or `hard` + ## @param master.nodeAffinityPreset.key Master-eligible Node label key to match Ignored if `affinity` is set. + ## @param master.nodeAffinityPreset.values Master-eligible Node label values to match. Ignored if `affinity` is set. + ## + nodeAffinityPreset: + type: "" + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param master.affinity Master-eligible Affinity for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param master.priorityClassName Master pods Priority Class Name + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass + ## + priorityClassName: "" + ## @param master.nodeSelector Master-eligible Node labels for pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param master.tolerations Master-eligible Tolerations for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param master.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: [] + ## Elasticsearch master-eligible container's resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
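+  ## As a rule of thumb, keep master.heapSize at no more than roughly half of the
+  ## memory granted to the container, e.g. (illustrative values, not defaults):
+  ## master:
+  ##   heapSize: 512m
+  ##   resources:
+  ##     requests:
+  ##       memory: 1Gi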
+ ## @param master.resources.limits The resources limits for the container + ## @param master.resources.requests [object] The requested resources for the container + ## + resources: + ## Example: + ## limits: + ## cpu: 100m + ## memory: 128Mi + limits: {} + requests: + cpu: 25m + memory: 256Mi + ## Elasticsearch master-eligible container's startup probe + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## @param master.startupProbe.enabled Enable/disable the startup probe (master nodes pod) + ## @param master.startupProbe.initialDelaySeconds Delay before startup probe is initiated (master nodes pod) + ## @param master.startupProbe.periodSeconds How often to perform the probe (master nodes pod) + ## @param master.startupProbe.timeoutSeconds When the probe times out (master nodes pod) + ## @param master.startupProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed (master nodes pod) + ## @param master.startupProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded + ## + startupProbe: + enabled: false + initialDelaySeconds: 90 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## Elasticsearch master-eligible container's liveness probe + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## @param master.livenessProbe.enabled Enable/disable the liveness probe (master-eligible nodes pod) + ## @param master.livenessProbe.initialDelaySeconds Delay before liveness probe is initiated (master-eligible nodes pod) + ## @param master.livenessProbe.periodSeconds How often to perform the probe (master-eligible nodes pod) + ## @param master.livenessProbe.timeoutSeconds When the probe times out (master-eligible nodes pod) + ## @param master.livenessProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed (master-eligible nodes pod) + ## @param master.livenessProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded + ## + livenessProbe: + enabled: true + initialDelaySeconds: 90 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## Elasticsearch master-eligible container's readiness probe + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## @param master.readinessProbe.enabled Enable/disable the readiness probe (master-eligible nodes pod) + ## @param master.readinessProbe.initialDelaySeconds Delay before readiness probe is initiated (master-eligible nodes pod) + ## @param master.readinessProbe.periodSeconds How often to perform the probe (master-eligible nodes pod) + ## @param master.readinessProbe.timeoutSeconds When the probe times out (master-eligible nodes pod) + ## @param master.readinessProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed (master-eligible nodes pod) + ## @param master.readinessProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded + ## + readinessProbe: + enabled: true + initialDelaySeconds: 90 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param master.customStartupProbe Override default startup probe + ## + customStartupProbe: {} + ## @param master.customLivenessProbe Override default liveness 
probe + ## + customLivenessProbe: {} + ## @param master.customReadinessProbe Override default readiness probe + ## + customReadinessProbe: {} + ## @param master.initContainers Extra init containers to add to the Elasticsearch master-eligible pod(s) + ## + initContainers: [] + ## @param master.sidecars Extra sidecar containers to add to the Elasticsearch master-eligible pod(s) + ## + sidecars: [] + ## Enable persistence using Persistent Volume Claims + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param master.persistence.enabled Enable persistence using a `PersistentVolumeClaim` + ## + enabled: true + ## @param master.persistence.storageClass Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param master.persistence.existingClaim Existing Persistent Volume Claim + ## then accept the value as an existing Persistent Volume Claim to which + ## the container should be bound + ## + existingClaim: "" + ## @param master.persistence.existingVolume Existing Persistent Volume for use as volume match label selector to the `volumeClaimTemplate`. Ignored when `master.persistence.selector` is set. + ## + existingVolume: "" + ## @param master.persistence.selector Configure custom selector for existing Persistent Volume. Overwrites `master.persistence.existingVolume` + ## selector: + ## matchLabels: + ## volume: + ## + selector: {} + ## @param master.persistence.annotations Persistent Volume Claim annotations + ## + annotations: {} + ## @param master.persistence.accessModes Persistent Volume Access Modes + ## + accessModes: + - ReadWriteOnce + ## @param master.persistence.size Persistent Volume Size + ## + size: 8Gi + ## Service parameters for master-eligible node(s) + ## + service: + ## @param master.service.type Kubernetes Service type (master-eligible nodes) + ## + type: ClusterIP + ## @param master.service.port Kubernetes Service port for Elasticsearch transport port (master-eligible nodes) + ## + port: 9300 + ## @param master.service.nodePort Kubernetes Service nodePort (master-eligible nodes) + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePort: "" + ## @param master.service.annotations Annotations for master-eligible nodes service + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + ## @param master.service.loadBalancerIP loadBalancerIP if master-eligible nodes service type is `LoadBalancer` + ## Set the LoadBalancer service type to internal only. 
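+    ## Illustrative override pinning the master service to a pre-allocated address
+    ## (honoured only where the cloud provider supports loadBalancerIP), e.g.:
+    ## master:
+    ##   service:
+    ##     type: LoadBalancer
+    ##     loadBalancerIP: "10.0.0.50"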
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## Provide functionality to use RBAC + ## + serviceAccount: + ## @param master.serviceAccount.create Enable creation of ServiceAccount for the master node + ## + create: false + ## @param master.serviceAccount.name Name of the created serviceAccount + ## If not set and create is true, a name is generated using the fullname template + name: "" + ## Autoscaling configuration + ## @param master.autoscaling.enabled Enable autoscaling for master replicas + ## @param master.autoscaling.minReplicas Minimum number of master replicas + ## @param master.autoscaling.maxReplicas Maximum number of master replicas + ## @param master.autoscaling.targetCPU Target CPU utilization percentage for master replica autoscaling + ## @param master.autoscaling.targetMemory Target Memory utilization percentage for master replica autoscaling + ## + autoscaling: + enabled: false + minReplicas: 2 + maxReplicas: 11 + targetCPU: "" + targetMemory: "" + +## @section Coordinating parameters + +## Elasticsearch coordinating-only node parameters +## +coordinating: + ## @param coordinating.fullnameOverride String to fully override elasticsearch.coordinating.fullname template with a string + ## + fullnameOverride: "" + ## @param coordinating.replicas Desired number of Elasticsearch coordinating-only nodes + ## + replicas: 2 + ## @param coordinating.hostAliases Add deployment host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param coordinating.schedulerName Name of the k8s scheduler (other than default) + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## Update strategy for ElasticSearch coordinating statefulset + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## @param coordinating.updateStrategy.type Update strategy for Coordinating Statefulset + ## + updateStrategy: + type: RollingUpdate + ## @param coordinating.heapSize Coordinating-only node heap size + ## + heapSize: 128m + ## @param coordinating.podAnnotations Annotations for coordinating pods. + ## + podAnnotations: {} + ## @param coordinating.podLabels Extra labels to add to Pod + ## + podLabels: {} + ## Pod Security Context for coordinating-only pods. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param coordinating.securityContext.enabled Enable security context for coordinating-only pods + ## @param coordinating.securityContext.fsGroup Group ID for the container for coordinating-only pods + ## @param coordinating.securityContext.runAsUser User ID for the container for coordinating-only pods + ## + securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + ## Pod Security Context for coordinating-only pods. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param coordinating.podSecurityContext.enabled Enable security context for coordinating-only pods + ## @param coordinating.podSecurityContext.fsGroup Group ID for the container for coordinating-only pods + ## + podSecurityContext: + enabled: false + fsGroup: 1001 + ## Container Security Context for coordinating-only pods. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param coordinating.containerSecurityContext.enabled Enable security context for coordinating-only pods + ## @param coordinating.containerSecurityContext.runAsUser User ID for the container for coordinating-only pods + ## + containerSecurityContext: + enabled: false + runAsUser: 1001 + ## @param coordinating.podAffinityPreset Coordinating Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param coordinating.podAntiAffinityPreset Coordinating Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: "" + ## Node affinity preset + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## @param coordinating.nodeAffinityPreset.type Coordinating Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## @param coordinating.nodeAffinityPreset.key Coordinating Node label key to match Ignored if `affinity` is set. + ## @param coordinating.nodeAffinityPreset.values Coordinating Node label values to match. Ignored if `affinity` is set. + ## + nodeAffinityPreset: + type: "" + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param coordinating.affinity Coordinating Affinity for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param coordinating.priorityClassName Coordinating pods Priority Class Name + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass + ## + priorityClassName: "" + ## @param coordinating.nodeSelector Coordinating Node labels for pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param coordinating.tolerations Coordinating Tolerations for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param coordinating.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: [] + ## Elasticsearch coordinating-only container's resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ ## @param coordinating.resources.limits The resources limits for the container + ## @param coordinating.resources.requests [object] The requested resources for the container + ## + resources: + ## Example: + ## limits: + ## cpu: 100m + ## memory: 384Mi + limits: {} + requests: + cpu: 25m + memory: 256Mi + ## Elasticsearch coordinating-only container's startup probe + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## @param coordinating.startupProbe.enabled Enable/disable the startup probe (coordinating nodes pod) + ## @param coordinating.startupProbe.initialDelaySeconds Delay before startup probe is initiated (coordinating nodes pod) + ## @param coordinating.startupProbe.periodSeconds How often to perform the probe (coordinating nodes pod) + ## @param coordinating.startupProbe.timeoutSeconds When the probe times out (coordinating nodes pod) + ## @param coordinating.startupProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded + ## @param coordinating.startupProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed (coordinating nodes pod) + ## + startupProbe: + enabled: false + initialDelaySeconds: 90 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## Elasticsearch coordinating-only container's liveness probe + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## @param coordinating.livenessProbe.enabled Enable/disable the liveness probe (coordinating-only nodes pod) + ## @param coordinating.livenessProbe.initialDelaySeconds Delay before liveness probe is initiated (coordinating-only nodes pod) + ## @param coordinating.livenessProbe.periodSeconds How often to perform the probe (coordinating-only nodes pod) + ## @param coordinating.livenessProbe.timeoutSeconds When the probe times out (coordinating-only nodes pod) + ## @param coordinating.livenessProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded + ## @param coordinating.livenessProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed (coordinating-only nodes pod) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 90 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## Elasticsearch coordinating-only container's readiness probe + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## @param coordinating.readinessProbe.enabled Enable/disable the readiness probe (coordinating-only nodes pod) + ## @param coordinating.readinessProbe.initialDelaySeconds Delay before readiness probe is initiated (coordinating-only nodes pod) + ## @param coordinating.readinessProbe.periodSeconds How often to perform the probe (coordinating-only nodes pod) + ## @param coordinating.readinessProbe.timeoutSeconds When the probe times out (coordinating-only nodes pod) + ## @param coordinating.readinessProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded + ## @param coordinating.readinessProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed (coordinating-only nodes pod) + ## + readinessProbe: + enabled: true + initialDelaySeconds: 90 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + 
failureThreshold: 5 + ## @param coordinating.customStartupProbe Override default startup probe + ## + customStartupProbe: {} + ## @param coordinating.customLivenessProbe Override default liveness probe + ## + customLivenessProbe: {} + ## @param coordinating.customReadinessProbe Override default readiness probe + ## + customReadinessProbe: {} + ## @param coordinating.initContainers Extra init containers to add to the Elasticsearch coordinating-only pod(s) + ## + initContainers: [] + ## @param coordinating.sidecars Extra sidecar containers to add to the Elasticsearch coordinating-only pod(s) + ## + sidecars: [] + ## Service parameters for coordinating-only node(s) + ## + service: + ## @param coordinating.service.type Kubernetes Service type (coordinating-only nodes) + ## + type: ClusterIP + ## @param coordinating.service.port Kubernetes Service port for REST API (coordinating-only nodes) + ## + port: 9200 + ## @param coordinating.service.nodePort Kubernetes Service nodePort (coordinating-only nodes) + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePort: "" + ## @param coordinating.service.annotations Annotations for coordinating-only nodes service + ## Set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + ## @param coordinating.service.loadBalancerIP loadBalancerIP if coordinating-only nodes service type is `LoadBalancer` + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param coordinating.service.externalTrafficPolicy Enable client source IP preservation with externalTrafficPolicy: Local + ## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## Provide functionality to use RBAC + ## + serviceAccount: + ## @param coordinating.serviceAccount.create Enable creation of ServiceAccount for the coordinating-only node + ## + create: false + ## @param coordinating.serviceAccount.name Name of the created serviceAccount + ## If not set and create is true, a name is generated using the fullname template + ## + name: "" + ## Autoscaling configuration + ## @param coordinating.autoscaling.enabled Enable autoscaling for coordinating replicas + ## @param coordinating.autoscaling.minReplicas Minimum number of coordinating replicas + ## @param coordinating.autoscaling.maxReplicas Maximum number of coordinating replicas + ## @param coordinating.autoscaling.targetCPU Target CPU utilization percentage for coordinating replica autoscaling + ## @param coordinating.autoscaling.targetMemory Target Memory utilization percentage for coordinating replica autoscaling + ## + autoscaling: + enabled: false + minReplicas: 2 + maxReplicas: 11 + targetCPU: "" + targetMemory: "" + +## @section Data parameters + +## Elasticsearch data node parameters +## +data: + ## @param data.name Data node pod name + ## + name: data + ## @param data.fullnameOverride String to fully override elasticsearch.data.fullname template with a string + ## + fullnameOverride: "" + ## @param data.replicas Desired number of Elasticsearch data nodes + ## + replicas: 2 + ## @param data.hostAliases Add deployment host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param data.schedulerName Name of 
the k8s scheduler (other than default) + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## Update strategy for ElasticSearch Data statefulset + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## @param data.updateStrategy.type Update strategy for Data statefulset + ## @param data.updateStrategy.rollingUpdatePartition Partition update strategy for Data statefulset + ## + updateStrategy: + type: RollingUpdate + rollingUpdatePartition: "" + ## @param data.heapSize Data node heap size + ## + heapSize: 1024m + ## @param data.podAnnotations Annotations for data pods. + ## + podAnnotations: {} + ## @param data.podLabels Extra labels to add to Pod + ## + podLabels: {} + ## Pod Security Context for data pods. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param data.securityContext.enabled Enable security context for data pods + ## @param data.securityContext.fsGroup Group ID for the container for data pods + ## @param data.securityContext.runAsUser User ID for the container for data pods + ## + securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + ## Pod Security Context for data pods. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param data.podSecurityContext.enabled Enable security context for data pods + ## @param data.podSecurityContext.fsGroup Group ID for the container for data pods + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + ## Container Security Context for data pods. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param data.containerSecurityContext.enabled Enable security context for data pods + ## @param data.containerSecurityContext.runAsUser User ID for the container for data pods + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + ## @param data.podAffinityPreset Data Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param data.podAntiAffinityPreset Data Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: "" + ## Node affinity preset. Allowed values: soft, hard + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## @param data.nodeAffinityPreset.type Data Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## @param data.nodeAffinityPreset.key Data Node label key to match Ignored if `affinity` is set. + ## @param data.nodeAffinityPreset.values Data Node label values to match. Ignored if `affinity` is set. + ## + nodeAffinityPreset: + type: "" + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## E.g. 
+ ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + + ## @param data.affinity Data Affinity for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param data.priorityClassName Data pods Priority Class Name + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass + ## + priorityClassName: "" + ## @param data.nodeSelector Data Node labels for pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param data.tolerations Data Tolerations for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param data.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: [] + ## Elasticsearch data container's resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. + ## @param data.resources.limits The resources limits for the container + ## @param data.resources.requests [object] The requested resources for the container + ## + resources: + ## Example: + ## limits: + ## cpu: 100m + ## memory: 2176Mi + limits: {} + requests: + cpu: 25m + memory: 2048Mi + ## Elasticsearch data container's startup probe + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## @param data.startupProbe.enabled Enable/disable the startup probe (data nodes pod) + ## @param data.startupProbe.initialDelaySeconds Delay before startup probe is initiated (data nodes pod) + ## @param data.startupProbe.periodSeconds How often to perform the probe (data nodes pod) + ## @param data.startupProbe.timeoutSeconds When the probe times out (data nodes pod) + ## @param data.startupProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded + ## @param data.startupProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed (data nodes pod) + ## + startupProbe: + enabled: false + initialDelaySeconds: 90 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## Elasticsearch data container's liveness probe + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## @param data.livenessProbe.enabled Enable/disable the liveness probe (data nodes pod) + ## @param data.livenessProbe.initialDelaySeconds Delay before liveness probe is initiated (data nodes pod) + ## @param data.livenessProbe.periodSeconds How often to perform the probe (data nodes pod) + ## @param data.livenessProbe.timeoutSeconds When the probe times out (data nodes pod) + ## @param data.livenessProbe.failureThreshold Minimum consecutive failures for the 
probe to be considered failed after having succeeded + ## @param data.livenessProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed (data nodes pod) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 90 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## Elasticsearch data container's readiness probe + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## @param data.readinessProbe.enabled Enable/disable the readiness probe (data nodes pod) + ## @param data.readinessProbe.initialDelaySeconds Delay before readiness probe is initiated (data nodes pod) + ## @param data.readinessProbe.periodSeconds How often to perform the probe (data nodes pod) + ## @param data.readinessProbe.timeoutSeconds When the probe times out (data nodes pod) + ## @param data.readinessProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded + ## @param data.readinessProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed (data nodes pod) + ## + readinessProbe: + enabled: true + initialDelaySeconds: 90 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param data.customStartupProbe Override default startup probe + ## + customStartupProbe: {} + ## @param data.customLivenessProbe Override default liveness probe + ## + customLivenessProbe: {} + ## @param data.customReadinessProbe Override default readiness probe + ## + customReadinessProbe: {} + ## @param data.initContainers Extra init containers to add to the Elasticsearch data pod(s) + ## + initContainers: [] + ## @param data.sidecars Extra sidecar containers to add to the Elasticsearch data pod(s) + ## + sidecars: [] + ## Enable persistence using Persistent Volume Claims + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param data.persistence.enabled Enable persistence using a `PersistentVolumeClaim` + ## + enabled: true + ## @param data.persistence.storageClass Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param data.persistence.existingClaim Existing Persistent Volume Claim + ## If persistence is enable, and this value is defined, + ## then accept the value as an existing Persistent Volume Claim to which + ## the container should be bound + ## + existingClaim: "" + ## @param data.persistence.existingVolume Existing Persistent Volume for use as volume match label selector to the `volumeClaimTemplate`. Ignored when `data.persistence.selector` ist set. + ## + existingVolume: "" + ## @param data.persistence.selector Configure custom selector for existing Persistent Volume. 
Overwrites `data.persistence.existingVolume` + ## selector: + ## matchLabels: + ## volume: + selector: {} + ## @param data.persistence.annotations Persistent Volume Claim annotations + ## + annotations: {} + ## @param data.persistence.accessModes Persistent Volume Access Modes + ## + accessModes: + - ReadWriteOnce + ## @param data.persistence.size Persistent Volume Size + ## + size: 8Gi + ## Provide functionality to use RBAC + ## + serviceAccount: + ## @param data.serviceAccount.create Enable creation of ServiceAccount for the data node + ## + create: false + ## @param data.serviceAccount.name Name of the created serviceAccount + ## If not set and create is true, a name is generated using the fullname template + ## + name: "" + ## Autoscaling configuration + ## @param data.autoscaling.enabled Enable autoscaling for data replicas + ## @param data.autoscaling.minReplicas Minimum number of data replicas + ## @param data.autoscaling.maxReplicas Maximum number of data replicas + ## @param data.autoscaling.targetCPU Target CPU utilization percentage for data replica autoscaling + ## @param data.autoscaling.targetMemory Target Memory utilization percentage for data replica autoscaling + ## + autoscaling: + enabled: false + minReplicas: 2 + maxReplicas: 11 + targetCPU: "" + targetMemory: "" + +## @section Ingest parameters + +## Elasticsearch ingest node parameters +## +ingest: + ## @param ingest.enabled Enable ingest nodes + ## + enabled: false + ## @param ingest.name Ingest node pod name + ## + name: ingest + ## @param ingest.fullnameOverride String to fully override elasticsearch.ingest.fullname template with a string + ## + fullnameOverride: "" + ## @param ingest.replicas Desired number of Elasticsearch ingest nodes + ## + replicas: 2 + ## Update strategy for ElasticSearch ingest statefulset + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## @param ingest.updateStrategy.type Update strategy for Ingest statefulset + ## + updateStrategy: + type: RollingUpdate + ## @param ingest.heapSize Ingest node heap size + ## + heapSize: 128m + ## @param ingest.podAnnotations Annotations for ingest pods. + ## + podAnnotations: {} + ## @param ingest.hostAliases Add deployment host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param ingest.schedulerName Name of the k8s scheduler (other than default) + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param ingest.podLabels Extra labels to add to Pod + ## + podLabels: {} + ## Pod Security Context for ingest pods. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param ingest.securityContext.enabled Enable security context for ingest pods + ## @param ingest.securityContext.fsGroup Group ID for the container for ingest pods + ## @param ingest.securityContext.runAsUser User ID for the container for ingest pods + ## + securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + ## Pod Security Context for ingest pods. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param ingest.podSecurityContext.enabled Enable security context for ingest pods + ## @param ingest.podSecurityContext.fsGroup Group ID for the container for ingest pods + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + ## Container Security Context for ingest pods. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param ingest.containerSecurityContext.enabled Enable security context for ingest pods + ## @param ingest.containerSecurityContext.runAsUser User ID for the container for ingest pods + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + ## @param ingest.podAffinityPreset Ingest Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param ingest.podAntiAffinityPreset Ingest Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: "" + ## Node affinity preset + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## Allowed values: soft, hard + ## @param ingest.nodeAffinityPreset.type Ingest Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## @param ingest.nodeAffinityPreset.key Ingest Node label key to match Ignored if `affinity` is set. + ## @param ingest.nodeAffinityPreset.values Ingest Node label values to match. Ignored if `affinity` is set. + ## + nodeAffinityPreset: + type: "" + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param ingest.affinity Ingest Affinity for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param ingest.priorityClassName Ingest pods Priority Class Name + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass + ## + priorityClassName: "" + ## @param ingest.nodeSelector Ingest Node labels for pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param ingest.tolerations Ingest Tolerations for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param ingest.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: [] + ## Elasticsearch ingest container's resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
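+ ## Illustrative sketch (not a chart default): the ingest.tolerations value documented
+ ## above accepts a standard Kubernetes tolerations list, e.g. to schedule ingest pods
+ ## onto tainted nodes (the key and value below are placeholders):
+ ## tolerations:
+ ##   - key: "dedicated"
+ ##     operator: "Equal"
+ ##     value: "elasticsearch"
+ ##     effect: "NoSchedule"
+ ##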
+ ## @param ingest.resources.limits The resources limits for the container + ## @param ingest.resources.requests [object] The requested resources for the container + ## + resources: + ## Example: + ## limits: + ## cpu: 100m + ## memory: 384Mi + limits: {} + requests: + cpu: 25m + memory: 256Mi + ## Elasticsearch ingest container's startup probe + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## @param ingest.startupProbe.enabled Enable/disable the startup probe (ingest nodes pod) + ## @param ingest.startupProbe.initialDelaySeconds Delay before startup probe is initiated (ingest nodes pod) + ## @param ingest.startupProbe.periodSeconds How often to perform the probe (ingest nodes pod) + ## @param ingest.startupProbe.timeoutSeconds When the probe times out (ingest nodes pod) + ## @param ingest.startupProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded + ## @param ingest.startupProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed (ingest nodes pod) + ## + startupProbe: + enabled: false + initialDelaySeconds: 90 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## Elasticsearch ingest container's liveness probe + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## @param ingest.livenessProbe.enabled Enable/disable the liveness probe (ingest nodes pod) + ## @param ingest.livenessProbe.initialDelaySeconds Delay before liveness probe is initiated (ingest nodes pod) + ## @param ingest.livenessProbe.periodSeconds How often to perform the probe (ingest nodes pod) + ## @param ingest.livenessProbe.timeoutSeconds When the probe times out (ingest nodes pod) + ## @param ingest.livenessProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded + ## @param ingest.livenessProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed (ingest nodes pod) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 90 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## Elasticsearch ingest container's readiness probe + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## @param ingest.readinessProbe.enabled Enable/disable the readiness probe (ingest nodes pod) + ## @param ingest.readinessProbe.initialDelaySeconds Delay before readiness probe is initiated (ingest nodes pod) + ## @param ingest.readinessProbe.periodSeconds How often to perform the probe (ingest nodes pod) + ## @param ingest.readinessProbe.timeoutSeconds When the probe times out (ingest nodes pod) + ## @param ingest.readinessProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded + ## @param ingest.readinessProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed (ingest nodes pod) + ## + readinessProbe: + enabled: true + initialDelaySeconds: 90 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param ingest.customStartupProbe Override default startup probe + ## + customStartupProbe: {} + ## @param ingest.customLivenessProbe Override default liveness probe + ## + customLivenessProbe: {} + ## @param ingest.customReadinessProbe Override default readiness probe + ## + 
customReadinessProbe: {} + ## @param ingest.initContainers Extra init containers to add to the Elasticsearch ingest pod(s) + ## + initContainers: [] + ## @param ingest.sidecars Extra sidecar containers to add to the Elasticsearch ingest pod(s) + ## + sidecars: [] + ## Service parameters for ingest node(s) + ## + service: + ## @param ingest.service.type Kubernetes Service type (ingest nodes) + ## + type: ClusterIP + ## @param ingest.service.port Kubernetes Service port Elasticsearch transport port (ingest nodes) + ## + port: 9300 + ## @param ingest.service.nodePort Kubernetes Service nodePort (ingest nodes) + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePort: "" + ## @param ingest.service.annotations Annotations for ingest nodes service + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + ## @param ingest.service.loadBalancerIP loadBalancerIP if ingest nodes service type is `LoadBalancer` + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + + ## Provide functionality to use RBAC + ## + serviceAccount: + ## @param ingest.serviceAccount.create Create a default serviceaccount for elasticsearch curator + ## + create: false + ## @param ingest.serviceAccount.name Name of the created serviceAccount + ## If not set and create is true, a name is generated using the fullname template + ## + name: "" + +## @section Curator parameters + +## Elasticsearch curator parameters +## +curator: + ## @param curator.enabled Enable Elasticsearch Curator cron job + enabled: false + ## @param curator.name Elasticsearch Curator pod name + ## + name: curator + ## @param curator.image.registry Elasticsearch Curator image registry + ## @param curator.image.repository Elasticsearch Curator image repository + ## @param curator.image.tag Elasticsearch Curator image tag + ## @param curator.image.pullPolicy Elasticsearch Curator image pull policy + ## @param curator.image.pullSecrets Elasticsearch Curator image pull secrets + ## + image: + registry: docker.io + repository: bitnami/elasticsearch-curator + tag: 5.8.4-debian-10-r215 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
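+ ## For example, such a secret could be created beforehand with (illustrative; registry
+ ## details are placeholders):
+ ##   kubectl create secret docker-registry myRegistryKeySecretName \
+ ##     --docker-server=<registry> --docker-username=<user> --docker-password=<password> \
+ ##     -n <namespace>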
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## @param curator.cronjob.schedule Schedule for the CronJob + ## @param curator.cronjob.annotations Annotations to add to the cronjob + ## @param curator.cronjob.concurrencyPolicy `Allow,Forbid,Replace` concurrent jobs + ## @param curator.cronjob.failedJobsHistoryLimit Specify the number of failed Jobs to keep + ## @param curator.cronjob.successfulJobsHistoryLimit Specify the number of completed Jobs to keep + ## @param curator.cronjob.jobRestartPolicy Control the Job restartPolicy + ## + cronjob: + ## At 01:00 every day + schedule: "0 1 * * *" + annotations: {} + concurrencyPolicy: "" + failedJobsHistoryLimit: "" + successfulJobsHistoryLimit: "" + jobRestartPolicy: Never + ## @param curator.schedulerName Name of the k8s scheduler (other than default) + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param curator.podAnnotations Annotations to add to the pod + ## + podAnnotations: {} + ## @param curator.podLabels Extra labels to add to Pod + ## + podLabels: {} + ## @param curator.podAffinityPreset Curator Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param curator.podAntiAffinityPreset Curator Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: "" + ## Node affinity preset + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## @param curator.nodeAffinityPreset.type Curator Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## @param curator.nodeAffinityPreset.key Curator Node label key to match Ignored if `affinity` is set. + ## @param curator.nodeAffinityPreset.values Curator Node label values to match. Ignored if `affinity` is set. + ## + nodeAffinityPreset: + type: "" + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param curator.initContainers Extra init containers to add to the Elasticsearch coordinating-only pod(s) + ## + initContainers: [] + ## @param curator.sidecars Extra sidecar containers to add to the Elasticsearch ingest pod(s) + ## + sidecars: [] + ## @param curator.affinity Curator Affinity for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param curator.nodeSelector Curator Node labels for pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param curator.tolerations Curator Tolerations for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param curator.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
Evaluated as a template + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: [] + ## @param curator.rbac.enabled Enable RBAC resources + ## + rbac: + enabled: false + ## @param curator.serviceAccount.create Create a default serviceaccount for elasticsearch curator + ## @param curator.serviceAccount.name Name for elasticsearch curator serviceaccount + ## + serviceAccount: + create: true + ## If not set and create is true, a name is generated using the fullname template + ## + name: "" + ## @param curator.psp.create Whether to create a PodSecurityPolicy. WARNING: PodSecurityPolicy is deprecated in Kubernetes v1.21 or later, unavailable in v1.25 or later + ## + psp: + create: false + ## @param curator.hooks [object] Whether to run job on selected hooks + ## + hooks: + install: false + upgrade: false + ## @param curator.dryrun Run Curator in dry-run mode + ## + dryrun: false + ## @param curator.command Command to execute + ## + command: ["curator"] + ## @param curator.env Environment variables to add to the cronjob container + ## + env: {} + ## Curator configMaps + configMaps: + ## @param curator.configMaps.action_file_yml [string] Contents of the Curator action_file.yml + ## Delete indices older than 90 days + ## + action_file_yml: |- + --- + actions: + 1: + action: delete_indices + description: "Clean up ES by deleting old indices" + options: + timeout_override: + continue_if_exception: False + disable_action: False + ignore_empty_list: True + filters: + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: 90 + field: + stats_result: + epoch: + exclude: False + ## @param curator.configMaps.config_yml [string] Contents of the Curator config.yml (overrides config) + ## Default config (this value is evaluated as a template) + ## + config_yml: |- + --- + client: + hosts: + - {{ template "elasticsearch.coordinating.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + port: {{ .Values.coordinating.service.port }} + # url_prefix: + # use_ssl: True + # certificate: + # client_cert: + # client_key: + # ssl_no_validate: True + # http_auth: + # timeout: 30 + # master_only: False + # logging: + # loglevel: INFO + # logfile: + # logformat: default + # blacklist: ['elasticsearch', 'urllib3'] + ## Curator resources requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
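+ ## Illustrative sketch (not a chart default): curator.topologySpreadConstraints documented
+ ## above accepts the standard Kubernetes stanza, e.g. to spread pods across zones
+ ## (the label selector below is an assumption about the release labels):
+ ## topologySpreadConstraints:
+ ##   - maxSkew: 1
+ ##     topologyKey: topology.kubernetes.io/zone
+ ##     whenUnsatisfiable: ScheduleAnyway
+ ##     labelSelector:
+ ##       matchLabels:
+ ##         app.kubernetes.io/name: elasticsearch
+ ##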
+ ## @param curator.resources.limits The resources limits for the container + ## @param curator.resources.requests The requested resources for the container + ## + resources: + ## Example: + ## limits: + ## cpu: 100m + ## memory: 128Mi + limits: {} + ## Examples: + ## requests: + ## cpu: 100m + ## memory: 128Mi + requests: {} + ## @param curator.priorityClassName Curator Pods Priority Class Name + ## + priorityClassName: "" + ## @param curator.extraVolumes Extra volumes + ## Example Use Case: mount ssl certificates when elasticsearch has tls enabled + ## extraVolumes: + ## - name: es-certs + ## secret: + ## defaultMode: 420 + ## secretName: es-certs + extraVolumes: [] + ## @param curator.extraVolumeMounts Mount extra volume(s) + ## extraVolumeMounts: + ## - name: es-certs + ## mountPath: /certs + ## readOnly: true + extraVolumeMounts: [] + ## @param curator.extraInitContainers DEPRECATED. Use `curator.initContainers` instead. Init containers to add to the cronjob container + ## Don't configure S3 repository till Elasticsearch is reachable. + ## Ensure that it is available at http://elasticsearch:9200 + ## + ## elasticsearch-s3-repository: + ## image: bitnami/minideb + ## imagePullPolicy: "IfNotPresent" + ## command: + ## - "/bin/bash" + ## - "-c" + ## args: + ## - | + ## ES_HOST=elasticsearch + ## ES_PORT=9200 + ## ES_REPOSITORY=backup + ## S3_REGION=us-east-1 + ## S3_BUCKET=bucket + ## S3_BASE_PATH=backup + ## S3_COMPRESS=true + ## S3_STORAGE_CLASS=standard + ## install_packages curl && \ + ## ( counter=0; while (( counter++ < 120 )); do curl -s http://${ES_HOST}:${ES_PORT} >/dev/null 2>&1 && break; echo "Waiting for elasticsearch $counter/120"; sleep 1; done ) && \ + ## cat < + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param master.persistence.existingClaim Existing Persistent Volume Claim + ## then accept the value as an existing Persistent Volume Claim to which + ## the container should be bound + ## + existingClaim: "data-elastic" + ## @param master.persistence.existingVolume Existing Persistent Volume for use as volume match label selector to the `volumeClaimTemplate`. Ignored when `master.persistence.selector` is set. + ## + existingVolume: "" + ## @param master.persistence.selector Configure custom selector for existing Persistent Volume. 
Overwrites `master.persistence.existingVolume` + ## selector: + ## matchLabels: + ## volume: + ## + selector: {} + ## @param master.persistence.annotations Persistent Volume Claim annotations + ## + annotations: {} + ## @param master.persistence.accessModes Persistent Volume Access Modes + ## + accessModes: + - ReadWriteOnce + ## @param master.persistence.size Persistent Volume Size + ## + size: 8Gi + ## Service parameters for master-eligible node(s) + ## + service: + ## @param master.service.type Kubernetes Service type (master-eligible nodes) + ## + type: ClusterIP + ## @param master.service.port Kubernetes Service port for Elasticsearch transport port (master-eligible nodes) + ## + port: 9300 + ## @param master.service.nodePort Kubernetes Service nodePort (master-eligible nodes) + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePort: "" + ## @param master.service.annotations Annotations for master-eligible nodes service + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + ## @param master.service.loadBalancerIP loadBalancerIP if master-eligible nodes service type is `LoadBalancer` + ## Set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## Provide functionality to use RBAC + ## + serviceAccount: + ## @param master.serviceAccount.create Enable creation of ServiceAccount for the master node + ## + create: false + ## @param master.serviceAccount.name Name of the created serviceAccount + ## If not set and create is true, a name is generated using the fullname template + name: "" + ## Autoscaling configuration + ## @param master.autoscaling.enabled Enable autoscaling for master replicas + ## @param master.autoscaling.minReplicas Minimum number of master replicas + ## @param master.autoscaling.maxReplicas Maximum number of master replicas + ## @param master.autoscaling.targetCPU Target CPU utilization percentage for master replica autoscaling + ## @param master.autoscaling.targetMemory Target Memory utilization percentage for master replica autoscaling + ## + autoscaling: + enabled: false + minReplicas: 2 + maxReplicas: 11 + targetCPU: "" + targetMemory: "" + +## @section Coordinating parameters + +## Elasticsearch coordinating-only node parameters +## +coordinating: + ## @param coordinating.fullnameOverride String to fully override elasticsearch.coordinating.fullname template with a string + ## + fullnameOverride: "" + ## @param coordinating.replicas Desired number of Elasticsearch coordinating-only nodes + ## + replicas: 1 + ## @param coordinating.hostAliases Add deployment host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param coordinating.schedulerName Name of the k8s scheduler (other than default) + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## Update strategy for ElasticSearch coordinating statefulset + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## @param coordinating.updateStrategy.type Update strategy for Coordinating Statefulset + ## + updateStrategy: + type: RollingUpdate + ## @param coordinating.heapSize Coordinating-only node heap size + ## + heapSize: 128m + ## @param coordinating.podAnnotations Annotations 
for coordinating pods. + ## + podAnnotations: {} + ## @param coordinating.podLabels Extra labels to add to Pod + ## + podLabels: {} + ## Pod Security Context for coordinating-only pods. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param coordinating.securityContext.enabled Enable security context for coordinating-only pods + ## @param coordinating.securityContext.fsGroup Group ID for the container for coordinating-only pods + ## @param coordinating.securityContext.runAsUser User ID for the container for coordinating-only pods + ## + securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + ## Pod Security Context for coordinating-only pods. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param coordinating.podSecurityContext.enabled Enable security context for coordinating-only pods + ## @param coordinating.podSecurityContext.fsGroup Group ID for the container for coordinating-only pods + ## + podSecurityContext: + enabled: false + fsGroup: 1001 + ## Container Security Context for coordinating-only pods. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param coordinating.containerSecurityContext.enabled Enable security context for coordinating-only pods + ## @param coordinating.containerSecurityContext.runAsUser User ID for the container for coordinating-only pods + ## + containerSecurityContext: + enabled: false + runAsUser: 1001 + ## @param coordinating.podAffinityPreset Coordinating Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param coordinating.podAntiAffinityPreset Coordinating Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: "" + ## Node affinity preset + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## @param coordinating.nodeAffinityPreset.type Coordinating Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## @param coordinating.nodeAffinityPreset.key Coordinating Node label key to match Ignored if `affinity` is set. + ## @param coordinating.nodeAffinityPreset.values Coordinating Node label values to match. Ignored if `affinity` is set. + ## + nodeAffinityPreset: + type: "" + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## E.g. 
+ ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param coordinating.affinity Coordinating Affinity for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param coordinating.priorityClassName Coordinating pods Priority Class Name + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass + ## + priorityClassName: "" + ## @param coordinating.nodeSelector Coordinating Node labels for pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param coordinating.tolerations Coordinating Tolerations for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param coordinating.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: [] + ## Elasticsearch coordinating-only container's resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. + ## @param coordinating.resources.limits The resources limits for the container + ## @param coordinating.resources.requests [object] The requested resources for the container + ## + resources: + ## Example: + ## limits: + ## cpu: 100m + ## memory: 384Mi + limits: {} + requests: + cpu: 25m + memory: 256Mi + ## Elasticsearch coordinating-only container's startup probe + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## @param coordinating.startupProbe.enabled Enable/disable the startup probe (coordinating nodes pod) + ## @param coordinating.startupProbe.initialDelaySeconds Delay before startup probe is initiated (coordinating nodes pod) + ## @param coordinating.startupProbe.periodSeconds How often to perform the probe (coordinating nodes pod) + ## @param coordinating.startupProbe.timeoutSeconds When the probe times out (coordinating nodes pod) + ## @param coordinating.startupProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded + ## @param coordinating.startupProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed (coordinating nodes pod) + ## + startupProbe: + enabled: false + initialDelaySeconds: 90 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## Elasticsearch coordinating-only container's liveness probe + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## @param coordinating.livenessProbe.enabled Enable/disable the liveness probe (coordinating-only nodes pod) + ## @param coordinating.livenessProbe.initialDelaySeconds Delay before liveness probe is initiated (coordinating-only nodes pod) + ## @param 
coordinating.livenessProbe.periodSeconds How often to perform the probe (coordinating-only nodes pod) + ## @param coordinating.livenessProbe.timeoutSeconds When the probe times out (coordinating-only nodes pod) + ## @param coordinating.livenessProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded + ## @param coordinating.livenessProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed (coordinating-only nodes pod) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 90 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## Elasticsearch coordinating-only container's readiness probe + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## @param coordinating.readinessProbe.enabled Enable/disable the readiness probe (coordinating-only nodes pod) + ## @param coordinating.readinessProbe.initialDelaySeconds Delay before readiness probe is initiated (coordinating-only nodes pod) + ## @param coordinating.readinessProbe.periodSeconds How often to perform the probe (coordinating-only nodes pod) + ## @param coordinating.readinessProbe.timeoutSeconds When the probe times out (coordinating-only nodes pod) + ## @param coordinating.readinessProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded + ## @param coordinating.readinessProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed (coordinating-only nodes pod) + ## + readinessProbe: + enabled: true + initialDelaySeconds: 90 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param coordinating.customStartupProbe Override default startup probe + ## + customStartupProbe: {} + ## @param coordinating.customLivenessProbe Override default liveness probe + ## + customLivenessProbe: {} + ## @param coordinating.customReadinessProbe Override default readiness probe + ## + customReadinessProbe: {} + ## @param coordinating.initContainers Extra init containers to add to the Elasticsearch coordinating-only pod(s) + ## + initContainers: [] + ## @param coordinating.sidecars Extra sidecar containers to add to the Elasticsearch coordinating-only pod(s) + ## + sidecars: [] + ## Service parameters for coordinating-only node(s) + ## + service: + ## @param coordinating.service.type Kubernetes Service type (coordinating-only nodes) + ## + type: ClusterIP + ## @param coordinating.service.port Kubernetes Service port for REST API (coordinating-only nodes) + ## + port: 9200 + ## @param coordinating.service.nodePort Kubernetes Service nodePort (coordinating-only nodes) + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePort: "" + ## @param coordinating.service.annotations Annotations for coordinating-only nodes service + ## Set the LoadBalancer service type to internal only. 
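+ ## e.g. (illustrative; the annotation key and value depend on the cloud provider):
+ ## annotations:
+ ##   service.beta.kubernetes.io/aws-load-balancer-internal: "true"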
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + ## @param coordinating.service.loadBalancerIP loadBalancerIP if coordinating-only nodes service type is `LoadBalancer` + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param coordinating.service.externalTrafficPolicy Enable client source IP preservation with externalTrafficPolicy: Local + ## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## Provide functionality to use RBAC + ## + serviceAccount: + ## @param coordinating.serviceAccount.create Enable creation of ServiceAccount for the coordinating-only node + ## + create: false + ## @param coordinating.serviceAccount.name Name of the created serviceAccount + ## If not set and create is true, a name is generated using the fullname template + ## + name: "" + ## Autoscaling configuration + ## @param coordinating.autoscaling.enabled Enable autoscaling for coordinating replicas + ## @param coordinating.autoscaling.minReplicas Minimum number of coordinating replicas + ## @param coordinating.autoscaling.maxReplicas Maximum number of coordinating replicas + ## @param coordinating.autoscaling.targetCPU Target CPU utilization percentage for coordinating replica autoscaling + ## @param coordinating.autoscaling.targetMemory Target Memory utilization percentage for coordinating replica autoscaling + ## + autoscaling: + enabled: false + minReplicas: 2 + maxReplicas: 11 + targetCPU: "" + targetMemory: "" + +## @section Data parameters + +## Elasticsearch data node parameters +## +data: + ## @param data.name Data node pod name + ## + name: data + ## @param data.fullnameOverride String to fully override elasticsearch.data.fullname template with a string + ## + fullnameOverride: "" + ## @param data.replicas Desired number of Elasticsearch data nodes + ## + replicas: 2 + ## @param data.hostAliases Add deployment host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param data.schedulerName Name of the k8s scheduler (other than default) + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## Update strategy for ElasticSearch Data statefulset + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## @param data.updateStrategy.type Update strategy for Data statefulset + ## @param data.updateStrategy.rollingUpdatePartition Partition update strategy for Data statefulset + ## + updateStrategy: + type: RollingUpdate + rollingUpdatePartition: "" + ## @param data.heapSize Data node heap size + ## + heapSize: 1024m + ## @param data.podAnnotations Annotations for data pods. + ## + podAnnotations: {} + ## @param data.podLabels Extra labels to add to Pod + ## + podLabels: {} + ## Pod Security Context for data pods. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param data.securityContext.enabled Enable security context for data pods + ## @param data.securityContext.fsGroup Group ID for the container for data pods + ## @param data.securityContext.runAsUser User ID for the container for data pods + ## + securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + ## Pod Security Context for data pods. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param data.podSecurityContext.enabled Enable security context for data pods + ## @param data.podSecurityContext.fsGroup Group ID for the container for data pods + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + ## Container Security Context for data pods. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param data.containerSecurityContext.enabled Enable security context for data pods + ## @param data.containerSecurityContext.runAsUser User ID for the container for data pods + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + ## @param data.podAffinityPreset Data Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param data.podAntiAffinityPreset Data Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: "" + ## Node affinity preset. Allowed values: soft, hard + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## @param data.nodeAffinityPreset.type Data Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## @param data.nodeAffinityPreset.key Data Node label key to match Ignored if `affinity` is set. + ## @param data.nodeAffinityPreset.values Data Node label values to match. Ignored if `affinity` is set. + ## + nodeAffinityPreset: + type: "" + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + + ## @param data.affinity Data Affinity for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param data.priorityClassName Data pods Priority Class Name + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass + ## + priorityClassName: "" + ## @param data.nodeSelector Data Node labels for pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param data.tolerations Data Tolerations for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param data.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
Evaluated as a template + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: [] + ## Elasticsearch data container's resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. + ## @param data.resources.limits The resources limits for the container + ## @param data.resources.requests [object] The requested resources for the container + ## + resources: + ## Example: + ## limits: + ## cpu: 100m + ## memory: 2176Mi + limits: {} + requests: + cpu: 25m + memory: 2048Mi + ## Elasticsearch data container's startup probe + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## @param data.startupProbe.enabled Enable/disable the startup probe (data nodes pod) + ## @param data.startupProbe.initialDelaySeconds Delay before startup probe is initiated (data nodes pod) + ## @param data.startupProbe.periodSeconds How often to perform the probe (data nodes pod) + ## @param data.startupProbe.timeoutSeconds When the probe times out (data nodes pod) + ## @param data.startupProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded + ## @param data.startupProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed (data nodes pod) + ## + startupProbe: + enabled: false + initialDelaySeconds: 90 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## Elasticsearch data container's liveness probe + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## @param data.livenessProbe.enabled Enable/disable the liveness probe (data nodes pod) + ## @param data.livenessProbe.initialDelaySeconds Delay before liveness probe is initiated (data nodes pod) + ## @param data.livenessProbe.periodSeconds How often to perform the probe (data nodes pod) + ## @param data.livenessProbe.timeoutSeconds When the probe times out (data nodes pod) + ## @param data.livenessProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded + ## @param data.livenessProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed (data nodes pod) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 90 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## Elasticsearch data container's readiness probe + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## @param data.readinessProbe.enabled Enable/disable the readiness probe (data nodes pod) + ## @param data.readinessProbe.initialDelaySeconds Delay before readiness probe is initiated (data nodes pod) + ## @param data.readinessProbe.periodSeconds How often to perform the probe (data nodes pod) + ## @param data.readinessProbe.timeoutSeconds When the probe times out (data nodes pod) + ## @param data.readinessProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after 
having succeeded + ## @param data.readinessProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed (data nodes pod) + ## + readinessProbe: + enabled: true + initialDelaySeconds: 90 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param data.customStartupProbe Override default startup probe + ## + customStartupProbe: {} + ## @param data.customLivenessProbe Override default liveness probe + ## + customLivenessProbe: {} + ## @param data.customReadinessProbe Override default readiness probe + ## + customReadinessProbe: {} + ## @param data.initContainers Extra init containers to add to the Elasticsearch data pod(s) + ## + initContainers: [] + ## @param data.sidecars Extra sidecar containers to add to the Elasticsearch data pod(s) + ## + sidecars: [] + ## Enable persistence using Persistent Volume Claims + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param data.persistence.enabled Enable persistence using a `PersistentVolumeClaim` + ## + enabled: true + ## @param data.persistence.storageClass Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param data.persistence.existingClaim Existing Persistent Volume Claim + ## If persistence is enable, and this value is defined, + ## then accept the value as an existing Persistent Volume Claim to which + ## the container should be bound + ## + existingClaim: "" + ## @param data.persistence.existingVolume Existing Persistent Volume for use as volume match label selector to the `volumeClaimTemplate`. Ignored when `data.persistence.selector` ist set. + ## + existingVolume: "" + ## @param data.persistence.selector Configure custom selector for existing Persistent Volume. 
Overwrites `data.persistence.existingVolume` + ## selector: + ## matchLabels: + ## volume: + selector: {} + ## @param data.persistence.annotations Persistent Volume Claim annotations + ## + annotations: {} + ## @param data.persistence.accessModes Persistent Volume Access Modes + ## + accessModes: + - ReadWriteOnce + ## @param data.persistence.size Persistent Volume Size + ## + size: 8Gi + ## Provide functionality to use RBAC + ## + serviceAccount: + ## @param data.serviceAccount.create Enable creation of ServiceAccount for the data node + ## + create: false + ## @param data.serviceAccount.name Name of the created serviceAccount + ## If not set and create is true, a name is generated using the fullname template + ## + name: "" + ## Autoscaling configuration + ## @param data.autoscaling.enabled Enable autoscaling for data replicas + ## @param data.autoscaling.minReplicas Minimum number of data replicas + ## @param data.autoscaling.maxReplicas Maximum number of data replicas + ## @param data.autoscaling.targetCPU Target CPU utilization percentage for data replica autoscaling + ## @param data.autoscaling.targetMemory Target Memory utilization percentage for data replica autoscaling + ## + autoscaling: + enabled: false + minReplicas: 2 + maxReplicas: 11 + targetCPU: "" + targetMemory: "" + +## @section Ingest parameters + +## Elasticsearch ingest node parameters +## +ingest: + ## @param ingest.enabled Enable ingest nodes + ## + enabled: false + ## @param ingest.name Ingest node pod name + ## + name: ingest + ## @param ingest.fullnameOverride String to fully override elasticsearch.ingest.fullname template with a string + ## + fullnameOverride: "" + ## @param ingest.replicas Desired number of Elasticsearch ingest nodes + ## + replicas: 2 + ## Update strategy for ElasticSearch ingest statefulset + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## @param ingest.updateStrategy.type Update strategy for Ingest statefulset + ## + updateStrategy: + type: RollingUpdate + ## @param ingest.heapSize Ingest node heap size + ## + heapSize: 128m + ## @param ingest.podAnnotations Annotations for ingest pods. + ## + podAnnotations: {} + ## @param ingest.hostAliases Add deployment host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param ingest.schedulerName Name of the k8s scheduler (other than default) + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param ingest.podLabels Extra labels to add to Pod + ## + podLabels: {} + ## Pod Security Context for ingest pods. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param ingest.securityContext.enabled Enable security context for ingest pods + ## @param ingest.securityContext.fsGroup Group ID for the container for ingest pods + ## @param ingest.securityContext.runAsUser User ID for the container for ingest pods + ## + securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + ## Pod Security Context for ingest pods. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param ingest.podSecurityContext.enabled Enable security context for ingest pods + ## @param ingest.podSecurityContext.fsGroup Group ID for the container for ingest pods + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + ## Container Security Context for ingest pods. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param ingest.containerSecurityContext.enabled Enable security context for ingest pods + ## @param ingest.containerSecurityContext.runAsUser User ID for the container for ingest pods + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + ## @param ingest.podAffinityPreset Ingest Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param ingest.podAntiAffinityPreset Ingest Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: "" + ## Node affinity preset + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## Allowed values: soft, hard + ## @param ingest.nodeAffinityPreset.type Ingest Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## @param ingest.nodeAffinityPreset.key Ingest Node label key to match Ignored if `affinity` is set. + ## @param ingest.nodeAffinityPreset.values Ingest Node label values to match. Ignored if `affinity` is set. + ## + nodeAffinityPreset: + type: "" + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param ingest.affinity Ingest Affinity for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param ingest.priorityClassName Ingest pods Priority Class Name + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass + ## + priorityClassName: "" + ## @param ingest.nodeSelector Ingest Node labels for pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param ingest.tolerations Ingest Tolerations for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param ingest.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: [] + ## Elasticsearch ingest container's resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
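+ ## Illustrative sketch (not a chart default): ingest.affinity documented above accepts a
+ ## standard Kubernetes affinity stanza, e.g. a soft anti-affinity to avoid co-locating
+ ## ingest pods on one node (the label selector is an assumption about the release labels;
+ ## the chart's podAntiAffinityPreset can generate a similar rule):
+ ## affinity:
+ ##   podAntiAffinity:
+ ##     preferredDuringSchedulingIgnoredDuringExecution:
+ ##       - weight: 100
+ ##         podAffinityTerm:
+ ##           topologyKey: kubernetes.io/hostname
+ ##           labelSelector:
+ ##             matchLabels:
+ ##               app.kubernetes.io/name: elasticsearch
+ ##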
+ ## @param ingest.resources.limits The resources limits for the container + ## @param ingest.resources.requests [object] The requested resources for the container + ## + resources: + ## Example: + ## limits: + ## cpu: 100m + ## memory: 384Mi + limits: {} + requests: + cpu: 25m + memory: 256Mi + ## Elasticsearch ingest container's startup probe + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## @param ingest.startupProbe.enabled Enable/disable the startup probe (ingest nodes pod) + ## @param ingest.startupProbe.initialDelaySeconds Delay before startup probe is initiated (ingest nodes pod) + ## @param ingest.startupProbe.periodSeconds How often to perform the probe (ingest nodes pod) + ## @param ingest.startupProbe.timeoutSeconds When the probe times out (ingest nodes pod) + ## @param ingest.startupProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded + ## @param ingest.startupProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed (ingest nodes pod) + ## + startupProbe: + enabled: false + initialDelaySeconds: 90 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## Elasticsearch ingest container's liveness probe + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## @param ingest.livenessProbe.enabled Enable/disable the liveness probe (ingest nodes pod) + ## @param ingest.livenessProbe.initialDelaySeconds Delay before liveness probe is initiated (ingest nodes pod) + ## @param ingest.livenessProbe.periodSeconds How often to perform the probe (ingest nodes pod) + ## @param ingest.livenessProbe.timeoutSeconds When the probe times out (ingest nodes pod) + ## @param ingest.livenessProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded + ## @param ingest.livenessProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed (ingest nodes pod) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 90 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## Elasticsearch ingest container's readiness probe + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## @param ingest.readinessProbe.enabled Enable/disable the readiness probe (ingest nodes pod) + ## @param ingest.readinessProbe.initialDelaySeconds Delay before readiness probe is initiated (ingest nodes pod) + ## @param ingest.readinessProbe.periodSeconds How often to perform the probe (ingest nodes pod) + ## @param ingest.readinessProbe.timeoutSeconds When the probe times out (ingest nodes pod) + ## @param ingest.readinessProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded + ## @param ingest.readinessProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed (ingest nodes pod) + ## + readinessProbe: + enabled: true + initialDelaySeconds: 90 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param ingest.customStartupProbe Override default startup probe + ## + customStartupProbe: {} + ## @param ingest.customLivenessProbe Override default liveness probe + ## + customLivenessProbe: {} + ## @param ingest.customReadinessProbe Override default readiness probe + ## + 
customReadinessProbe: {} + ## @param ingest.initContainers Extra init containers to add to the Elasticsearch ingest pod(s) + ## + initContainers: [] + ## @param ingest.sidecars Extra sidecar containers to add to the Elasticsearch ingest pod(s) + ## + sidecars: [] + ## Service parameters for ingest node(s) + ## + service: + ## @param ingest.service.type Kubernetes Service type (ingest nodes) + ## + type: ClusterIP + ## @param ingest.service.port Kubernetes Service port Elasticsearch transport port (ingest nodes) + ## + port: 9300 + ## @param ingest.service.nodePort Kubernetes Service nodePort (ingest nodes) + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePort: "" + ## @param ingest.service.annotations Annotations for ingest nodes service + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + ## @param ingest.service.loadBalancerIP loadBalancerIP if ingest nodes service type is `LoadBalancer` + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + + ## Provide functionality to use RBAC + ## + serviceAccount: + ## @param ingest.serviceAccount.create Create a default serviceaccount for elasticsearch curator + ## + create: false + ## @param ingest.serviceAccount.name Name of the created serviceAccount + ## If not set and create is true, a name is generated using the fullname template + ## + name: "" + +## @section Curator parameters + +## Elasticsearch curator parameters +## +curator: + ## @param curator.enabled Enable Elasticsearch Curator cron job + enabled: false + ## @param curator.name Elasticsearch Curator pod name + ## + name: curator + ## @param curator.image.registry Elasticsearch Curator image registry + ## @param curator.image.repository Elasticsearch Curator image repository + ## @param curator.image.tag Elasticsearch Curator image tag + ## @param curator.image.pullPolicy Elasticsearch Curator image pull policy + ## @param curator.image.pullSecrets Elasticsearch Curator image pull secrets + ## + image: + registry: docker.io + repository: bitnami/elasticsearch-curator + tag: 5.8.4-debian-10-r215 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+ ## e.g:
+ ## pullSecrets:
+ ## - myRegistryKeySecretName
+ ##
+ pullSecrets: []
+ ## @param curator.cronjob.schedule Schedule for the CronJob
+ ## @param curator.cronjob.annotations Annotations to add to the cronjob
+ ## @param curator.cronjob.concurrencyPolicy Concurrency policy for the CronJob (`Allow`, `Forbid` or `Replace`)
+ ## @param curator.cronjob.failedJobsHistoryLimit Specify the number of failed Jobs to keep
+ ## @param curator.cronjob.successfulJobsHistoryLimit Specify the number of completed Jobs to keep
+ ## @param curator.cronjob.jobRestartPolicy Control the Job restartPolicy
+ ##
+ cronjob:
+ ## At 01:00 every day
+ schedule: "0 1 * * *"
+ annotations: {}
+ concurrencyPolicy: ""
+ failedJobsHistoryLimit: ""
+ successfulJobsHistoryLimit: ""
+ jobRestartPolicy: Never
+ ## @param curator.schedulerName Name of the k8s scheduler (other than default)
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
+ ##
+ schedulerName: ""
+ ## @param curator.podAnnotations Annotations to add to the pod
+ ##
+ podAnnotations: {}
+ ## @param curator.podLabels Extra labels to add to Pod
+ ##
+ podLabels: {}
+ ## @param curator.podAffinityPreset Curator Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
+ ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+ ##
+ podAffinityPreset: ""
+ ## @param curator.podAntiAffinityPreset Curator Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
+ ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+ ##
+ podAntiAffinityPreset: ""
+ ## Node affinity preset
+ ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
+ ## @param curator.nodeAffinityPreset.type Curator Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
+ ## @param curator.nodeAffinityPreset.key Curator Node label key to match. Ignored if `affinity` is set.
+ ## @param curator.nodeAffinityPreset.values Curator Node label values to match. Ignored if `affinity` is set.
+ ##
+ nodeAffinityPreset:
+ type: ""
+ ## E.g.
+ ## key: "kubernetes.io/e2e-az-name"
+ ##
+ key: ""
+ ## E.g.
+ ## values:
+ ## - e2e-az1
+ ## - e2e-az2
+ ##
+ values: []
+ ## @param curator.initContainers Extra init containers to add to the Elasticsearch Curator pod(s)
+ ##
+ initContainers: []
+ ## @param curator.sidecars Extra sidecar containers to add to the Elasticsearch Curator pod(s)
+ ##
+ sidecars: []
+ ## @param curator.affinity Curator Affinity for pod assignment
+ ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+ ## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set
+ ##
+ affinity: {}
+ ## @param curator.nodeSelector Curator Node labels for pod assignment
+ ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+ ##
+ nodeSelector: {}
+ ## @param curator.tolerations Curator Tolerations for pod assignment
+ ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+ ##
+ tolerations: []
+ ## @param curator.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains.
Evaluated as a template + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: [] + ## @param curator.rbac.enabled Enable RBAC resources + ## + rbac: + enabled: false + ## @param curator.serviceAccount.create Create a default serviceaccount for elasticsearch curator + ## @param curator.serviceAccount.name Name for elasticsearch curator serviceaccount + ## + serviceAccount: + create: true + ## If not set and create is true, a name is generated using the fullname template + ## + name: "" + ## @param curator.psp.create Whether to create a PodSecurityPolicy. WARNING: PodSecurityPolicy is deprecated in Kubernetes v1.21 or later, unavailable in v1.25 or later + ## + psp: + create: false + ## @param curator.hooks [object] Whether to run job on selected hooks + ## + hooks: + install: false + upgrade: false + ## @param curator.dryrun Run Curator in dry-run mode + ## + dryrun: false + ## @param curator.command Command to execute + ## + command: ["curator"] + ## @param curator.env Environment variables to add to the cronjob container + ## + env: {} + ## Curator configMaps + configMaps: + ## @param curator.configMaps.action_file_yml [string] Contents of the Curator action_file.yml + ## Delete indices older than 90 days + ## + action_file_yml: |- + --- + actions: + 1: + action: delete_indices + description: "Clean up ES by deleting old indices" + options: + timeout_override: + continue_if_exception: False + disable_action: False + ignore_empty_list: True + filters: + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: 90 + field: + stats_result: + epoch: + exclude: False + ## @param curator.configMaps.config_yml [string] Contents of the Curator config.yml (overrides config) + ## Default config (this value is evaluated as a template) + ## + config_yml: |- + --- + client: + hosts: + - {{ template "elasticsearch.coordinating.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + port: {{ .Values.coordinating.service.port }} + # url_prefix: + # use_ssl: True + # certificate: + # client_cert: + # client_key: + # ssl_no_validate: True + # http_auth: + # timeout: 30 + # master_only: False + # logging: + # loglevel: INFO + # logfile: + # logformat: default + # blacklist: ['elasticsearch', 'urllib3'] + ## Curator resources requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ ## @param curator.resources.limits The resources limits for the container + ## @param curator.resources.requests The requested resources for the container + ## + resources: + ## Example: + ## limits: + ## cpu: 100m + ## memory: 128Mi + limits: {} + ## Examples: + ## requests: + ## cpu: 100m + ## memory: 128Mi + requests: {} + ## @param curator.priorityClassName Curator Pods Priority Class Name + ## + priorityClassName: "" + ## @param curator.extraVolumes Extra volumes + ## Example Use Case: mount ssl certificates when elasticsearch has tls enabled + ## extraVolumes: + ## - name: es-certs + ## secret: + ## defaultMode: 420 + ## secretName: es-certs + extraVolumes: [] + ## @param curator.extraVolumeMounts Mount extra volume(s) + ## extraVolumeMounts: + ## - name: es-certs + ## mountPath: /certs + ## readOnly: true + extraVolumeMounts: [] + ## @param curator.extraInitContainers DEPRECATED. Use `curator.initContainers` instead. Init containers to add to the cronjob container + ## Don't configure S3 repository till Elasticsearch is reachable. + ## Ensure that it is available at http://elasticsearch:9200 + ## + ## elasticsearch-s3-repository: + ## image: bitnami/minideb + ## imagePullPolicy: "IfNotPresent" + ## command: + ## - "/bin/bash" + ## - "-c" + ## args: + ## - | + ## ES_HOST=elasticsearch + ## ES_PORT=9200 + ## ES_REPOSITORY=backup + ## S3_REGION=us-east-1 + ## S3_BUCKET=bucket + ## S3_BASE_PATH=backup + ## S3_COMPRESS=true + ## S3_STORAGE_CLASS=standard + ## install_packages curl && \ + ## ( counter=0; while (( counter++ < 120 )); do curl -s http://${ES_HOST}:${ES_PORT} >/dev/null 2>&1 && break; echo "Waiting for elasticsearch $counter/120"; sleep 1; done ) && \ + ## cat <