es mongodb

marsal wang
2021-12-24 15:55:52 +08:00
parent 98c8ddd6f5
commit de34586660
77 changed files with 15072 additions and 0 deletions


@@ -44,6 +44,40 @@ helm upgrade -i rabbitmq-crm1 rabbitmq/ --values local-values/rabbitmq/crm1.yam
```
## redis
```
cd redis && helm dependency update && cd ..
helm upgrade -i redis-crm1 redis/ --values local-values/redis/crm1.yaml -n crm1
```
## mongodb
```
cd mongodb && helm dependency update && cd ..
helm upgrade -i mongodb-crm1 mongodb/ --values local-values/mongodb/crm1.yaml -n crm1
```
## elasticsearch
```
cd elasticsearch && helm dependency update && cd ..
helm upgrade -i elasticsearch-crm1 elasticsearch/ --values local-values/es/crm1.yaml -n crm1
```
## nacos
https://hub.fastgit.org/nacos-group/nacos-k8s.git

21
elasticsearch/.helmignore Normal file

@@ -0,0 +1,21 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj

9
elasticsearch/Chart.lock Normal file

@@ -0,0 +1,9 @@
dependencies:
- name: common
repository: https://charts.bitnami.com/bitnami
version: 1.10.3
- name: kibana
repository: https://charts.bitnami.com/bitnami
version: 9.1.6
digest: sha256:041be74b9823cc010a01721868fb21c97a05e6e1f5bf72df50d85fc7a7bdec4c
generated: "2021-12-19T20:26:11.528367846Z"

28
elasticsearch/Chart.yaml Normal file

@@ -0,0 +1,28 @@
annotations:
category: Analytics
apiVersion: v2
appVersion: 7.16.2
dependencies:
- name: common
repository: https://charts.bitnami.com/bitnami
tags:
- bitnami-common
version: 1.x.x
- condition: global.kibanaEnabled
name: kibana
repository: https://charts.bitnami.com/bitnami
version: 9.x.x
description: A highly scalable open-source full-text search and analytics engine
engine: gotpl
home: https://github.com/bitnami/charts/tree/master/bitnami/elasticsearch
icon: https://bitnami.com/assets/stacks/elasticsearch/img/elasticsearch-stack-220x234.png
keywords:
- elasticsearch
maintainers:
- email: containers@bitnami.com
name: Bitnami
name: elasticsearch
sources:
- https://github.com/bitnami/bitnami-docker-elasticsearch
- https://www.elastic.co/products/elasticsearch
version: 17.5.5

822
elasticsearch/README.md Normal file

@@ -0,0 +1,822 @@
# Elasticsearch
[Elasticsearch](https://www.elastic.co/products/elasticsearch) is a highly scalable open-source full-text search and analytics engine. It allows you to store, search, and analyze big volumes of data quickly and in near real time.
## TL;DR
```console
$ helm repo add bitnami https://charts.bitnami.com/bitnami
$ helm install my-release bitnami/elasticsearch
```
## Introduction
This chart bootstraps an [Elasticsearch](https://github.com/bitnami/bitnami-docker-elasticsearch) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications.
## Prerequisites
- Kubernetes 1.12+
- Helm 3.1.0
- PV provisioner support in the underlying infrastructure
## Installing the Chart
To install the chart with the release name `my-release`:
```console
$ helm repo add bitnami https://charts.bitnami.com/bitnami
$ helm install my-release bitnami/elasticsearch
```
These commands deploy Elasticsearch on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation.
> **Tip**: List all releases using `helm list`
## Uninstalling the Chart
To uninstall/delete the `my-release` release:
```console
$ helm delete my-release
```
The command removes all the Kubernetes components associated with the chart and deletes the release. With Helm v2 you would additionally pass the `--purge` option to remove the release history (Helm v3, which this chart requires, removes it by default):
```console
$ helm delete --purge my-release
```
## Parameters
### Global parameters
| Name | Description | Value |
| -------------------------- | ------------------------------------------------------------------ | ------------------- |
| `global.imageRegistry` | Global Docker image registry | `""` |
| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` |
| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` |
| `global.coordinating.name` | Coordinating name to be used in the Kibana subchart (service name) | `coordinating-only` |
| `global.kibanaEnabled` | Whether or not to enable Kibana | `false` |
### Common parameters
| Name | Description | Value |
| ------------------------ | -------------------------------------------------------------------------------------------- | --------------- |
| `nameOverride` | String to partially override common.names.fullname template (will maintain the release name) | `""` |
| `fullnameOverride` | String to fully override common.names.fullname template | `""` |
| `clusterDomain` | Kubernetes cluster domain | `cluster.local` |
| `extraDeploy` | Array of extra objects to deploy with the release | `[]` |
| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` |
| `diagnosticMode.command` | Command to override all containers in the deployment | `["sleep"]` |
| `diagnosticMode.args` | Args to override all containers in the deployment | `["infinity"]` |
### Elasticsearch parameters
| Name | Description | Value |
| ------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------ |
| `image.registry` | Elasticsearch image registry | `docker.io` |
| `image.repository` | Elasticsearch image repository | `bitnami/elasticsearch` |
| `image.tag` | Elasticsearch image tag (immutable tags are recommended) | `7.15.2-debian-10-r10` |
| `image.pullPolicy` | Elasticsearch image pull policy | `IfNotPresent` |
| `image.pullSecrets` | Elasticsearch image pull secrets | `[]` |
| `image.debug` | Enable image debug mode | `false` |
| `security.enabled` | Enable X-Pack Security settings | `false` |
| `security.elasticPassword` | Password for 'elastic' user | `""` |
| `security.existingSecret` | Name of the existing secret containing the Elasticsearch password | `""` |
| `security.fipsMode` | Configure elasticsearch with FIPS 140 compliant mode | `false` |
| `security.tls.restEncryption` | Enable SSL/TLS encryption for Elasticsearch REST API. | `true` |
| `security.tls.autoGenerated` | Create self-signed TLS certificates. | `false` |
| `security.tls.verificationMode` | Verification mode for SSL communications. | `full` |
| `security.tls.master.existingSecret` | Existing secret containing the certificates for the master nodes | `""` |
| `security.tls.data.existingSecret` | Existing secret containing the certificates for the data nodes | `""` |
| `security.tls.ingest.existingSecret` | Existing secret containing the certificates for the ingest nodes | `""` |
| `security.tls.coordinating.existingSecret` | Existing secret containing the certificates for the coordinating nodes | `""` |
| `security.tls.keystorePassword` | Password to access the JKS/PKCS12 keystore or PEM key when they are password-protected. | `""` |
| `security.tls.truststorePassword` | Password to access the JKS/PKCS12 truststore when they are password-protected. | `""` |
| `security.tls.keystoreFilename` | Name of the keystore file | `elasticsearch.keystore.jks` |
| `security.tls.truststoreFilename` | Name of the truststore | `elasticsearch.truststore.jks` |
| `security.tls.usePemCerts` | Use this variable if your secrets contain PEM certificates instead of JKS/PKCS12 | `false` |
| `security.tls.keyPassword` | Password to access the PEM key when they are password-protected. | `""` |
| `name` | Elasticsearch cluster name | `elastic` |
| `plugins` | Comma, semi-colon or space separated list of plugins to install at initialization | `""` |
| `snapshotRepoPath` | File System snapshot repository path | `""` |
| `config` | Override elasticsearch configuration | `{}` |
| `extraConfig` | Append extra configuration to the elasticsearch node configuration | `{}` |
| `extraVolumes` | A list of volumes to be added to the pod | `[]` |
| `extraVolumeMounts` | A list of volume mounts to be added to the pod | `[]` |
| `initScripts` | Dictionary of init scripts. Evaluated as a template. | `{}` |
| `initScriptsCM` | ConfigMap with the init scripts. Evaluated as a template. | `""` |
| `initScriptsSecret` | Secret containing `/docker-entrypoint-initdb.d` scripts to be executed at initialization time that contain sensitive data. Evaluated as a template. | `""` |
| `extraEnvVars` | Array containing extra env vars to be added to all pods (evaluated as a template) | `[]` |
| `extraEnvVarsConfigMap` | ConfigMap containing extra env vars to be added to all pods (evaluated as a template) | `""` |
| `extraEnvVarsSecret` | Secret containing extra env vars to be added to all pods (evaluated as a template) | `""` |
### Master parameters
| Name | Description | Value |
| ------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------- |
| `master.name` | Master-eligible node pod name | `master` |
| `master.fullnameOverride` | String to fully override elasticsearch.master.fullname template with a string | `""` |
| `master.replicas` | Desired number of Elasticsearch master-eligible nodes. Consider using an odd number of master nodes to prevent "split brain" situation. See: https://www.elastic.co/guide/en/elasticsearch/reference/7.x/modules-discovery-voting.html | `3` |
| `master.updateStrategy.type` | Update strategy for Master statefulset | `RollingUpdate` |
| `master.hostAliases` | Add deployment host aliases | `[]` |
| `master.schedulerName` | Name of the k8s scheduler (other than default) | `""` |
| `master.heapSize` | Master-eligible node heap size | `128m` |
| `master.podAnnotations` | Annotations for master-eligible pods. | `{}` |
| `master.podLabels` | Extra labels to add to Pod | `{}` |
| `master.securityContext.enabled` | Enable security context for master-eligible pods | `true` |
| `master.securityContext.fsGroup` | Group ID for the container for master-eligible pods | `1001` |
| `master.securityContext.runAsUser` | User ID for the container for master-eligible pods | `1001` |
| `master.podSecurityContext.enabled` | Enable security context for master-eligible pods | `false` |
| `master.podSecurityContext.fsGroup` | Group ID for the container for master-eligible pods | `1001` |
| `master.containerSecurityContext.enabled` | Enable security context for master-eligible pods | `false` |
| `master.containerSecurityContext.runAsUser` | User ID for the container for master-eligible pods | `1001` |
| `master.podAffinityPreset` | Master-eligible Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `master.podAntiAffinityPreset` | Master-eligible Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `master.nodeAffinityPreset.type` | Master-eligible Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `master.nodeAffinityPreset.key` | Master-eligible Node label key to match Ignored if `affinity` is set. | `""` |
| `master.nodeAffinityPreset.values` | Master-eligible Node label values to match. Ignored if `affinity` is set. | `[]` |
| `master.affinity` | Master-eligible Affinity for pod assignment | `{}` |
| `master.priorityClassName` | Master pods Priority Class Name | `""` |
| `master.nodeSelector` | Master-eligible Node labels for pod assignment | `{}` |
| `master.tolerations` | Master-eligible Tolerations for pod assignment | `[]` |
| `master.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` |
| `master.resources.limits` | The resources limits for the container | `{}` |
| `master.resources.requests` | The requested resources for the container | `{}` |
| `master.startupProbe.enabled` | Enable/disable the startup probe (master nodes pod) | `false` |
| `master.startupProbe.initialDelaySeconds` | Delay before startup probe is initiated (master nodes pod) | `90` |
| `master.startupProbe.periodSeconds` | How often to perform the probe (master nodes pod) | `10` |
| `master.startupProbe.timeoutSeconds` | When the probe times out (master nodes pod) | `5` |
| `master.startupProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (master nodes pod) | `1` |
| `master.startupProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` |
| `master.livenessProbe.enabled` | Enable/disable the liveness probe (master-eligible nodes pod) | `true` |
| `master.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (master-eligible nodes pod) | `90` |
| `master.livenessProbe.periodSeconds` | How often to perform the probe (master-eligible nodes pod) | `10` |
| `master.livenessProbe.timeoutSeconds` | When the probe times out (master-eligible nodes pod) | `5` |
| `master.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (master-eligible nodes pod) | `1` |
| `master.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` |
| `master.readinessProbe.enabled` | Enable/disable the readiness probe (master-eligible nodes pod) | `true` |
| `master.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (master-eligible nodes pod) | `90` |
| `master.readinessProbe.periodSeconds` | How often to perform the probe (master-eligible nodes pod) | `10` |
| `master.readinessProbe.timeoutSeconds` | When the probe times out (master-eligible nodes pod) | `5` |
| `master.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (master-eligible nodes pod) | `1` |
| `master.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` |
| `master.customStartupProbe` | Override default startup probe | `{}` |
| `master.customLivenessProbe` | Override default liveness probe | `{}` |
| `master.customReadinessProbe` | Override default readiness probe | `{}` |
| `master.initContainers` | Extra init containers to add to the Elasticsearch master-eligible pod(s) | `[]` |
| `master.sidecars` | Extra sidecar containers to add to the Elasticsearch master-eligible pod(s) | `[]` |
| `master.persistence.enabled` | Enable persistence using a `PersistentVolumeClaim` | `true` |
| `master.persistence.storageClass` | Persistent Volume Storage Class | `""` |
| `master.persistence.existingClaim` | Existing Persistent Volume Claim | `""` |
| `master.persistence.existingVolume` | Existing Persistent Volume for use as volume match label selector to the `volumeClaimTemplate`. Ignored when `master.persistence.selector` is set. | `""` |
| `master.persistence.selector` | Configure custom selector for existing Persistent Volume. Overwrites `master.persistence.existingVolume` | `{}` |
| `master.persistence.annotations` | Persistent Volume Claim annotations | `{}` |
| `master.persistence.accessModes` | Persistent Volume Access Modes | `["ReadWriteOnce"]` |
| `master.persistence.size` | Persistent Volume Size | `8Gi` |
| `master.service.type` | Kubernetes Service type (master-eligible nodes) | `ClusterIP` |
| `master.service.port` | Kubernetes Service port for Elasticsearch transport port (master-eligible nodes) | `9300` |
| `master.service.nodePort` | Kubernetes Service nodePort (master-eligible nodes) | `""` |
| `master.service.annotations` | Annotations for master-eligible nodes service | `{}` |
| `master.service.loadBalancerIP` | loadBalancerIP if master-eligible nodes service type is `LoadBalancer` | `""` |
| `master.serviceAccount.create` | Enable creation of ServiceAccount for the master node | `false` |
| `master.serviceAccount.name` | Name of the created serviceAccount | `""` |
| `master.autoscaling.enabled` | Enable autoscaling for master replicas | `false` |
| `master.autoscaling.minReplicas` | Minimum number of master replicas | `2` |
| `master.autoscaling.maxReplicas` | Maximum number of master replicas | `11` |
| `master.autoscaling.targetCPU` | Target CPU utilization percentage for master replica autoscaling | `""` |
| `master.autoscaling.targetMemory` | Target Memory utilization percentage for master replica autoscaling | `""` |
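For reference, a hedged values sketch that sizes the master-eligible tier with the parameters above; the replica count, heap size and resource figures are illustrative, not recommendations:
```yaml
master:
  replicas: 3          # odd number of master-eligible nodes to avoid "split brain"
  heapSize: 512m       # illustrative; keep the heap at roughly half of the memory request
  resources:
    requests:
      cpu: 250m
      memory: 1Gi
    limits:
      memory: 1Gi
```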
### Coordinating parameters
| Name | Description | Value |
| ------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------- | --------------- |
| `coordinating.fullnameOverride` | String to fully override elasticsearch.coordinating.fullname template with a string | `""` |
| `coordinating.replicas` | Desired number of Elasticsearch coordinating-only nodes | `2` |
| `coordinating.hostAliases` | Add deployment host aliases | `[]` |
| `coordinating.schedulerName` | Name of the k8s scheduler (other than default) | `""` |
| `coordinating.updateStrategy.type` | Update strategy for Coordinating Statefulset | `RollingUpdate` |
| `coordinating.heapSize` | Coordinating-only node heap size | `128m` |
| `coordinating.podAnnotations` | Annotations for coordinating pods. | `{}` |
| `coordinating.podLabels` | Extra labels to add to Pod | `{}` |
| `coordinating.securityContext.enabled` | Enable security context for coordinating-only pods | `true` |
| `coordinating.securityContext.fsGroup` | Group ID for the container for coordinating-only pods | `1001` |
| `coordinating.securityContext.runAsUser` | User ID for the container for coordinating-only pods | `1001` |
| `coordinating.podSecurityContext.enabled` | Enable security context for coordinating pods | `false` |
| `coordinating.podSecurityContext.fsGroup` | Group ID for the container for coordinating pods | `1001` |
| `coordinating.containerSecurityContext.enabled` | Enable security context for coordinating pods | `false` |
| `coordinating.containerSecurityContext.runAsUser` | User ID for the container for coordinating pods | `1001` |
| `coordinating.podAffinityPreset` | Coordinating Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `coordinating.podAntiAffinityPreset` | Coordinating Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `coordinating.nodeAffinityPreset.type` | Coordinating Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `coordinating.nodeAffinityPreset.key` | Coordinating Node label key to match Ignored if `affinity` is set. | `""` |
| `coordinating.nodeAffinityPreset.values` | Coordinating Node label values to match. Ignored if `affinity` is set. | `[]` |
| `coordinating.affinity` | Coordinating Affinity for pod assignment | `{}` |
| `coordinating.priorityClassName` | Coordinating pods Priority Class Name | `""` |
| `coordinating.nodeSelector` | Coordinating Node labels for pod assignment | `{}` |
| `coordinating.tolerations` | Coordinating Tolerations for pod assignment | `[]` |
| `coordinating.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` |
| `coordinating.resources.limits` | The resources limits for the container | `{}` |
| `coordinating.resources.requests` | The requested resources for the container | `{}` |
| `coordinating.startupProbe.enabled` | Enable/disable the startup probe (coordinating nodes pod) | `false` |
| `coordinating.startupProbe.initialDelaySeconds` | Delay before startup probe is initiated (coordinating nodes pod) | `90` |
| `coordinating.startupProbe.periodSeconds` | How often to perform the probe (coordinating nodes pod) | `10` |
| `coordinating.startupProbe.timeoutSeconds` | When the probe times out (coordinating nodes pod) | `5` |
| `coordinating.startupProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` |
| `coordinating.startupProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (coordinating nodes pod) | `1` |
| `coordinating.livenessProbe.enabled` | Enable/disable the liveness probe (coordinating-only nodes pod) | `true` |
| `coordinating.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (coordinating-only nodes pod) | `90` |
| `coordinating.livenessProbe.periodSeconds` | How often to perform the probe (coordinating-only nodes pod) | `10` |
| `coordinating.livenessProbe.timeoutSeconds` | When the probe times out (coordinating-only nodes pod) | `5` |
| `coordinating.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` |
| `coordinating.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (coordinating-only nodes pod) | `1` |
| `coordinating.readinessProbe.enabled` | Enable/disable the readiness probe (coordinating-only nodes pod) | `true` |
| `coordinating.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (coordinating-only nodes pod) | `90` |
| `coordinating.readinessProbe.periodSeconds` | How often to perform the probe (coordinating-only nodes pod) | `10` |
| `coordinating.readinessProbe.timeoutSeconds` | When the probe times out (coordinating-only nodes pod) | `5` |
| `coordinating.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` |
| `coordinating.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (coordinating-only nodes pod) | `1` |
| `coordinating.customStartupProbe` | Override default startup probe | `{}` |
| `coordinating.customLivenessProbe` | Override default liveness probe | `{}` |
| `coordinating.customReadinessProbe` | Override default readiness probe | `{}` |
| `coordinating.initContainers` | Extra init containers to add to the Elasticsearch coordinating-only pod(s) | `[]` |
| `coordinating.sidecars` | Extra sidecar containers to add to the Elasticsearch coordinating-only pod(s) | `[]` |
| `coordinating.service.type` | Kubernetes Service type (coordinating-only nodes) | `ClusterIP` |
| `coordinating.service.port` | Kubernetes Service port for REST API (coordinating-only nodes) | `9200` |
| `coordinating.service.nodePort` | Kubernetes Service nodePort (coordinating-only nodes) | `""` |
| `coordinating.service.annotations` | Annotations for coordinating-only nodes service | `{}` |
| `coordinating.service.loadBalancerIP` | loadBalancerIP if coordinating-only nodes service type is `LoadBalancer` | `""` |
| `coordinating.service.externalTrafficPolicy` | Set `externalTrafficPolicy` to `Local` to enable client source IP preservation | `Cluster` |
| `coordinating.serviceAccount.create` | Enable creation of ServiceAccount for the coordinating-only node | `false` |
| `coordinating.serviceAccount.name` | Name of the created serviceAccount | `""` |
| `coordinating.autoscaling.enabled` | Enable autoscaling for coordinating replicas | `false` |
| `coordinating.autoscaling.minReplicas` | Minimum number of coordinating replicas | `2` |
| `coordinating.autoscaling.maxReplicas` | Maximum number of coordinating replicas | `11` |
| `coordinating.autoscaling.targetCPU` | Target CPU utilization percentage for coordinating replica autoscaling | `""` |
| `coordinating.autoscaling.targetMemory` | Target Memory utilization percentage for coordinating replica autoscaling | `""` |
### Data parameters
| Name | Description | Value |
| -------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------- |
| `data.name` | Data node pod name | `data` |
| `data.fullnameOverride` | String to fully override elasticsearch.data.fullname template with a string | `""` |
| `data.replicas` | Desired number of Elasticsearch data nodes | `2` |
| `data.hostAliases` | Add deployment host aliases | `[]` |
| `data.schedulerName` | Name of the k8s scheduler (other than default) | `""` |
| `data.updateStrategy.type` | Update strategy for Data statefulset | `RollingUpdate` |
| `data.updateStrategy.rollingUpdatePartition` | Partition update strategy for Data statefulset | `""` |
| `data.heapSize` | Data node heap size | `1024m` |
| `data.podAnnotations` | Annotations for data pods. | `{}` |
| `data.podLabels` | Extra labels to add to Pod | `{}` |
| `data.securityContext.enabled` | Enable security context for data pods | `true` |
| `data.securityContext.fsGroup` | Group ID for the container for data pods | `1001` |
| `data.securityContext.runAsUser` | User ID for the container for data pods | `1001` |
| `data.podSecurityContext.enabled` | Enable security context for data pods | `false` |
| `data.podSecurityContext.fsGroup` | Group ID for the container for data pods | `1001` |
| `data.containerSecurityContext.enabled` | Enable security context for data pods | `false` |
| `data.containerSecurityContext.runAsUser` | User ID for the container for data pods | `1001` |
| `data.podAffinityPreset` | Data Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `data.podAntiAffinityPreset` | Data Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `data.nodeAffinityPreset.type` | Data Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `data.nodeAffinityPreset.key` | Data Node label key to match Ignored if `affinity` is set. | `""` |
| `data.nodeAffinityPreset.values` | Data Node label values to match. Ignored if `affinity` is set. | `[]` |
| `data.affinity` | Data Affinity for pod assignment | `{}` |
| `data.priorityClassName` | Data pods Priority Class Name | `""` |
| `data.nodeSelector` | Data Node labels for pod assignment | `{}` |
| `data.tolerations` | Data Tolerations for pod assignment | `[]` |
| `data.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` |
| `data.resources.limits` | The resources limits for the container | `{}` |
| `data.resources.requests` | The requested resources for the container | `{}` |
| `data.startupProbe.enabled` | Enable/disable the startup probe (data nodes pod) | `false` |
| `data.startupProbe.initialDelaySeconds` | Delay before startup probe is initiated (data nodes pod) | `90` |
| `data.startupProbe.periodSeconds` | How often to perform the probe (data nodes pod) | `10` |
| `data.startupProbe.timeoutSeconds` | When the probe times out (data nodes pod) | `5` |
| `data.startupProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` |
| `data.startupProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (data nodes pod) | `1` |
| `data.livenessProbe.enabled` | Enable/disable the liveness probe (data nodes pod) | `true` |
| `data.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (data nodes pod) | `90` |
| `data.livenessProbe.periodSeconds` | How often to perform the probe (data nodes pod) | `10` |
| `data.livenessProbe.timeoutSeconds` | When the probe times out (data nodes pod) | `5` |
| `data.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` |
| `data.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (data nodes pod) | `1` |
| `data.readinessProbe.enabled` | Enable/disable the readiness probe (data nodes pod) | `true` |
| `data.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (data nodes pod) | `90` |
| `data.readinessProbe.periodSeconds` | How often to perform the probe (data nodes pod) | `10` |
| `data.readinessProbe.timeoutSeconds` | When the probe times out (data nodes pod) | `5` |
| `data.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` |
| `data.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (data nodes pod) | `1` |
| `data.customStartupProbe` | Override default startup probe | `{}` |
| `data.customLivenessProbe` | Override default liveness probe | `{}` |
| `data.customReadinessProbe` | Override default readiness probe | `{}` |
| `data.initContainers` | Extra init containers to add to the Elasticsearch data pod(s) | `[]` |
| `data.sidecars` | Extra sidecar containers to add to the Elasticsearch data pod(s) | `[]` |
| `data.persistence.enabled` | Enable persistence using a `PersistentVolumeClaim` | `true` |
| `data.persistence.storageClass` | Persistent Volume Storage Class | `""` |
| `data.persistence.existingClaim` | Existing Persistent Volume Claim | `""` |
| `data.persistence.existingVolume`            | Existing Persistent Volume for use as volume match label selector to the `volumeClaimTemplate`. Ignored when `data.persistence.selector` is set.   | `""`                |
| `data.persistence.selector` | Configure custom selector for existing Persistent Volume. Overwrites `data.persistence.existingVolume` | `{}` |
| `data.persistence.annotations` | Persistent Volume Claim annotations | `{}` |
| `data.persistence.accessModes` | Persistent Volume Access Modes | `["ReadWriteOnce"]` |
| `data.persistence.size` | Persistent Volume Size | `8Gi` |
| `data.serviceAccount.create` | Enable creation of ServiceAccount for the data node | `false` |
| `data.serviceAccount.name` | Name of the created serviceAccount | `""` |
| `data.autoscaling.enabled` | Enable autoscaling for data replicas | `false` |
| `data.autoscaling.minReplicas` | Minimum number of data replicas | `2` |
| `data.autoscaling.maxReplicas` | Maximum number of data replicas | `11` |
| `data.autoscaling.targetCPU` | Target CPU utilization percentage for data replica autoscaling | `""` |
| `data.autoscaling.targetMemory` | Target Memory utilization percentage for data replica autoscaling | `""` |
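Similarly, a hedged sizing sketch for the data tier using the parameters above; all figures are illustrative and should be adjusted to your workload:
```yaml
data:
  replicas: 2
  heapSize: 1024m      # chart default; raise it together with the memory request
  resources:
    requests:
      cpu: 500m
      memory: 2Gi
    limits:
      memory: 2Gi
```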
### Ingest parameters
| Name | Description | Value |
| ------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------ | --------------- |
| `ingest.enabled` | Enable ingest nodes | `false` |
| `ingest.name` | Ingest node pod name | `ingest` |
| `ingest.fullnameOverride` | String to fully override elasticsearch.ingest.fullname template with a string | `""` |
| `ingest.replicas` | Desired number of Elasticsearch ingest nodes | `2` |
| `ingest.updateStrategy.type` | Update strategy for Ingest statefulset | `RollingUpdate` |
| `ingest.heapSize` | Ingest node heap size | `128m` |
| `ingest.podAnnotations` | Annotations for ingest pods. | `{}` |
| `ingest.hostAliases` | Add deployment host aliases | `[]` |
| `ingest.schedulerName` | Name of the k8s scheduler (other than default) | `""` |
| `ingest.podLabels` | Extra labels to add to Pod | `{}` |
| `ingest.securityContext.enabled` | Enable security context for ingest pods | `true` |
| `ingest.securityContext.fsGroup` | Group ID for the container for ingest pods | `1001` |
| `ingest.securityContext.runAsUser` | User ID for the container for ingest pods | `1001` |
| `ingest.podSecurityContext.enabled` | Enable security context for ingest pods | `false` |
| `ingest.podSecurityContext.fsGroup` | Group ID for the container for ingest pods | `1001` |
| `ingest.containerSecurityContext.enabled` | Enable security context for data pods | `false` |
| `ingest.containerSecurityContext.runAsUser` | User ID for the container for data pods | `1001` |
| `ingest.podAffinityPreset` | Ingest Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `ingest.podAntiAffinityPreset` | Ingest Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `ingest.nodeAffinityPreset.type` | Ingest Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `ingest.nodeAffinityPreset.key` | Ingest Node label key to match Ignored if `affinity` is set. | `""` |
| `ingest.nodeAffinityPreset.values` | Ingest Node label values to match. Ignored if `affinity` is set. | `[]` |
| `ingest.affinity` | Ingest Affinity for pod assignment | `{}` |
| `ingest.priorityClassName` | Ingest pods Priority Class Name | `""` |
| `ingest.nodeSelector` | Ingest Node labels for pod assignment | `{}` |
| `ingest.tolerations` | Ingest Tolerations for pod assignment | `[]` |
| `ingest.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` |
| `ingest.resources.limits` | The resources limits for the container | `{}` |
| `ingest.resources.requests` | The requested resources for the container | `{}` |
| `ingest.startupProbe.enabled` | Enable/disable the startup probe (ingest nodes pod) | `false` |
| `ingest.startupProbe.initialDelaySeconds` | Delay before startup probe is initiated (ingest nodes pod) | `90` |
| `ingest.startupProbe.periodSeconds` | How often to perform the probe (ingest nodes pod) | `10` |
| `ingest.startupProbe.timeoutSeconds` | When the probe times out (ingest nodes pod) | `5` |
| `ingest.startupProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` |
| `ingest.startupProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (ingest nodes pod) | `1` |
| `ingest.livenessProbe.enabled` | Enable/disable the liveness probe (ingest nodes pod) | `true` |
| `ingest.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (ingest nodes pod) | `90` |
| `ingest.livenessProbe.periodSeconds` | How often to perform the probe (ingest nodes pod) | `10` |
| `ingest.livenessProbe.timeoutSeconds` | When the probe times out (ingest nodes pod) | `5` |
| `ingest.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` |
| `ingest.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (ingest nodes pod) | `1` |
| `ingest.readinessProbe.enabled` | Enable/disable the readiness probe (ingest nodes pod) | `true` |
| `ingest.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (ingest nodes pod) | `90` |
| `ingest.readinessProbe.periodSeconds` | How often to perform the probe (ingest nodes pod) | `10` |
| `ingest.readinessProbe.timeoutSeconds` | When the probe times out (ingest nodes pod) | `5` |
| `ingest.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` |
| `ingest.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (ingest nodes pod) | `1` |
| `ingest.customStartupProbe` | Override default startup probe | `{}` |
| `ingest.customLivenessProbe` | Override default liveness probe | `{}` |
| `ingest.customReadinessProbe` | Override default readiness probe | `{}` |
| `ingest.initContainers` | Extra init containers to add to the Elasticsearch ingest pod(s) | `[]` |
| `ingest.sidecars` | Extra sidecar containers to add to the Elasticsearch ingest pod(s) | `[]` |
| `ingest.service.type` | Kubernetes Service type (ingest nodes) | `ClusterIP` |
| `ingest.service.port` | Kubernetes Service port Elasticsearch transport port (ingest nodes) | `9300` |
| `ingest.service.nodePort` | Kubernetes Service nodePort (ingest nodes) | `""` |
| `ingest.service.annotations` | Annotations for ingest nodes service | `{}` |
| `ingest.service.loadBalancerIP` | loadBalancerIP if ingest nodes service type is `LoadBalancer` | `""` |
| `ingest.serviceAccount.create` | Create a default serviceaccount for elasticsearch curator | `false` |
| `ingest.serviceAccount.name` | Name of the created serviceAccount | `""` |
### Curator parameters
| Name | Description | Value |
| -------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------- |
| `curator.enabled` | Enable Elasticsearch Curator cron job | `false` |
| `curator.name` | Elasticsearch Curator pod name | `curator` |
| `curator.image.registry` | Elasticsearch Curator image registry | `docker.io` |
| `curator.image.repository` | Elasticsearch Curator image repository | `bitnami/elasticsearch-curator` |
| `curator.image.tag` | Elasticsearch Curator image tag | `5.8.4-debian-10-r190` |
| `curator.image.pullPolicy` | Elasticsearch Curator image pull policy | `IfNotPresent` |
| `curator.image.pullSecrets` | Elasticsearch Curator image pull secrets | `[]` |
| `curator.cronjob.schedule` | Schedule for the CronJob | `0 1 * * *` |
| `curator.cronjob.annotations` | Annotations to add to the cronjob | `{}` |
| `curator.cronjob.concurrencyPolicy` | `Allow,Forbid,Replace` concurrent jobs | `""` |
| `curator.cronjob.failedJobsHistoryLimit` | Specify the number of failed Jobs to keep | `""` |
| `curator.cronjob.successfulJobsHistoryLimit` | Specify the number of completed Jobs to keep | `""` |
| `curator.cronjob.jobRestartPolicy` | Control the Job restartPolicy | `Never` |
| `curator.schedulerName` | Name of the k8s scheduler (other than default) | `""` |
| `curator.podAnnotations` | Annotations to add to the pod | `{}` |
| `curator.podLabels` | Extra labels to add to Pod | `{}` |
| `curator.podAffinityPreset` | Curator Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `curator.podAntiAffinityPreset` | Curator Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `curator.nodeAffinityPreset.type` | Curator Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `curator.nodeAffinityPreset.key` | Curator Node label key to match Ignored if `affinity` is set. | `""` |
| `curator.nodeAffinityPreset.values` | Curator Node label values to match. Ignored if `affinity` is set. | `[]` |
| `curator.initContainers` | Extra init containers to add to the Elasticsearch coordinating-only pod(s) | `[]` |
| `curator.sidecars` | Extra sidecar containers to add to the Elasticsearch ingest pod(s) | `[]` |
| `curator.affinity` | Curator Affinity for pod assignment | `{}` |
| `curator.nodeSelector` | Curator Node labels for pod assignment | `{}` |
| `curator.tolerations` | Curator Tolerations for pod assignment | `[]` |
| `curator.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` |
| `curator.rbac.enabled` | Enable RBAC resources | `false` |
| `curator.serviceAccount.create` | Create a default serviceaccount for elasticsearch curator | `true` |
| `curator.serviceAccount.name` | Name for elasticsearch curator serviceaccount | `""` |
| `curator.psp.create` | Whether to create a PodSecurityPolicy. WARNING: PodSecurityPolicy is deprecated in Kubernetes v1.21 or later, unavailable in v1.25 or later | `false` |
| `curator.hooks` | Whether to run job on selected hooks | `{}` |
| `curator.dryrun` | Run Curator in dry-run mode | `false` |
| `curator.command` | Command to execute | `["curator"]` |
| `curator.env` | Environment variables to add to the cronjob container | `{}` |
| `curator.configMaps.action_file_yml` | Contents of the Curator action_file.yml | `""` |
| `curator.configMaps.config_yml` | Contents of the Curator config.yml (overrides config) | `""` |
| `curator.resources.limits` | The resources limits for the container | `{}` |
| `curator.resources.requests` | The requested resources for the container | `{}` |
| `curator.priorityClassName` | Curator Pods Priority Class Name | `""` |
| `curator.extraVolumes` | Extra volumes | `[]` |
| `curator.extraVolumeMounts` | Mount extra volume(s) | `[]` |
| `curator.extraInitContainers` | DEPRECATED. Use `curator.initContainers` instead. Init containers to add to the cronjob container | `[]` |
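As an illustration, a minimal Curator setup can be provided through `curator.configMaps.action_file_yml`. The sketch below deletes indices older than 30 days matching an assumed `logstash-` prefix; adapt the filters to your own index naming:
```yaml
curator:
  enabled: true
  cronjob:
    schedule: "0 1 * * *"
  configMaps:
    action_file_yml: |-
      actions:
        1:
          action: delete_indices
          description: Delete indices older than 30 days (assumed logstash- prefix)
          options:
            ignore_empty_list: True
          filters:
            - filtertype: pattern
              kind: prefix
              value: logstash-
            - filtertype: age
              source: name
              direction: older
              timestring: '%Y.%m.%d'
              unit: days
              unit_count: 30
```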
### Metrics parameters
| Name | Description | Value |
| -------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------ | -------------------------------- |
| `metrics.enabled` | Enable prometheus exporter | `false` |
| `metrics.name` | Metrics pod name | `metrics` |
| `metrics.image.registry` | Metrics exporter image registry | `docker.io` |
| `metrics.image.repository` | Metrics exporter image repository | `bitnami/elasticsearch-exporter` |
| `metrics.image.tag` | Metrics exporter image tag | `1.3.0-debian-10-r31` |
| `metrics.image.pullPolicy` | Metrics exporter image pull policy | `IfNotPresent` |
| `metrics.image.pullSecrets` | Metrics exporter image pull secrets | `[]` |
| `metrics.extraArgs` | Extra arguments to add to the default exporter command | `[]` |
| `metrics.hostAliases` | Add deployment host aliases | `[]` |
| `metrics.schedulerName` | Name of the k8s scheduler (other than default) | `""` |
| `metrics.service.type` | Metrics exporter endpoint service type | `ClusterIP` |
| `metrics.service.annotations` | Provide any additional annotations which may be required. | `{}` |
| `metrics.podAffinityPreset` | Metrics Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `metrics.podAntiAffinityPreset` | Metrics Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `metrics.nodeAffinityPreset.type` | Metrics Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `metrics.nodeAffinityPreset.key` | Metrics Node label key to match Ignored if `affinity` is set. | `""` |
| `metrics.nodeAffinityPreset.values` | Metrics Node label values to match. Ignored if `affinity` is set. | `[]` |
| `metrics.affinity` | Metrics Affinity for pod assignment | `{}` |
| `metrics.nodeSelector` | Metrics Node labels for pod assignment | `{}` |
| `metrics.tolerations` | Metrics Tolerations for pod assignment | `[]` |
| `metrics.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` |
| `metrics.resources.limits` | The resources limits for the container | `{}` |
| `metrics.resources.requests` | The requested resources for the container | `{}` |
| `metrics.livenessProbe.enabled` | Enable/disable the liveness probe (metrics pod) | `true` |
| `metrics.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (metrics pod) | `60` |
| `metrics.livenessProbe.periodSeconds` | How often to perform the probe (metrics pod) | `10` |
| `metrics.livenessProbe.timeoutSeconds` | When the probe times out (metrics pod) | `5` |
| `metrics.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` |
| `metrics.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (metrics pod) | `1` |
| `metrics.readinessProbe.enabled` | Enable/disable the readiness probe (metrics pod) | `true` |
| `metrics.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (metrics pod) | `5` |
| `metrics.readinessProbe.periodSeconds` | How often to perform the probe (metrics pod) | `10` |
| `metrics.readinessProbe.timeoutSeconds` | When the probe times out (metrics pod) | `1` |
| `metrics.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` |
| `metrics.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (metrics pod) | `1` |
| `metrics.podAnnotations` | Metrics exporter pod Annotation and Labels | `{}` |
| `metrics.podLabels` | Extra labels to add to Pod | `{}` |
| `metrics.serviceMonitor.enabled` | if `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`) | `false` |
| `metrics.serviceMonitor.namespace` | Namespace in which Prometheus is running | `""` |
| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped. | `""` |
| `metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `""` |
| `metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` |
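For example, a hedged sketch that enables the Prometheus exporter and a ServiceMonitor; the `monitoring` namespace and the `release: prometheus` label are assumptions about your Prometheus Operator setup:
```yaml
metrics:
  enabled: true
  serviceMonitor:
    enabled: true
    namespace: monitoring     # hypothetical namespace watched by your Prometheus Operator
    interval: 30s
    selector:
      release: prometheus     # hypothetical label your Prometheus instance selects on
```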
### Sysctl Image parameters
| Name | Description | Value |
| -------------------------------- | ------------------------------------------- | ----------------------- |
| `sysctlImage.enabled` | Enable kernel settings modifier image | `true` |
| `sysctlImage.registry` | Kernel settings modifier image registry | `docker.io` |
| `sysctlImage.repository` | Kernel settings modifier image repository | `bitnami/bitnami-shell` |
| `sysctlImage.tag` | Kernel settings modifier image tag | `10-debian-10-r259` |
| `sysctlImage.pullPolicy` | Kernel settings modifier image pull policy | `IfNotPresent` |
| `sysctlImage.pullSecrets` | Kernel settings modifier image pull secrets | `[]` |
| `sysctlImage.resources.limits` | The resources limits for the container | `{}` |
| `sysctlImage.resources.requests` | The requested resources for the container | `{}` |
### VolumePermissions parameters
| Name | Description | Value |
| -------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------- |
| `volumePermissions.enabled` | Enable init container that changes volume permissions in the data directory (for cases where the default k8s `runAsUser` and `fsUser` values do not work) | `false` |
| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` |
| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/bitnami-shell` |
| `volumePermissions.image.tag` | Init container volume-permissions image tag | `10-debian-10-r259` |
| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` |
| `volumePermissions.image.pullSecrets` | Init container volume-permissions image pull secrets | `[]` |
| `volumePermissions.resources.limits` | The resources limits for the container | `{}` |
| `volumePermissions.resources.requests` | The requested resources for the container | `{}` |
### Kibana Parameters
| Name | Description | Value |
| ---------------------------- | ------------------------------------------------------------------------- | ------ |
| `kibana.elasticsearch.hosts` | Array containing hostnames for the ES instances. Used to generate the URL | `[]` |
| `kibana.elasticsearch.port` | Port to connect Kibana and ES instance. Used to generate the URL | `9200` |
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
```console
$ helm install my-release \
--set name=my-elastic,coordinating.service.port=8080 \
bitnami/elasticsearch
```
The above command sets the Elasticsearch cluster name to `my-elastic` and REST port number to `8080`.
Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
```console
$ helm install my-release -f values.yaml bitnami/elasticsearch
```
> **Tip**: You can use the default [values.yaml](values.yaml).
## Configuration and installation details
### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/)
It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image.
Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist.
### Change ElasticSearch version
To modify the ElasticSearch version used in this chart you can specify a [valid image tag](https://hub.docker.com/r/bitnami/elasticsearch/tags/) using the `image.tag` parameter. For example, `image.tag=X.Y.Z`. This approach is also applicable to other images like exporters.
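For instance, a values snippet pinning the image tag could look like the sketch below; the tag shown is only illustrative, so pick a real one from the tag list linked above:
```yaml
image:
  # Illustrative tag only; choose an existing immutable tag from the linked list
  tag: 7.16.2-debian-10-r0
```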
### Default kernel settings
Currently, Elasticsearch requires some changes in the kernel of the host machine to work as expected. If those values are not set in the underlying operating system, the ES containers fail to boot with ERROR messages. More information about these requirements can be found in the links below:
- [File Descriptor requirements](https://www.elastic.co/guide/en/elasticsearch/reference/current/file-descriptors.html)
- [Virtual memory requirements](https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html)
This chart uses a **privileged** initContainer to change those settings in the Kernel by running: `sysctl -w vm.max_map_count=262144 && sysctl -w fs.file-max=65536`.
You can disable the initContainer using the `sysctlImage.enabled=false` parameter.
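If the kernel parameters are already set on your nodes (for example via a DaemonSet or node configuration), a sketch of the values override to skip the privileged init container would be:
```yaml
sysctlImage:
  enabled: false   # assumes vm.max_map_count and fs.file-max are already set on every node
```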
### Enable bundled Kibana
This Elasticsearch chart includes Kibana as a subchart; you can enable it simply by setting the `global.kibanaEnabled=true` parameter.
To see the operational notes from the Kibana chart, add the `--render-subchart-notes` flag to your `helm install` command; this way both the Kibana and Elasticsearch notes are shown in your terminal.
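A minimal values sketch to enable the bundled subchart, with everything else left at chart defaults, could be:
```yaml
global:
  kibanaEnabled: true
```
Remember to pass `--render-subchart-notes` on install or upgrade if you also want the Kibana notes printed.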
When enabling the bundled Kibana subchart, there are a few gotchas to be aware of, listed below.
#### Elasticsearch rest Encryption
When enabling Elasticsearch's REST endpoint encryption, you will also need to set `kibana.elasticsearch.security.tls.enabled` to the SAME value, along with some additional values shown below for an "out of the box" experience:
```yaml
security:
  enabled: true
  # PASSWORD must be the same value passed to elasticsearch to get an "out of the box" experience
  elasticPassword: "<PASSWORD>"
  tls:
    # AutoGenerate TLS certs for elastic
    autoGenerated: true
kibana:
  elasticsearch:
    security:
      auth:
        enabled: true
        # default in the elasticsearch chart is elastic
        kibanaUsername: "<USERNAME>"
        kibanaPassword: "<PASSWORD>"
      tls:
        # Instruct kibana to connect to elastic over https
        enabled: true
        # Bit of a catch 22, as you will need to know the name upfront of your release
        existingSecret: RELEASENAME-elasticsearch-coordinating-only-crt
        # As the certs are auto-generated, they are pemCerts so set to true
        usePemCerts: true
```
At a bare minimum, when working with Kibana and Elasticsearch together, the following values MUST be the same, otherwise things will fail:
```yaml
security:
  tls:
    restEncryption: true
# assumes global.kibanaEnabled=true
kibana:
  elasticsearch:
    security:
      tls:
        enabled: true
```
### Adding extra environment variables
In case you want to add extra environment variables (useful for advanced operations like custom init scripts), you can use the `extraEnvVars` property.
```yaml
extraEnvVars:
  - name: ELASTICSEARCH_VERSION
    value: "7.0"
```
Alternatively, you can use a ConfigMap or a Secret with the environment variables. To do so, use the `extraEnvVarsConfigMap` or the `extraEnvVarsSecret` values.
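For example, a sketch referencing an existing ConfigMap and Secret; both names below are hypothetical and must already exist in the release namespace:
```yaml
extraEnvVarsConfigMap: elasticsearch-extra-env          # hypothetical ConfigMap name
extraEnvVarsSecret: elasticsearch-extra-env-sensitive   # hypothetical Secret name
```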
### Using custom init scripts
For advanced operations, the Bitnami Elasticsearch chart allows using custom init scripts that will be mounted inside `/docker-entrypoint-initdb.d`. You can include the scripts directly in your `values.yaml` with `initScripts`, or use a ConfigMap or a Secret (in case of sensitive data) to mount these extra scripts. In that case, use the `initScriptsCM` and `initScriptsSecret` values.
```console
initScriptsCM=special-scripts
initScriptsSecret=special-scripts-sensitive
```
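Alternatively, small scripts can be declared inline through `initScripts`; the script name and contents below are purely illustrative:
```yaml
initScripts:
  my_init_script.sh: |
    #!/bin/sh
    # Illustrative script: only logs that initialization ran
    echo "Running custom init script"
```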
### Snapshot and restore operations
As described in the [official documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/snapshots-register-repository.html#snapshots-filesystem-repository), it's necessary to register a snapshot repository before you can perform snapshot and restore operations.
This chart allows you to configure Elasticsearch to use a shared file system to store snapshots. To do so, you need to mount an RWX volume on every Elasticsearch node, and set the parameter `snapshotRepoPath` with the path where the volume is mounted. In the example below, you can find the values to set when using an NFS Persistent Volume:
```yaml
extraVolumes:
  - name: snapshot-repository
    nfs:
      server: nfs.example.com # Please change this to your NFS server
      path: /share1
extraVolumeMounts:
  - name: snapshot-repository
    mountPath: /snapshots
snapshotRepoPath: "/snapshots"
```
### Sidecars and Init Containers
If you need additional containers to run within the same pod as the Elasticsearch components (e.g. an additional metrics or logging exporter), you can do so via the `XXX.sidecars` parameter(s), where XXX is a placeholder you need to replace with the actual component(s). Simply define your container according to the Kubernetes container spec.
```yaml
sidecars:
  - name: your-image-name
    image: your-image
    imagePullPolicy: Always
    ports:
      - name: portname
        containerPort: 1234
```
Similarly, you can add extra init containers using the `initContainers` parameter.
```yaml
initContainers:
  - name: your-image-name
    image: your-image
    imagePullPolicy: Always
    ports:
      - name: portname
        containerPort: 1234 # containerPort is required when declaring a port
```
### Setting Pod's affinity
This chart allows you to set your custom affinity using the `XXX.affinity` parameter(s). Find more information about Pod's affinity in the [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity).
As an alternative, you can use one of the preset configurations for pod affinity, pod anti-affinity, and node affinity available in the [bitnami/common](https://github.com/bitnami/charts/tree/master/bitnami/common#affinities) chart. To do so, set the `XXX.podAffinityPreset`, `XXX.podAntiAffinityPreset`, or `XXX.nodeAffinityPreset` parameters.
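As a sketch, spreading data nodes across hosts with the soft anti-affinity preset (the other components accept the same keys):
```yaml
data:
  podAntiAffinityPreset: soft   # prefer scheduling data pods on different nodes
```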
## Persistence
The [Bitnami Elasticsearch](https://github.com/bitnami/bitnami-docker-elasticsearch) image stores the Elasticsearch data at the `/bitnami/elasticsearch/data` path of the container.
By default, the chart mounts a [Persistent Volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) at this location. The volume is created using dynamic volume provisioning. See the [Parameters](#parameters) section to configure the PVC.
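A hedged example of tuning the PVCs per node role; the sizes are illustrative and the StorageClass name is an assumption about your cluster:
```yaml
master:
  persistence:
    size: 10Gi
data:
  persistence:
    size: 100Gi              # illustrative sizes; adjust to your expected data volume
    storageClass: fast-ssd   # hypothetical StorageClass; use one available in your cluster
```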
### Adjust permissions of persistent volume mountpoint
As the image runs as non-root by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data into it.
By default, the chart is configured to use Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions.
As an alternative, this chart supports using an initContainer to change the ownership of the volume before mounting it in the final destination.
You can enable this initContainer by setting `volumePermissions.enabled` to `true`.
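A sketch of that override:
```yaml
volumePermissions:
  enabled: true   # runs an init container (as root) that fixes ownership of the data directory
```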
## Troubleshooting
Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues).
## Upgrading
### To 17.0.0
This version bumps the major version of the Kibana Helm Chart bundled as a dependency. [Here](https://github.com/bitnami/charts/tree/master/bitnami/kibana#to-900) you can see the changes implemented in that Kibana major version.
### To 16.0.0
This version replaces the Ingest and Coordinating Deployments with StatefulSets. This change is required so that Coordinating and Ingest nodes have their own associated services, which is required for TLS hostname verification.
We haven't encountered any issues during our upgrade test, but we recommend creating volumes backups before upgrading this major version, especially for users with additional volumes and custom configurations.
Additionally, this version adds support for X-Pack Security features such as TLS/SSL encryption and basic authentication.
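If you want to try these security features after upgrading, a minimal sketch that enables X-Pack Security with auto-generated TLS certificates (review the rest of the `security.*` parameters before using this in production):
```console
$ helm upgrade my-release bitnami/elasticsearch \
  --set security.enabled=true \
  --set security.tls.autoGenerated=true
```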
### To 15.0.0
From this version onwards, Elasticsearch container components are licensed under the [Elastic License](https://www.elastic.co/licensing/elastic-license), which is not currently accepted as an Open Source license by the Open Source Initiative (OSI).
Also, from now on, the Helm Chart includes the X-Pack plugin installed by default.
A regular upgrade from previous versions remains compatible.
### To 14.0.0
This version standardizes the way of defining Ingress rules in the Kibana subchart. When configuring a single hostname for the Ingress rule, set the `kibana.ingress.hostname` value. When defining more than one, set the `kibana.ingress.extraHosts` array. Apart from this case, no issues are expected to appear when upgrading.
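For reference, a sketch of the new layout (hostnames are placeholders, and the `name`/`path` fields assume the Kibana subchart's `extraHosts` entry format):
```yaml
kibana:
  ingress:
    enabled: true
    hostname: kibana.local
    extraHosts:
      - name: kibana.example.com
        path: /
```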
### To 13.0.0
[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project). This major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.
**What changes were introduced in this major version?**
- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field.
- Move dependency information from the *requirements.yaml* to the *Chart.yaml*
- After running `helm dependency update`, a *Chart.lock* file is generated containing the same structure used in the previous *requirements.lock*
- The different fields present in the *Chart.yaml* file have been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts
**Considerations when upgrading to this version**
- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues
- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore
- If you installed the previous version with Helm v2 and want to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3
**Useful links**
- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/
- https://helm.sh/docs/topics/v2_v3_migration/
- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/
### To 12.0.0
Several changes were introduced that break backwards compatibility:
- Ports names were prefixed with the protocol to comply with Istio (see https://istio.io/docs/ops/deployment/requirements/).
- Labels are adapted to follow the Helm charts best practices.
- Elasticsearch data pods are now deployed in parallel in order to bootstrap the cluster and be discovered.
### To 11.0.0
Elasticsearch master pods are now deployed in parallel in order to bootstrap the cluster and be discovered.
The field `podManagementPolicy` can't be updated in a StatefulSet, so you need to delete the existing StatefulSet before you upgrade the chart to this version.
```console
$ kubectl delete statefulset elasticsearch-master
$ helm upgrade <DEPLOYMENT_NAME> bitnami/elasticsearch
```
### To 10.0.0
In this version, Kibana was added as a dependent chart. More info about how to enable and work with this bundled Kibana can be found in the ["Enable bundled Kibana"](#enable-bundled-kibana) section.
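A minimal sketch of enabling the bundled Kibana at install time:
```console
$ helm install my-release bitnami/elasticsearch --set global.kibanaEnabled=true
```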
### To 9.0.0
Elasticsearch master nodes store the cluster status at `/bitnami/elasticsearch/data`. Among other things, this includes the UUID of the Elasticsearch cluster. Without a persistent data store for this data, the UUID of a cluster could change if the k8s node(s) hosting the ES master pods go down and the pods are rescheduled on other nodes. In that event, the data nodes will no longer be able to join the cluster because the UUID changed, resulting in a broken cluster.
To resolve such issues, PVCs are now attached for master node data persistence.
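As a sketch, assuming the usual Bitnami `persistence` block (`enabled`, `storageClass`, `size`) under each node role, master persistence can be tuned with values such as:
```yaml
master:
  persistence:
    enabled: true
    storageClass: ""  # placeholder: set to a StorageClass available in your cluster
    size: 8Gi
```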
---
Helm performs a lookup for the object based on its group (apps), version (v1), and kind (Deployment). Also known as its GroupVersionKind, or GVK. Changing the GVK is considered a compatibility breaker from Kubernetes' point of view, so you cannot "upgrade" those objects to the new GVK in-place. Earlier versions of Helm 3 did not perform the lookup correctly which has since been fixed to match the spec.
In [4dfac075aacf74405e31ae5b27df4369e84eb0b0](https://github.com/bitnami/charts/commit/4dfac075aacf74405e31ae5b27df4369e84eb0b0), the `apiVersion` of the deployment resources was updated to `apps/v1` in line with the API deprecations, resulting in compatibility breakage.
### To 7.4.0
This version also introduces `bitnami/common`, a [library chart](https://helm.sh/docs/topics/library_charts/#helm), as a dependency. More documentation about this new utility can be found [here](https://github.com/bitnami/charts/tree/master/bitnami/common#bitnami-common-library-chart). Please make sure that you have updated the chart dependencies before executing any upgrade.
### To 7.0.0
This version enables, by default, the initContainer that modifies some kernel settings to meet the Elasticsearch requirements. More info in the ["Default kernel settings"](#default-kernel-settings) section.
You can disable the initContainer using the `sysctlImage.enabled=false` parameter.
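A minimal sketch of disabling it on an existing release:
```console
$ helm upgrade my-release bitnami/elasticsearch --set sysctlImage.enabled=false
```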
### To 3.0.0
Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments.
Use the workaround below to upgrade from versions previous to 3.0.0. The following example assumes that the release name is elasticsearch:
```console
$ kubectl patch deployment elasticsearch-coordinating --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]'
$ kubectl patch deployment elasticsearch-ingest --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]'
$ kubectl patch deployment elasticsearch-master --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]'
$ kubectl patch deployment elasticsearch-metrics --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]'
$ kubectl delete statefulset elasticsearch-data --cascade=false
```

View File

@ -0,0 +1,6 @@
master:
replicas: 1
data:
replicas: 1
coordinating:
replicas: 1

View File

@ -0,0 +1,132 @@
CHART NAME: {{ .Chart.Name }}
CHART VERSION: {{ .Chart.Version }}
APP VERSION: {{ .Chart.AppVersion }}
{{- if contains .Values.coordinating.service.type "LoadBalancer" }}
-------------------------------------------------------------------------------
WARNING
By specifying "coordinating.service.type=LoadBalancer" you have most likely
exposed the Elasticsearch service externally.
Please note that Elasticsearch does not implement an authentication
mechanism to secure your cluster. For security reasons, we strongly
suggest that you switch to "ClusterIP" or "NodePort".
-------------------------------------------------------------------------------
{{- end }}
{{- if not .Values.sysctlImage.enabled }}
-------------------------------------------------------------------------------
WARNING
Elasticsearch requires some changes in the kernel of the host machine to
work as expected. If those values are not set in the underlying operating
system, the ES containers fail to boot with ERROR messages.
To check whether the host machine meets the requirements, run the command
below:
kubectl logs --namespace {{ .Release.Namespace }} $(kubectl get --namespace {{ .Release.Namespace }} \
pods -l app={{ template "common.names.name" . }},role=master -o jsonpath='{.items[0].metadata.name}') \
elasticsearch
You can adapt the Kernel parameters on your cluster as described in the
official documentation:
https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster
As an alternative, you can specify "sysctlImage.enabled=true" to use a
privileged initContainer to change those settings in the Kernel:
helm upgrade --namespace {{ .Release.Namespace }} {{ .Release.Name }} bitnami/elasticsearch --set sysctlImage.enabled=true
Note that this requires the ability to run privileged containers, which is likely not
the case on many secure clusters. To cover this use case, you can also set some parameters
in the config file to customize the default settings:
https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-store.html
https://www.elastic.co/guide/en/cloud-on-k8s/master/k8s-virtual-memory.html
For that, you can place the desired parameters by using the "config" block present in the values.yaml
{{- else if .Values.sysctlImage.enabled }}
-------------------------------------------------------------------------------
WARNING
Elasticsearch requires some changes in the kernel of the host machine to
work as expected. If those values are not set in the underlying operating
system, the ES containers fail to boot with ERROR messages.
More information about these requirements can be found in the links below:
https://www.elastic.co/guide/en/elasticsearch/reference/current/file-descriptors.html
https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html
This chart uses a privileged initContainer to change those settings in the Kernel
by running: sysctl -w vm.max_map_count=262144 && sysctl -w fs.file-max=65536
{{- end }}
** Please be patient while the chart is being deployed **
{{- if .Values.diagnosticMode.enabled }}
The chart has been deployed in diagnostic mode. All probes have been disabled and the command has been overwritten with:
command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 4 }}
args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 4 }}
Get the list of pods by executing:
kubectl get pods --namespace {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }}
Access the pod you want to debug by executing
kubectl exec --namespace {{ .Release.Namespace }} -ti <NAME OF THE POD> -- bash
In order to replicate the container startup scripts execute this command:
/opt/bitnami/scripts/elasticsearch/entrypoint.sh /opt/bitnami/scripts/elasticsearch/run.sh
{{- else }}
{{- if .Values.curator.enabled }}
A CronJob will run with schedule {{ .Values.curator.cronjob.schedule }}.
The Jobs will not be removed automagically when deleting this Helm chart.
To remove these jobs, run the following:
kubectl --namespace {{ .Release.Namespace }} delete job -l app={{ template "common.names.name" . }},role=curator
{{- end }}
Elasticsearch can be accessed within the cluster on port {{ .Values.coordinating.service.port }} at {{ template "elasticsearch.coordinating.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}
To access from outside the cluster execute the following commands:
{{- if contains "NodePort" .Values.coordinating.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "elasticsearch.coordinating.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
curl http://$NODE_IP:$NODE_PORT/
{{- else if contains "LoadBalancer" .Values.coordinating.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "elasticsearch.coordinating.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "elasticsearch.coordinating.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
curl http://$SERVICE_IP:{{ .Values.coordinating.service.port }}/
{{- else if contains "ClusterIP" .Values.coordinating.service.type }}
kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "elasticsearch.coordinating.fullname" . }} {{ .Values.coordinating.service.port }}:9200 &
curl http://127.0.0.1:9200/
{{- end }}
{{- include "common.warnings.rollingTag" .Values.image }}
{{- include "common.warnings.rollingTag" .Values.volumePermissions.image }}
{{- include "common.warnings.rollingTag" .Values.sysctlImage }}
{{- end }}
{{ include "elasticsearch.validateValues" . }}

View File

@ -0,0 +1,490 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Return the proper ES image name
*/}}
{{- define "elasticsearch.image" -}}
{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }}
{{- end -}}
{{/*
Create a default fully qualified master name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "elasticsearch.master.fullname" -}}
{{- if .Values.master.fullnameOverride -}}
{{- .Values.master.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" (include "common.names.fullname" .) .Values.master.name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{/*
Create a default fully qualified ingest name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "elasticsearch.ingest.fullname" -}}
{{- if .Values.ingest.fullnameOverride -}}
{{- .Values.ingest.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" (include "common.names.fullname" .) .Values.ingest.name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{/*
Create a default fully qualified coordinating name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "elasticsearch.coordinating.fullname" -}}
{{- if .Values.global.kibanaEnabled -}}
{{- printf "%s-%s" .Release.Name .Values.global.coordinating.name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- if .Values.coordinating -}}
{{- if .Values.coordinating.fullnameOverride -}}
{{- .Values.coordinating.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" (include "common.names.fullname" .) .Values.global.coordinating.name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Return the hostname of every ElasticSearch seed node
*/}}
{{- define "elasticsearch.hosts" -}}
{{- $clusterDomain := .Values.clusterDomain }}
{{- $releaseNamespace := .Release.Namespace }}
{{- $masterFullname := include "elasticsearch.master.fullname" . }}
{{- $coordinatingFullname := include "elasticsearch.coordinating.fullname" . }}
{{- $dataFullname := include "elasticsearch.data.fullname" . }}
{{- $ingestFullname := include "elasticsearch.ingest.fullname" . }}
{{- $masterFullname }}.{{ $releaseNamespace }}.svc.{{ $clusterDomain }},
{{- $coordinatingFullname }}.{{ $releaseNamespace }}.svc.{{ $clusterDomain }},
{{- $dataFullname }}.{{ $releaseNamespace }}.svc.{{ $clusterDomain }},
{{- if .Values.ingest.enabled }}
{{- $ingestFullname }}.{{ $releaseNamespace }}.svc.{{ $clusterDomain }},
{{- end -}}
{{- end -}}
{{/*
Create a default fully qualified data name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "elasticsearch.data.fullname" -}}
{{- if .Values.data.fullnameOverride -}}
{{- .Values.data.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" (include "common.names.fullname" .) .Values.data.name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{ template "elasticsearch.initScriptsSecret" . }}
{{/*
Get the initialization scripts volume name.
*/}}
{{- define "elasticsearch.initScripts" -}}
{{- printf "%s-init-scripts" (include "common.names.fullname" .) -}}
{{- end -}}
{{ template "elasticsearch.initScriptsCM" . }}
{{/*
Get the initialization scripts ConfigMap name.
*/}}
{{- define "elasticsearch.initScriptsCM" -}}
{{- printf "%s" .Values.initScriptsCM -}}
{{- end -}}
{{ template "elasticsearch.initScriptsSecret" . }}
{{/*
Get the initialization scripts Secret name.
*/}}
{{- define "elasticsearch.initScriptsSecret" -}}
{{- printf "%s" .Values.initScriptsSecret -}}
{{- end -}}
{{/*
Create the name of the master service account to use
*/}}
{{- define "elasticsearch.master.serviceAccountName" -}}
{{- if .Values.master.serviceAccount.create -}}
{{ default (include "elasticsearch.master.fullname" .) .Values.master.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.master.serviceAccount.name }}
{{- end -}}
{{- end -}}
{{/*
Create the name of the coordinating-only service account to use
*/}}
{{- define "elasticsearch.coordinating.serviceAccountName" -}}
{{- if .Values.coordinating.serviceAccount.create -}}
{{ default (include "elasticsearch.coordinating.fullname" .) .Values.coordinating.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.coordinating.serviceAccount.name }}
{{- end -}}
{{- end -}}
{{/*
Create the name of the data service account to use
*/}}
{{- define "elasticsearch.data.serviceAccountName" -}}
{{- if .Values.data.serviceAccount.create -}}
{{ default (include "elasticsearch.data.fullname" .) .Values.data.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.data.serviceAccount.name }}
{{- end -}}
{{- end -}}
{{/*
Create the name of the ingest service account to use
*/}}
{{- define "elasticsearch.ingest.serviceAccountName" -}}
{{- if .Values.ingest.serviceAccount.create -}}
{{ default (include "elasticsearch.ingest.fullname" .) .Values.ingest.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.ingest.serviceAccount.name }}
{{- end -}}
{{- end -}}
{{/*
Create a default fully qualified metrics name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "elasticsearch.metrics.fullname" -}}
{{- printf "%s-%s" (include "common.names.fullname" .) .Values.metrics.name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Return the proper ES exporter image name
*/}}
{{- define "elasticsearch.metrics.image" -}}
{{ include "common.images.image" (dict "imageRoot" .Values.metrics.image "global" .Values.global) }}
{{- end -}}
{{/*
Return the proper sysctl image name
*/}}
{{- define "elasticsearch.sysctl.image" -}}
{{ include "common.images.image" (dict "imageRoot" .Values.sysctlImage "global" .Values.global) }}
{{- end -}}
{{/*
Return the proper Docker Image Registry Secret Names
*/}}
{{- define "elasticsearch.imagePullSecrets" -}}
{{ include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.metrics.image .Values.curator.image .Values.sysctlImage .Values.volumePermissions.image) "global" .Values.global) }}
{{- end -}}
{{/*
Return the proper image name (for the init container volume-permissions image)
*/}}
{{- define "elasticsearch.volumePermissions.image" -}}
{{ include "common.images.image" (dict "imageRoot" .Values.volumePermissions.image "global" .Values.global) }}
{{- end -}}
{{/*
Return the proper Storage Class
Usage:
{{ include "elasticsearch.storageClass" (dict "global" .Values.global "local" .Values.master) }}
*/}}
{{- define "elasticsearch.storageClass" -}}
{{/*
Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
but Helm 2.9 and 2.10 do not support it, so we need to implement this if-else logic.
*/}}
{{- if .global -}}
{{- if .global.storageClass -}}
{{- if (eq "-" .global.storageClass) -}}
{{- printf "storageClassName: \"\"" -}}
{{- else }}
{{- printf "storageClassName: %s" .global.storageClass -}}
{{- end -}}
{{- else -}}
{{- if .local.persistence.storageClass -}}
{{- if (eq "-" .local.persistence.storageClass) -}}
{{- printf "storageClassName: \"\"" -}}
{{- else }}
{{- printf "storageClassName: %s" .local.persistence.storageClass -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- else -}}
{{- if .local.persistence.storageClass -}}
{{- if (eq "-" .local.persistence.storageClass) -}}
{{- printf "storageClassName: \"\"" -}}
{{- else }}
{{- printf "storageClassName: %s" .local.persistence.storageClass -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Return the appropriate apiVersion for cronjob APIs.
*/}}
{{- define "cronjob.apiVersion" -}}
{{- if semverCompare "< 1.8-0" .Capabilities.KubeVersion.GitVersion -}}
{{- print "batch/v2alpha1" }}
{{- else if and (semverCompare ">=1.8-0" .Capabilities.KubeVersion.GitVersion) (semverCompare "< 1.21-0" .Capabilities.KubeVersion.GitVersion) -}}
{{- print "batch/v1beta1" }}
{{- else if semverCompare ">=1.21-0" .Capabilities.KubeVersion.GitVersion -}}
{{- print "batch/v1" }}
{{- end -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "elasticsearch.curator.fullname" -}}
{{- printf "%s-%s" (include "common.names.fullname" .) .Values.curator.name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "elasticsearch.curator.serviceAccountName" -}}
{{- if .Values.curator.serviceAccount.create -}}
{{ default (include "elasticsearch.curator.fullname" .) .Values.curator.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.curator.serviceAccount.name }}
{{- end -}}
{{- end -}}
{{/*
Return the proper ES curator image name
*/}}
{{- define "elasticsearch.curator.image" -}}
{{ include "common.images.image" (dict "imageRoot" .Values.curator.image "global" .Values.global) }}
{{- end -}}
{{/*
Return the elasticsearch TLS credentials secret for master nodes.
*/}}
{{- define "elasticsearch.master.tlsSecretName" -}}
{{- $secretName := .Values.security.tls.master.existingSecret -}}
{{- if $secretName -}}
{{- printf "%s" (tpl $secretName $) -}}
{{- else -}}
{{- printf "%s-crt" (include "elasticsearch.master.fullname" .) -}}
{{- end -}}
{{- end -}}
{{/*
Return the elasticsearch TLS credentials secret for data nodes.
*/}}
{{- define "elasticsearch.data.tlsSecretName" -}}
{{- $secretName := .Values.security.tls.data.existingSecret -}}
{{- if $secretName -}}
{{- printf "%s" (tpl $secretName $) -}}
{{- else -}}
{{- printf "%s-crt" (include "elasticsearch.data.fullname" .) -}}
{{- end -}}
{{- end -}}
{{/*
Return the elasticsearch TLS credentials secret for ingest nodes.
*/}}
{{- define "elasticsearch.ingest.tlsSecretName" -}}
{{- $secretName := .Values.security.tls.ingest.existingSecret -}}
{{- if $secretName -}}
{{- printf "%s" (tpl $secretName $) -}}
{{- else -}}
{{- printf "%s-crt" (include "elasticsearch.ingest.fullname" .) -}}
{{- end -}}
{{- end -}}
{{/*
Return the elasticsearch TLS credentials secret for coordinating-only nodes.
*/}}
{{- define "elasticsearch.coordinating.tlsSecretName" -}}
{{- $secretName := .Values.security.tls.coordinating.existingSecret -}}
{{- if $secretName -}}
{{- printf "%s" (tpl $secretName $) -}}
{{- else -}}
{{- printf "%s-crt" (include "elasticsearch.coordinating.fullname" .) -}}
{{- end -}}
{{- end -}}
{{/*
Return true if a TLS credentials secret object should be created
*/}}
{{- define "elasticsearch.createTlsSecret" -}}
{{- if and .Values.security.enabled .Values.security.tls.autoGenerated (not (include "elasticsearch.security.tlsSecretsProvided" .)) }}
{{- true -}}
{{- end -}}
{{- end -}}
{{/*
Return true if an authentication credentials secret object should be created
*/}}
{{- define "elasticsearch.createSecret" -}}
{{- if and .Values.security.enabled (not .Values.security.existingSecret) }}
{{- true -}}
{{- end -}}
{{- end -}}
{{/*
Return the Elasticsearch authentication credentials secret name
*/}}
{{- define "elasticsearch.secretName" -}}
{{- coalesce .Values.security.existingSecret (include "common.names.fullname" .) -}}
{{- end -}}
{{/*
Return true if a TLS password secret object should be created
*/}}
{{- define "elasticsearch.createTlsPasswordsSecret" -}}
{{- if and .Values.security.enabled (not .Values.security.tls.passwordsSecret) (or .Values.security.tls.keystorePassword .Values.security.tls.truststorePassword .Values.security.tls.keyPassword ) }}
{{- true -}}
{{- end -}}
{{- end -}}
{{/*
Return the Elasticsearch TLS password secret name
*/}}
{{- define "elasticsearch.tlsPasswordsSecret" -}}
{{- coalesce .Values.security.tls.passwordsSecret (printf "%s-tls-pass" (include "common.names.fullname" .)) -}}
{{- end -}}
{{/*
Add environment variables to configure database values
*/}}
{{- define "elasticsearch.configure.security" -}}
- name: ELASTICSEARCH_ENABLE_SECURITY
value: "true"
- name: ELASTICSEARCH_PASSWORD
valueFrom:
secretKeyRef:
name: {{ include "elasticsearch.secretName" . }}
key: elasticsearch-password
- name: ELASTICSEARCH_ENABLE_FIPS_MODE
value: {{ .Values.security.fipsMode | quote }}
- name: ELASTICSEARCH_TLS_VERIFICATION_MODE
value: {{ .Values.security.tls.verificationMode | quote }}
- name: ELASTICSEARCH_ENABLE_REST_TLS
value: {{ ternary "true" "false" .Values.security.tls.restEncryption | quote }}
{{- if or (include "elasticsearch.createTlsSecret" .) .Values.security.tls.usePemCerts }}
- name: ELASTICSEARCH_TLS_USE_PEM
value: "true"
{{- else }}
- name: ELASTICSEARCH_KEYSTORE_LOCATION
value: "/opt/bitnami/elasticsearch/config/certs/{{ .Values.security.tls.keystoreFilename }}"
- name: ELASTICSEARCH_TRUSTSTORE_LOCATION
value: "/opt/bitnami/elasticsearch/config/certs/{{ .Values.security.tls.truststoreFilename }}"
{{- end }}
{{- if and (not .Values.security.tls.usePemCerts) (or .Values.security.tls.keystorePassword .Values.security.tls.passwordsSecret) }}
- name: ELASTICSEARCH_KEYSTORE_PASSWORD
valueFrom:
secretKeyRef:
name: {{ include "elasticsearch.tlsPasswordsSecret" . }}
key: keystore-password
{{- end }}
{{- if and (not .Values.security.tls.usePemCerts) (or .Values.security.tls.truststorePassword .Values.security.tls.passwordsSecret) }}
- name: ELASTICSEARCH_TRUSTSTORE_PASSWORD
valueFrom:
secretKeyRef:
name: {{ include "elasticsearch.tlsPasswordsSecret" . }}
key: truststore-password
{{- end }}
{{- if and .Values.security.tls.usePemCerts (or .Values.security.tls.keyPassword .Values.security.tls.passwordsSecret) }}
- name: ELASTICSEARCH_KEY_PASSWORD
value: {{ .Values.security.tls.keyPassword | quote }}
{{- end }}
{{- end -}}
{{/*
Returns true if at least 1 existing secret was provided
*/}}
{{- define "elasticsearch.security.tlsSecretsProvided" -}}
{{- $masterSecret :=.Values.security.tls.master.existingSecret -}}
{{- $dataSecret :=.Values.security.tls.data.existingSecret -}}
{{- $coordSecret :=.Values.security.tls.coordinating.existingSecret -}}
{{- $ingestSecret :=.Values.security.tls.ingest.existingSecret -}}
{{- $ingestEnabled := .Values.ingest.enabled -}}
{{- if or $masterSecret $dataSecret $coordSecret (and $ingestEnabled $ingestSecret) }}
{{- true -}}
{{- end -}}
{{- end -}}
{{/* Validate values of Elasticsearch - Existing secret not provided for master nodes */}}
{{- define "elasticsearch.validateValues.security.missingTlsSecrets.master" -}}
{{- if and .Values.security.enabled (include "elasticsearch.security.tlsSecretsProvided" .) (not .Values.security.tls.master.existingSecret) -}}
elasticsearch: security.tls.master.existingSecret
Missing secret containing the TLS certificates for the Elasticsearch master nodes.
Provide the certificates using --set .Values.security.tls.master.existingSecret="my-secret".
{{- end -}}
{{- end -}}
{{/* Validate values of Elasticsearch - Existing secret not provided for data nodes */}}
{{- define "elasticsearch.validateValues.security.missingTlsSecrets.data" -}}
{{- if and .Values.security.enabled (include "elasticsearch.security.tlsSecretsProvided" .) (not .Values.security.tls.data.existingSecret) -}}
elasticsearch: security.tls.data.existingSecret
Missing secret containing the TLS certificates for the Elasticsearch data nodes.
Provide the certificates using --set .Values.security.tls.data.existingSecret="my-secret".
{{- end -}}
{{- end -}}
{{/* Validate values of Elasticsearch - Existing secret not provided for coordinating-only nodes */}}
{{- define "elasticsearch.validateValues.security.missingTlsSecrets.coordinating" -}}
{{- if and .Values.security.enabled (include "elasticsearch.security.tlsSecretsProvided" .) (not .Values.security.tls.coordinating.existingSecret) -}}
elasticsearch: security.tls.coordinating.existingSecret
Missing secret containing the TLS certificates for the Elasticsearch coordinating-only nodes.
Provide the certificates using --set .Values.security.tls.coordinating.existingSecret="my-secret".
{{- end -}}
{{- end -}}
{{/* Validate values of Elasticsearch - Existing secret not provided for ingest nodes */}}
{{- define "elasticsearch.validateValues.security.missingTlsSecrets.ingest" -}}
{{- if and .Values.security.enabled .Values.ingest.enabled (include "elasticsearch.security.tlsSecretsProvided" .) (not .Values.security.tls.ingest.existingSecret) -}}
elasticsearch: security.tls.ingest.existingSecret
Missing secret containing the TLS certificates for the Elasticsearch ingest nodes.
Provide the certificates using --set .Values.security.tls.ingest.existingSecret="my-secret".
{{- end -}}
{{- end -}}
{{/* Validate values of Elasticsearch - TLS enabled but no certificates provided */}}
{{- define "elasticsearch.validateValues.security.tls" -}}
{{- if and .Values.security.enabled (not .Values.security.tls.autoGenerated) (not (include "elasticsearch.security.tlsSecretsProvided" .)) -}}
elasticsearch: security.tls
In order to enable X-Pack Security, it is necessary to configure TLS.
Three different mechanisms can be used:
- Provide an existing secret containing the Keystore and Truststore for each role
- Provide an existing secret containing the PEM certificates for each role and enable `security.tls.usePemCerts=true`
- Enable using auto-generated certificates with `security.tls.autoGenerated=true`
Existing secrets containing either JKS/PKCS12 or PEM certificates can be provided using --set Values.security.tls.master.existingSecret=master-certs,
--set Values.security.tls.data.existingSecret=data-certs, --set Values.security.tls.coordinating.existingSecret=coordinating-certs, --set Values.security.tls.ingest.existingSecret=ingest-certs
{{- end -}}
{{- end -}}
{{/*
Compile all warnings into a single message, and call fail.
*/}}
{{- define "elasticsearch.validateValues" -}}
{{- $messages := list -}}
{{- $messages := append $messages (include "elasticsearch.validateValues.security.tls" .) -}}
{{- $messages := append $messages (include "elasticsearch.validateValues.security.missingTlsSecrets.master" .) -}}
{{- $messages := append $messages (include "elasticsearch.validateValues.security.missingTlsSecrets.data" .) -}}
{{- $messages := append $messages (include "elasticsearch.validateValues.security.missingTlsSecrets.coordinating" .) -}}
{{- $messages := append $messages (include "elasticsearch.validateValues.security.missingTlsSecrets.ingest" .) -}}
{{- $messages := without $messages "" -}}
{{- $message := join "\n" $messages -}}
{{- if $message -}}
{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}}
{{- end -}}
{{- end -}}
{{/*
Sysctl set if less than
*/}}
{{- define "elasticsearch.sysctlIfLess" -}}
CURRENT=`sysctl -n {{ .key }}`;
DESIRED="{{ .value }}";
if [ "$DESIRED" -gt "$CURRENT" ]; then
sysctl -w {{ .key }}={{ .value }};
fi;
{{- end -}}

View File

@ -0,0 +1,11 @@
{{- if .Values.curator.enabled }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "elasticsearch.curator.fullname" . }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
app.kubernetes.io/component: curator
data:
action_file.yml: {{ required "A valid .Values.curator.configMaps.action_file_yml entry is required!" (toYaml .Values.curator.configMaps.action_file_yml | indent 2) }}
config.yml: {{ required "A valid .Values.curator.configMaps.config_yml entry is required!" (tpl (toYaml .Values.curator.configMaps.config_yml | indent 2) $) }}
{{- end }}

View File

@ -0,0 +1,16 @@
{{- if or .Values.config .Values.extraConfig }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "common.names.fullname" . }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
data:
{{- if .Values.config }}
elasticsearch.yml: |-
{{- toYaml .Values.config | nindent 4 }}
{{- end}}
{{- if .Values.extraConfig }}
my_elasticsearch.yml: |-
{{- toYaml .Values.extraConfig | nindent 4 }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,12 @@
{{- if .Values.initScripts }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "elasticsearch.initScripts" . }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
component: master
data:
{{- with .Values.initScripts }}
{{ toYaml . | indent 2 }}
{{- end }}
{{ end }}

View File

@ -0,0 +1,35 @@
{{- if .Values.coordinating.autoscaling.enabled }}
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "elasticsearch.coordinating.fullname" . }}
namespace: {{ .Release.Namespace | quote }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
app.kubernetes.io/component: coordinating-only
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 4 }}
{{- end }}
{{- if .Values.commonAnnotations }}
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
spec:
scaleTargetRef:
apiVersion: {{ template "common.capabilities.deployment.apiVersion" . }}
kind: StatefulSet
name: {{ include "elasticsearch.coordinating.fullname" . }}
minReplicas: {{ .Values.coordinating.autoscaling.minReplicas }}
maxReplicas: {{ .Values.coordinating.autoscaling.maxReplicas }}
metrics:
{{- if .Values.coordinating.autoscaling.targetCPU }}
- type: Resource
resource:
name: cpu
targetAverageUtilization: {{ .Values.coordinating.autoscaling.targetCPU }}
{{- end }}
{{- if .Values.coordinating.autoscaling.targetMemory }}
- type: Resource
resource:
name: memory
targetAverageUtilization: {{ .Values.coordinating.autoscaling.targetMemory }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,280 @@
apiVersion: {{ template "common.capabilities.statefulset.apiVersion" . }}
kind: StatefulSet
metadata:
name: {{ include "elasticsearch.coordinating.fullname" . }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
app.kubernetes.io/component: coordinating-only
## Istio Labels: https://istio.io/docs/ops/deployment/requirements/
app: coordinating-only
spec:
updateStrategy:
type: {{ .Values.coordinating.updateStrategy.type }}
{{- if (eq "Recreate" .Values.coordinating.updateStrategy.type) }}
rollingUpdate: null
{{- end }}
selector:
matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
app.kubernetes.io/component: coordinating-only
podManagementPolicy: Parallel
{{- if not .Values.coordinating.autoscaling.enabled }}
replicas: {{ .Values.coordinating.replicas }}
{{- end }}
serviceName: {{ template "elasticsearch.coordinating.fullname" . }}
template:
metadata:
labels: {{- include "common.labels.standard" . | nindent 8 }}
app.kubernetes.io/component: coordinating-only
## Istio Labels: https://istio.io/docs/ops/deployment/requirements/
app: coordinating-only
{{- if .Values.coordinating.podLabels }}
{{- include "common.tplvalues.render" (dict "value" .Values.coordinating.podLabels "context" $) | nindent 8 }}
{{- end }}
annotations:
{{- if and (include "elasticsearch.createTlsSecret" .) (not .Values.security.tls.coordinating.existingSecret) }}
checksum/tls: {{ include (print $.Template.BasePath "/tls-secret.yaml") . | sha256sum }}
{{- end }}
{{- if .Values.coordinating.podAnnotations }}
{{- include "common.tplvalues.render" (dict "value" .Values.coordinating.podAnnotations "context" $) | nindent 8 }}
{{- end }}
spec:
{{- include "elasticsearch.imagePullSecrets" . | nindent 6 }}
{{- if .Values.coordinating.hostAliases }}
hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.hostAliases "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.coordinating.schedulerName }}
schedulerName: {{ .Values.coordinating.schedulerName }}
{{- end }}
{{- if .Values.coordinating.priorityClassName }}
priorityClassName: {{ .Values.coordinating.priorityClassName | quote }}
{{- end }}
{{- if .Values.coordinating.affinity }}
affinity: {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.affinity "context" $) | nindent 8 }}
{{- else }}
affinity:
podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.coordinating.podAffinityPreset "component" "coordinating-only" "context" $) | nindent 10 }}
podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.coordinating.podAntiAffinityPreset "component" "coordinating-only" "context" $) | nindent 10 }}
nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.coordinating.nodeAffinityPreset.type "key" .Values.coordinating.nodeAffinityPreset.key "values" .Values.coordinating.nodeAffinityPreset.values) | nindent 10 }}
{{- end }}
{{- if .Values.coordinating.nodeSelector }}
nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.nodeSelector "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.coordinating.tolerations }}
tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.tolerations "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.coordinating.topologySpreadConstraints }}
topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.topologySpreadConstraints "context" $) | nindent 8 }}
{{- end }}
serviceAccountName: {{ template "elasticsearch.coordinating.serviceAccountName" . }}
{{- if or .Values.coordinating.podSecurityContext.enabled .Values.coordinating.securityContext.enabled }}
securityContext:
{{- if .Values.coordinating.podSecurityContext.enabled }}
{{- omit .Values.coordinating.podSecurityContext "enabled" | toYaml | nindent 8 }}
{{- else }}
fsGroup: {{ .Values.coordinating.securityContext.fsGroup }}
{{- end }}
{{- end }}
{{- if or .Values.coordinating.initContainers .Values.sysctlImage.enabled }}
initContainers:
{{- if .Values.sysctlImage.enabled }}
## Image that performs the sysctl operation to modify Kernel settings (needed sometimes to avoid boot errors)
- name: sysctl
image: {{ include "elasticsearch.sysctl.image" . }}
imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
command:
- /bin/bash
- -ec
- |
{{- include "elasticsearch.sysctlIfLess" (dict "key" "vm.max_map_count" "value" "262144") | nindent 14 }}
{{- include "elasticsearch.sysctlIfLess" (dict "key" "fs.file-max" "value" "65536") | nindent 14 }}
securityContext:
privileged: true
{{- if .Values.sysctlImage.resources }}
resources: {{- toYaml .Values.sysctlImage.resources | nindent 12 }}
{{- end }}
{{- end }}
{{- if .Values.coordinating.initContainers }}
{{- include "common.tplvalues.render" (dict "value" .Values.coordinating.initContainers "context" $) | nindent 8 }}
{{- end }}
{{- end }}
containers:
- name: elasticsearch
image: {{ include "elasticsearch.image" . }}
imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
{{- if or .Values.coordinating.containerSecurityContext.enabled .Values.coordinating.securityContext.enabled }}
securityContext:
{{- if .Values.coordinating.containerSecurityContext.enabled }}
{{- omit .Values.coordinating.containerSecurityContext "enabled" | toYaml | nindent 12 }}
{{- else }}
runAsUser: {{ .Values.coordinating.securityContext.runAsUser }}
{{- end }}
{{- end }}
{{- if .Values.diagnosticMode.enabled }}
command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }}
args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }}
{{- end }}
env:
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: BITNAMI_DEBUG
value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }}
- name: ELASTICSEARCH_CLUSTER_NAME
value: {{ .Values.name | quote }}
- name: ELASTICSEARCH_CLUSTER_HOSTS
value: {{ include "elasticsearch.hosts" . | quote }}
- name: ELASTICSEARCH_TOTAL_NODES
value: {{ add (ternary .Values.master.autoscaling.minReplicas .Values.master.replicas .Values.master.autoscaling.enabled) (ternary .Values.data.autoscaling.minReplicas .Values.data.replicas .Values.data.autoscaling.enabled) | quote }}
- name: ELASTICSEARCH_CLUSTER_MASTER_HOSTS
{{- $elasticsearchMasterFullname := include "elasticsearch.master.fullname" . }}
{{- $replicas := int (ternary .Values.master.autoscaling.minReplicas .Values.master.replicas .Values.master.autoscaling.enabled) }}
value: {{range $i, $e := until $replicas }}{{ $elasticsearchMasterFullname }}-{{ $e }} {{ end }}
- name: ELASTICSEARCH_MINIMUM_MASTER_NODES
value: {{ add (div (ternary .Values.master.autoscaling.minReplicas .Values.master.replicas .Values.master.autoscaling.enabled) 2) 1 | quote }}
- name: ELASTICSEARCH_ADVERTISED_HOSTNAME
value: "$(MY_POD_NAME).{{ include "elasticsearch.coordinating.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}"
{{- if .Values.plugins }}
- name: ELASTICSEARCH_PLUGINS
value: {{ .Values.plugins | quote }}
{{- end }}
- name: ELASTICSEARCH_HEAP_SIZE
value: {{ .Values.coordinating.heapSize | quote }}
- name: ELASTICSEARCH_IS_DEDICATED_NODE
value: "yes"
- name: ELASTICSEARCH_NODE_TYPE
value: "coordinating"
{{- if .Values.security.enabled }}
{{- include "elasticsearch.configure.security" . | nindent 12 }}
{{- end }}
{{- if .Values.extraEnvVars }}
{{- include "common.tplvalues.render" ( dict "value" .Values.extraEnvVars "context" $ ) | nindent 12 }}
{{- end }}
{{- if or .Values.extraEnvVarsConfigMap .Values.extraEnvVarsSecret }}
envFrom:
{{- if .Values.extraEnvVarsConfigMap }}
- configMapRef:
name: {{ include "common.tplvalues.render" ( dict "value" .Values.extraEnvVarsConfigMap "context" $ ) }}
{{- end }}
{{- if .Values.extraEnvVarsSecret }}
- secretRef:
name: {{ include "common.tplvalues.render" ( dict "value" .Values.extraEnvVarsSecret "context" $ ) }}
{{- end }}
{{- end }}
ports:
- name: http
containerPort: 9200
- name: transport
containerPort: 9300
{{- if not .Values.diagnosticMode.enabled }}
{{- if .Values.coordinating.startupProbe.enabled }}
startupProbe:
initialDelaySeconds: {{ .Values.coordinating.startupProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.coordinating.startupProbe.periodSeconds }}
timeoutSeconds: {{ .Values.coordinating.startupProbe.timeoutSeconds }}
successThreshold: {{ .Values.coordinating.startupProbe.successThreshold }}
failureThreshold: {{ .Values.coordinating.startupProbe.failureThreshold }}
exec:
command:
- /opt/bitnami/scripts/elasticsearch/healthcheck.sh
{{- else if .Values.coordinating.customStartupProbe }}
startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.customStartupProbe "context" $) | nindent 12 }}
{{- end }}
{{- if .Values.coordinating.livenessProbe.enabled }}
livenessProbe:
initialDelaySeconds: {{ .Values.coordinating.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.coordinating.livenessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.coordinating.livenessProbe.timeoutSeconds }}
successThreshold: {{ .Values.coordinating.livenessProbe.successThreshold }}
failureThreshold: {{ .Values.coordinating.livenessProbe.failureThreshold }}
exec:
command:
- /opt/bitnami/scripts/elasticsearch/healthcheck.sh
{{- else if .Values.coordinating.customLivenessProbe }}
livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.customLivenessProbe "context" $) | nindent 12 }}
{{- end }}
{{- if .Values.coordinating.readinessProbe.enabled}}
readinessProbe:
initialDelaySeconds: {{ .Values.coordinating.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.coordinating.readinessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.coordinating.readinessProbe.timeoutSeconds }}
successThreshold: {{ .Values.coordinating.readinessProbe.successThreshold }}
failureThreshold: {{ .Values.coordinating.readinessProbe.failureThreshold }}
exec:
command:
- /opt/bitnami/scripts/elasticsearch/healthcheck.sh
{{- else if .Values.coordinating.customReadinessProbe }}
readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.customReadinessProbe "context" $) | nindent 12 }}
{{- end }}
{{- end }}
{{- if .Values.coordinating.resources }}
resources: {{- toYaml .Values.coordinating.resources | nindent 12 }}
{{- end}}
volumeMounts:
{{- if .Values.config }}
- mountPath: /opt/bitnami/elasticsearch/config/elasticsearch.yml
name: config
subPath: elasticsearch.yml
{{- end }}
{{- if .Values.extraConfig }}
- mountPath: /opt/bitnami/elasticsearch/config/my_elasticsearch.yml
name: config
subPath: my_elasticsearch.yml
{{- end }}
- name: data
mountPath: /bitnami/elasticsearch/data
{{- if .Values.security.enabled }}
- name: elasticsearch-certificates
mountPath: /opt/bitnami/elasticsearch/config/certs
readOnly: true
{{- end }}
{{- if .Values.extraVolumeMounts }}
{{- toYaml .Values.extraVolumeMounts | nindent 12 }}
{{- end }}
{{- if .Values.initScripts }}
- name: custom-init-scripts
mountPath: /docker-entrypoint-initdb.d/init-scripts
{{- end }}
{{- if .Values.initScriptsCM }}
- name: custom-init-scripts-cm
mountPath: /docker-entrypoint-initdb.d/init-scripts-cm
{{- end }}
{{- if .Values.initScriptsSecret }}
- name: custom-init-scripts-secret
mountPath: /docker-entrypoint-initdb.d/init-scripts-secret
{{- end }}
{{- if .Values.coordinating.sidecars }}
{{- include "common.tplvalues.render" ( dict "value" .Values.coordinating.sidecars "context" $) | nindent 8 }}
{{- end }}
volumes:
{{- if or .Values.config .Values.extraConfig }}
- name: config
configMap:
name: {{ include "common.names.fullname" . }}
{{- end }}
{{- if .Values.security.enabled }}
- name: elasticsearch-certificates
secret:
secretName: {{ include "elasticsearch.coordinating.tlsSecretName" . }}
defaultMode: 256
{{- end }}
{{- if .Values.extraVolumes }}
{{- toYaml .Values.extraVolumes | nindent 8 }}
{{- end }}
{{- if .Values.initScripts }}
- name: custom-init-scripts
configMap:
name: {{ template "elasticsearch.initScripts" . }}
{{- end }}
{{- if .Values.initScriptsCM }}
- name: custom-init-scripts-cm
configMap:
name: {{ template "elasticsearch.initScriptsCM" . }}
{{- end }}
{{- if .Values.initScriptsSecret }}
- name: custom-init-scripts-secret
secret:
secretName: {{ template "elasticsearch.initScriptsSecret" . }}
defaultMode: 0755
{{- end }}
- name: "data"
emptyDir: {}

View File

@ -0,0 +1,27 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "elasticsearch.coordinating.fullname" . }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
app.kubernetes.io/component: coordinating-only
annotations: {{ include "common.tplvalues.render" ( dict "value" .Values.coordinating.service.annotations "context" $) | nindent 4 }}
spec:
type: {{ .Values.coordinating.service.type | quote }}
{{- if and (eq .Values.coordinating.service.type "LoadBalancer") (not (empty .Values.coordinating.service.loadBalancerIP)) }}
loadBalancerIP: {{ .Values.coordinating.service.loadBalancerIP }}
externalTrafficPolicy: {{ .Values.coordinating.service.externalTrafficPolicy | quote }}
{{- end }}
publishNotReadyAddresses: true
ports:
- name: http
port: {{ .Values.coordinating.service.port }}
targetPort: http
{{- if and (or (eq .Values.coordinating.service.type "NodePort") (eq .Values.coordinating.service.type "LoadBalancer")) (not (empty .Values.coordinating.service.nodePort)) }}
nodePort: {{ .Values.coordinating.service.nodePort }}
{{- else if eq .Values.coordinating.service.type "ClusterIP" }}
nodePort: null
{{- end }}
- name: tcp-transport
port: 9300
selector: {{- include "common.labels.matchLabels" . | nindent 4 }}
app.kubernetes.io/component: coordinating-only

View File

@ -0,0 +1,130 @@
{{- if .Values.curator.enabled }}
apiVersion: {{ template "cronjob.apiVersion" . }}
kind: CronJob
metadata:
name: {{ template "elasticsearch.curator.fullname" . }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
app.kubernetes.io/component: curator
## Istio Labels: https://istio.io/docs/ops/deployment/requirements/
app: curator
{{- if .Values.curator.cronjob.annotations }}
annotations: {{- toYaml .Values.curator.cronjob.annotations | nindent 4 }}
{{- end }}
spec:
schedule: "{{ .Values.curator.cronjob.schedule }}"
{{- with .Values.curator.cronjob.concurrencyPolicy }}
concurrencyPolicy: {{ . }}
{{- end }}
{{- with .Values.curator.cronjob.failedJobsHistoryLimit }}
failedJobsHistoryLimit: {{ . }}
{{- end }}
{{- with .Values.curator.cronjob.successfulJobsHistoryLimit }}
successfulJobsHistoryLimit: {{ . }}
{{- end }}
jobTemplate:
metadata:
labels: {{- include "common.labels.standard" . | nindent 8 }}
app.kubernetes.io/component: curator
## Istio Labels: https://istio.io/docs/ops/deployment/requirements/
app: curator
{{- if .Values.curator.podLabels }}
{{- include "common.tplvalues.render" (dict "value" .Values.curator.podLabels "context" $) | nindent 8 }}
{{- end }}
spec:
template:
metadata:
labels: {{- include "common.labels.standard" . | nindent 12 }}
app.kubernetes.io/component: curator
## Istio Labels: https://istio.io/docs/ops/deployment/requirements/
app: curator
{{- if .Values.curator.podAnnotations }}
annotations: {{- toYaml .Values.curator.podAnnotations | nindent 12 }}
{{- end }}
spec:
volumes:
- name: config-volume
configMap:
name: {{ template "elasticsearch.curator.fullname" . }}
{{- if .Values.curator.extraVolumes }}
{{- toYaml .Values.curator.extraVolumes | nindent 12 }}
{{- end }}
restartPolicy: {{ .Values.curator.cronjob.jobRestartPolicy }}
{{- if .Values.curator.priorityClassName }}
priorityClassName: {{ .Values.curator.priorityClassName | quote }}
{{- end }}
{{- include "elasticsearch.imagePullSecrets" . | indent 10 }}
{{- $initContainers := coalesce .Values.curator.initContainers .Values.curator.extraInitContainers -}}
{{- if $initContainers }}
initContainers: {{- include "common.tplvalues.render" (dict "value" $initContainers "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.curator.schedulerName }}
schedulerName: {{ .Values.curator.schedulerName }}
{{- end }}
{{- if .Values.curator.rbac.enabled }}
serviceAccountName: {{ include "elasticsearch.curator.serviceAccountName" . }}
{{- end }}
{{- if .Values.curator.affinity }}
affinity: {{- include "common.tplvalues.render" (dict "value" .Values.curator.affinity "context" $) | nindent 12 }}
{{- else }}
affinity:
podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.curator.podAffinityPreset "component" "curator" "context" $) | nindent 14 }}
podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.curator.podAntiAffinityPreset "component" "curator" "context" $) | nindent 14 }}
nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.curator.nodeAffinityPreset.type "key" .Values.curator.nodeAffinityPreset.key "values" .Values.curator.nodeAffinityPreset.values) | nindent 14 }}
{{- end }}
{{- if .Values.curator.nodeSelector }}
nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.curator.nodeSelector "context" $) | nindent 12 }}
{{- end }}
{{- if .Values.curator.tolerations }}
tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.curator.tolerations "context" $) | nindent 12 }}
{{- end }}
{{- if .Values.curator.topologySpreadConstraints }}
topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.curator.topologySpreadConstraints "context" $) | nindent 12 }}
{{- end }}
{{- if .Values.curator.securityContext }}
securityContext: {{- toYaml .Values.curator.securityContext | nindent 12 }}
{{- end }}
containers:
- name: {{ template "elasticsearch.curator.fullname" . }}
image: {{ template "elasticsearch.curator.image" . }}
imagePullPolicy: {{ .Values.curator.image.pullPolicy | quote }}
volumeMounts:
- name: config-volume
mountPath: /etc/es-curator
{{- if .Values.curator.extraVolumeMounts }}
{{- toYaml .Values.curator.extraVolumeMounts | nindent 16 }}
{{- end }}
{{- if .Values.diagnosticMode.enabled }}
command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 16 }}
{{ else if .Values.curator.command }}
command: {{ toYaml .Values.curator.command | nindent 16 }}
{{- end }}
{{- if .Values.diagnosticMode.enabled }}
args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 16 }}
{{- else if .Values.curator.dryrun }}
args: [ "--dry-run", "--config", "/etc/es-curator/config.yml", "/etc/es-curator/action_file.yml" ]
{{- else }}
args: [ "--config", "/etc/es-curator/config.yml", "/etc/es-curator/action_file.yml" ]
{{- end }}
env:
{{- if .Values.curator.env }}
{{- range $key,$value := .Values.curator.env }}
- name: {{ $key | upper | quote}}
value: {{ $value | quote}}
{{- end }}
{{- end }}
{{- if .Values.curator.envFromSecrets }}
{{- range $key,$value := .Values.curator.envFromSecrets }}
- name: {{ $key | upper | quote}}
valueFrom:
secretKeyRef:
name: {{ $value.from.secret | quote}}
key: {{ $value.from.key | quote}}
{{- end }}
{{- end }}
{{- if .Values.curator.resources }}
resources: {{- toYaml .Values.curator.resources | nindent 16 }}
{{- end }}
{{- if .Values.curator.sidecars }}
{{- include "common.tplvalues.render" ( dict "value" .Values.curator.sidecars "context" $) | nindent 12 }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,35 @@
{{- if .Values.data.autoscaling.enabled }}
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "elasticsearch.data.fullname" . }}
namespace: {{ .Release.Namespace | quote }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
app.kubernetes.io/component: data
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 4 }}
{{- end }}
{{- if .Values.commonAnnotations }}
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
spec:
scaleTargetRef:
apiVersion: {{ template "common.capabilities.statefulset.apiVersion" . }}
kind: StatefulSet
name: {{ include "elasticsearch.data.fullname" . }}
minReplicas: {{ .Values.data.autoscaling.minReplicas }}
maxReplicas: {{ .Values.data.autoscaling.maxReplicas }}
metrics:
{{- if .Values.data.autoscaling.targetCPU }}
- type: Resource
resource:
name: cpu
targetAverageUtilization: {{ .Values.data.autoscaling.targetCPU }}
{{- end }}
{{- if .Values.data.autoscaling.targetMemory }}
- type: Resource
resource:
name: memory
targetAverageUtilization: {{ .Values.data.autoscaling.targetMemory }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,326 @@
apiVersion: {{ template "common.capabilities.statefulset.apiVersion" . }}
kind: StatefulSet
metadata:
name: {{ include "elasticsearch.data.fullname" . }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
app.kubernetes.io/component: data
## Istio Labels: https://istio.io/docs/ops/deployment/requirements/
app: data
spec:
updateStrategy:
type: {{ .Values.data.updateStrategy.type }}
{{- if (eq "OnDelete" .Values.data.updateStrategy.type) }}
rollingUpdate: null
{{- else if .Values.data.updateStrategy.rollingUpdatePartition }}
rollingUpdate:
partition: {{ .Values.data.updateStrategy.rollingUpdatePartition }}
{{- end }}
selector:
matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
app.kubernetes.io/component: data
podManagementPolicy: Parallel
{{- if not .Values.data.autoscaling.enabled }}
replicas: {{ .Values.data.replicas }}
{{- end }}
serviceName: {{ template "elasticsearch.data.fullname" . }}
template:
metadata:
labels: {{- include "common.labels.standard" . | nindent 8 }}
app.kubernetes.io/component: data
## Istio Labels: https://istio.io/docs/ops/deployment/requirements/
app: data
{{- if .Values.data.podLabels }}
{{- include "common.tplvalues.render" (dict "value" .Values.data.podLabels "context" $) | nindent 8 }}
{{- end }}
annotations:
{{- if and (include "elasticsearch.createTlsSecret" .) (not .Values.security.tls.data.existingSecret) }}
checksum/tls: {{ include (print $.Template.BasePath "/tls-secret.yaml") . | sha256sum }}
{{- end }}
{{- if .Values.data.podAnnotations }}
{{- include "common.tplvalues.render" (dict "value" .Values.data.podAnnotations "context" $) | nindent 8 }}
{{- end }}
spec:
{{- include "elasticsearch.imagePullSecrets" . | nindent 6 }}
{{- if .Values.data.priorityClassName }}
priorityClassName: {{ .Values.data.priorityClassName | quote }}
{{- end }}
{{- if .Values.data.affinity }}
affinity: {{- include "common.tplvalues.render" (dict "value" .Values.data.affinity "context" $) | nindent 8 }}
{{- else }}
affinity:
podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.data.podAffinityPreset "component" "data" "context" $) | nindent 10 }}
podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.data.podAntiAffinityPreset "component" "data" "context" $) | nindent 10 }}
nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.data.nodeAffinityPreset.type "key" .Values.data.nodeAffinityPreset.key "values" .Values.data.nodeAffinityPreset.values) | nindent 10 }}
{{- end }}
{{- if .Values.data.hostAliases }}
hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.data.hostAliases "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.data.schedulerName }}
schedulerName: {{ .Values.data.schedulerName }}
{{- end }}
{{- if .Values.data.nodeSelector }}
nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.data.nodeSelector "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.data.tolerations }}
tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.data.tolerations "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.data.topologySpreadConstraints }}
topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.data.topologySpreadConstraints "context" $) | nindent 8 }}
{{- end }}
serviceAccountName: {{ template "elasticsearch.data.serviceAccountName" . }}
{{- if or .Values.data.podSecurityContext.enabled .Values.data.securityContext.enabled }}
securityContext:
{{- if .Values.data.podSecurityContext.enabled }}
{{- omit .Values.data.podSecurityContext "enabled" | toYaml | nindent 8 }}
{{- else }}
fsGroup: {{ .Values.data.securityContext.fsGroup }}
{{- end }}
{{- end }}
{{- if or .Values.data.initContainers .Values.sysctlImage.enabled (and .Values.volumePermissions.enabled .Values.data.persistence.enabled) }}
initContainers:
{{- if .Values.sysctlImage.enabled }}
## Image that performs the sysctl operation to modify Kernel settings (needed sometimes to avoid boot errors)
- name: sysctl
image: {{ include "elasticsearch.sysctl.image" . }}
imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
command:
- /bin/bash
- -ec
- |
{{- include "elasticsearch.sysctlIfLess" (dict "key" "vm.max_map_count" "value" "262144") | nindent 14 }}
{{- include "elasticsearch.sysctlIfLess" (dict "key" "fs.file-max" "value" "65536") | nindent 14 }}
securityContext:
privileged: true
{{- if .Values.sysctlImage.resources }}
resources: {{- toYaml .Values.sysctlImage.resources | nindent 12 }}
{{- end }}
{{- end }}
{{- if and .Values.volumePermissions.enabled .Values.data.persistence.enabled }}
- name: volume-permissions
image: {{ include "elasticsearch.volumePermissions.image" . }}
imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }}
command:
- /bin/bash
- -ec
- |
chown -R {{ .Values.data.securityContext.runAsUser }}:{{ .Values.data.securityContext.fsGroup }} /bitnami/elasticsearch/data
securityContext:
runAsUser: 0
{{- if .Values.volumePermissions.resources }}
resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }}
{{- end }}
volumeMounts:
- name: data
mountPath: "/bitnami/elasticsearch/data"
{{- end }}
{{- if .Values.data.initContainers }}
{{- include "common.tplvalues.render" (dict "value" .Values.data.initContainers "context" $) | nindent 8 }}
{{- end }}
{{- end }}
containers:
- name: elasticsearch
image: {{ include "elasticsearch.image" . }}
imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
{{- if or .Values.data.containerSecurityContext.enabled .Values.data.securityContext.enabled }}
securityContext:
{{- if .Values.data.containerSecurityContext.enabled }}
{{- omit .Values.data.containerSecurityContext "enabled" | toYaml | nindent 12 }}
{{- else }}
runAsUser: {{ .Values.data.securityContext.runAsUser }}
{{- end }}
{{- end }}
{{- if .Values.diagnosticMode.enabled }}
command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }}
args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }}
{{- end }}
env:
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: BITNAMI_DEBUG
value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }}
- name: ELASTICSEARCH_CLUSTER_NAME
value: {{ .Values.name | quote }}
- name: ELASTICSEARCH_CLUSTER_HOSTS
value: {{ include "elasticsearch.hosts" . | quote }}
- name: ELASTICSEARCH_TOTAL_NODES
value: {{ add (ternary .Values.master.autoscaling.minReplicas .Values.master.replicas .Values.master.autoscaling.enabled) (ternary .Values.data.autoscaling.minReplicas .Values.data.replicas .Values.data.autoscaling.enabled) | quote }}
{{- if .Values.plugins }}
- name: ELASTICSEARCH_PLUGINS
value: {{ .Values.plugins | quote }}
{{- end }}
{{- if .Values.snapshotRepoPath }}
- name: ELASTICSEARCH_FS_SNAPSHOT_REPO_PATH
value: {{ .Values.snapshotRepoPath | quote }}
{{- end }}
- name: ELASTICSEARCH_HEAP_SIZE
value: {{ .Values.data.heapSize | quote }}
- name: ELASTICSEARCH_IS_DEDICATED_NODE
value: "yes"
- name: ELASTICSEARCH_NODE_TYPE
value: "data"
- name: ELASTICSEARCH_ADVERTISED_HOSTNAME
value: "$(MY_POD_NAME).{{ include "elasticsearch.data.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}"
{{- if .Values.security.enabled }}
{{- include "elasticsearch.configure.security" . | nindent 12 }}
{{- end }}
{{- if .Values.extraEnvVars }}
{{- include "common.tplvalues.render" ( dict "value" .Values.extraEnvVars "context" $ ) | nindent 12 }}
{{- end }}
{{- if or .Values.extraEnvVarsConfigMap .Values.extraEnvVarsSecret }}
envFrom:
{{- if .Values.extraEnvVarsConfigMap }}
- configMapRef:
name: {{ include "common.tplvalues.render" ( dict "value" .Values.extraEnvVarsConfigMap "context" $ ) }}
{{- end }}
{{- if .Values.extraEnvVarsSecret }}
- secretRef:
name: {{ include "common.tplvalues.render" ( dict "value" .Values.extraEnvVarsSecret "context" $ ) }}
{{- end }}
{{- end }}
ports:
- name: http
containerPort: 9200
- name: transport
containerPort: 9300
{{- if not .Values.diagnosticMode.enabled }}
{{- if .Values.data.startupProbe.enabled }}
startupProbe:
initialDelaySeconds: {{ .Values.data.startupProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.data.startupProbe.periodSeconds }}
timeoutSeconds: {{ .Values.data.startupProbe.timeoutSeconds }}
successThreshold: {{ .Values.data.startupProbe.successThreshold }}
failureThreshold: {{ .Values.data.startupProbe.failureThreshold }}
exec:
command:
- /opt/bitnami/scripts/elasticsearch/healthcheck.sh
{{- else if .Values.data.customStartupProbe }}
startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.data.customStartupProbe "context" $) | nindent 12 }}
{{- end }}
{{- if .Values.data.livenessProbe.enabled }}
livenessProbe:
initialDelaySeconds: {{ .Values.data.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.data.livenessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.data.livenessProbe.timeoutSeconds }}
successThreshold: {{ .Values.data.livenessProbe.successThreshold }}
failureThreshold: {{ .Values.data.livenessProbe.failureThreshold }}
exec:
command:
- /opt/bitnami/scripts/elasticsearch/healthcheck.sh
{{- else if .Values.data.customLivenessProbe }}
livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.data.customLivenessProbe "context" $) | nindent 12 }}
{{- end }}
{{- if .Values.data.readinessProbe.enabled }}
readinessProbe:
initialDelaySeconds: {{ .Values.data.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.data.readinessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.data.readinessProbe.timeoutSeconds }}
successThreshold: {{ .Values.data.readinessProbe.successThreshold }}
failureThreshold: {{ .Values.data.readinessProbe.failureThreshold }}
exec:
command:
- /opt/bitnami/scripts/elasticsearch/healthcheck.sh
{{- else if .Values.data.customReadinessProbe }}
readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.data.customReadinessProbe "context" $) | nindent 12 }}
{{- end }}
{{- end }}
{{- if .Values.data.resources }}
resources: {{- toYaml .Values.data.resources | nindent 12 }}
{{- end }}
volumeMounts:
{{- if .Values.config }}
- mountPath: /opt/bitnami/elasticsearch/config/elasticsearch.yml
name: "config"
subPath: elasticsearch.yml
{{- end }}
{{- if .Values.extraConfig }}
- mountPath: /opt/bitnami/elasticsearch/config/my_elasticsearch.yml
name: config
subPath: my_elasticsearch.yml
{{- end }}
- name: "data"
mountPath: "/bitnami/elasticsearch/data"
{{- if .Values.security.enabled }}
- name: elasticsearch-certificates
mountPath: /opt/bitnami/elasticsearch/config/certs
readOnly: true
{{- end }}
{{- if .Values.extraVolumeMounts }}
{{- toYaml .Values.extraVolumeMounts | nindent 12 }}
{{- end }}
{{- if .Values.initScripts }}
- name: custom-init-scripts
mountPath: /docker-entrypoint-initdb.d/init-scripts
{{- end }}
{{- if .Values.initScriptsCM }}
- name: custom-init-scripts-cm
mountPath: /docker-entrypoint-initdb.d/init-scripts-cm
{{- end }}
{{- if .Values.initScriptsSecret }}
- name: custom-init-scripts-secret
mountPath: /docker-entrypoint-initdb.d/init-scripts-secret
{{- end }}
{{- if .Values.data.sidecars }}
{{- include "common.tplvalues.render" ( dict "value" .Values.data.sidecars "context" $) | nindent 8 }}
{{- end }}
volumes:
{{- if or .Values.config .Values.extraConfig }}
- name: "config"
configMap:
name: {{ template "common.names.fullname" . }}
{{- end }}
{{- if .Values.security.enabled }}
- name: elasticsearch-certificates
secret:
secretName: {{ include "elasticsearch.data.tlsSecretName" . }}
defaultMode: 256
{{- end }}
{{- if .Values.extraVolumes }}
{{- toYaml .Values.extraVolumes | nindent 8 }}
{{- end }}
{{- if .Values.initScripts }}
- name: custom-init-scripts
configMap:
name: {{ template "elasticsearch.initScripts" . }}
{{- end }}
{{- if .Values.initScriptsCM }}
- name: custom-init-scripts-cm
configMap:
name: {{ template "elasticsearch.initScriptsCM" . }}
{{- end }}
{{- if .Values.initScriptsSecret }}
- name: custom-init-scripts-secret
secret:
secretName: {{ template "elasticsearch.initScriptsSecret" . }}
defaultMode: 0755
{{- end }}
{{- if not .Values.data.persistence.enabled }}
- name: "data"
emptyDir: {}
{{- else if .Values.data.persistence.existingClaim }}
- name: "data"
persistentVolumeClaim:
claimName: {{ .Values.data.persistence.existingClaim }}
{{- else }}
volumeClaimTemplates:
- metadata:
name: "data"
{{- if .Values.data.persistence.annotations }}
annotations: {{- toYaml .Values.data.persistence.annotations | nindent 10 }}
{{- end }}
spec:
accessModes: {{- toYaml .Values.data.persistence.accessModes | nindent 10 }}
{{ $storage := dict "global" .Values.global "local" .Values.data }}
{{ include "elasticsearch.storageClass" $storage }}
resources:
requests:
storage: {{ .Values.data.persistence.size | quote }}
{{- if .Values.data.persistence.selector }}
selector: {{- include "common.tplvalues.render" (dict "value" .Values.data.persistence.selector "context" $) | nindent 10 }}
{{- else if .Values.data.persistence.existingVolume }}
selector:
matchLabels:
volume: {{ .Values.data.persistence.existingVolume }}
{{- end }}
{{- end }}
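For reference, a hedged values sketch for the data tier rendered by this StatefulSet; the sizes and counts are assumptions, not defaults. If `persistence.enabled` is false the template falls back to an `emptyDir`, and `existingClaim` short-circuits the volumeClaimTemplates block above.

```yaml
data:
  replicas: 3
  heapSize: 1024m          # exposed to the container as ELASTICSEARCH_HEAP_SIZE
  persistence:
    enabled: true
    size: 8Gi
    # existingClaim: es-data-pvc   # reuse a pre-provisioned PVC instead of a claim template
```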

View File

@ -0,0 +1,19 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "elasticsearch.data.fullname" . }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
app.kubernetes.io/component: data
spec:
type: ClusterIP
publishNotReadyAddresses: true
ports:
- name: http
port: 9200
targetPort: http
- name: tcp-transport
port: 9300
targetPort: transport
nodePort: null
selector: {{- include "common.labels.matchLabels" . | nindent 4 }}
app.kubernetes.io/component: data

View File

@ -0,0 +1,4 @@
{{- range .Values.extraDeploy }}
---
{{ include "common.tplvalues.render" (dict "value" . "context" $) }}
{{- end }}

View File

@ -0,0 +1,73 @@
{{- if .Values.curator.enabled }}
{{- range $kind, $enabled := .Values.curator.hooks }}
{{- if $enabled }}
---
apiVersion: batch/v1
kind: Job
metadata:
name: {{ template "elasticsearch.curator.fullname" . }}-curator-on-{{ $kind }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
app.kubernetes.io/component: curator
## Istio Labels: https://istio.io/docs/ops/deployment/requirements/
app: curator
annotations:
"helm.sh/hook": post-{{ $kind }}
"helm.sh/hook-weight": "1"
{{- if $.Values.cronjob.annotations }}
{{- toYaml $.Values.cronjob.annotations | nindent 4 }}
{{- end }}
spec:
template:
metadata:
labels: {{- include "common.labels.standard" . | nindent 10 }}
app.kubernetes.io/component: curator
## Istio Labels: https://istio.io/docs/ops/deployment/requirements/
app: curator
{{- if $.Values.podAnnotations }}
annotations: {{- toYaml $.Values.podAnnotations | nindent 8 }}
{{- end }}
spec:
restartPolicy: Never
{{- if $.Values.curator.priorityClassName }}
priorityClassName: {{ $.Values.curator.priorityClassName | quote }}
{{- end }}
{{- if $.Values.curator.schedulerName }}
schedulerName: {{ $.Values.curator.schedulerName }}
{{- end }}
{{- if $.Values.curator.affinity }}
affinity: {{- include "common.tplvalues.render" (dict "value" $.Values.curator.affinity "context" $) | nindent 8 }}
{{- end }}
{{- if $.Values.curator.nodeSelector }}
nodeSelector: {{- include "common.tplvalues.render" (dict "value" $.Values.curator.nodeSelector "context" $) | nindent 8 }}
{{- end }}
{{- if $.Values.curator.tolerations }}
tolerations: {{- include "common.tplvalues.render" (dict "value" $.Values.curator.tolerations "context" $) | nindent 8 }}
{{- end }}
containers:
- name: {{ template "elasticsearch.curator.fullname" . }}
image: {{ template "elasticsearch.curator.image" . }}
imagePullPolicy: {{ .Values.curator.image.pullPolicy | quote }}
{{- if .Values.diagnosticMode.enabled }}
command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }}
args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }}
{{- else }}
command: [ "curator" ]
args: [ "--config", "/etc/es-curator/config.yml", "/etc/es-curator/action_file.yml" ]
{{- end }}
resources: {{- toYaml $.Values.curator.resources | nindent 12 }}
volumeMounts:
- name: config-volume
mountPath: /etc/es-curator
{{- if $.Values.curator.extraVolumeMounts }}
{{- toYaml $.Values.curator.extraVolumeMounts | nindent 12 }}
{{- end }}
volumes:
- name: config-volume
configMap:
name: {{ template "elasticsearch.curator.fullname" . }}
{{- if $.Values.curator.extraVolumes }}
{{- toYaml $.Values.curator.extraVolumes | nindent 8 }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
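An illustrative values fragment that would render one of these post-hook Jobs; the hook names under `hooks` are assumptions based on the `post-{{ $kind }}` annotation above.

```yaml
curator:
  enabled: true
  hooks:
    install: false    # no curator Job on helm install
    upgrade: true     # run curator once after each helm upgrade
```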

View File

@ -0,0 +1,280 @@
{{- if .Values.ingest.enabled }}
apiVersion: {{ template "common.capabilities.statefulset.apiVersion" . }}
kind: StatefulSet
metadata:
name: {{ include "elasticsearch.ingest.fullname" . }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
app.kubernetes.io/component: ingest
## Istio Labels: https://istio.io/docs/ops/deployment/requirements/
app: ingest
spec:
updateStrategy:
type: {{ .Values.ingest.updateStrategy.type }}
{{- if (eq "OnDelete" .Values.ingest.updateStrategy.type) }}
rollingUpdate: null
{{- end }}
selector:
matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
app.kubernetes.io/component: ingest
podManagementPolicy: Parallel
replicas: {{ .Values.ingest.replicas }}
serviceName: {{ template "elasticsearch.ingest.fullname" . }}
template:
metadata:
labels: {{- include "common.labels.standard" . | nindent 8 }}
app.kubernetes.io/component: ingest
## Istio Labels: https://istio.io/docs/ops/deployment/requirements/
app: ingest
{{- if .Values.ingest.podLabels }}
{{- include "common.tplvalues.render" (dict "value" .Values.ingest.podLabels "context" $) | nindent 8 }}
{{- end }}
annotations:
{{- if and (include "elasticsearch.createTlsSecret" .) (not .Values.security.tls.ingest.existingSecret) }}
checksum/tls: {{ include (print $.Template.BasePath "/tls-secret.yaml") . | sha256sum }}
{{- end }}
{{- if .Values.ingest.podAnnotations }}
{{- include "common.tplvalues.render" (dict "value" .Values.ingest.podAnnotations "context" $) | nindent 8 }}
{{- end }}
spec:
{{- include "elasticsearch.imagePullSecrets" . | nindent 6 }}
{{- if .Values.ingest.hostAliases }}
hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.ingest.hostAliases "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.ingest.schedulerName }}
schedulerName: {{ .Values.ingest.schedulerName }}
{{- end }}
{{- if .Values.ingest.priorityClassName }}
priorityClassName: {{ .Values.ingest.priorityClassName | quote }}
{{- end }}
{{- if .Values.ingest.affinity }}
affinity: {{- include "common.tplvalues.render" (dict "value" .Values.ingest.affinity "context" $) | nindent 8 }}
{{- else }}
affinity:
podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.ingest.podAffinityPreset "component" "ingest" "context" $) | nindent 10 }}
podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.ingest.podAntiAffinityPreset "component" "ingest" "context" $) | nindent 10 }}
nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.ingest.nodeAffinityPreset.type "key" .Values.ingest.nodeAffinityPreset.key "values" .Values.ingest.nodeAffinityPreset.values) | nindent 10 }}
{{- end }}
{{- if .Values.ingest.nodeSelector }}
nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.ingest.nodeSelector "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.ingest.tolerations }}
tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.ingest.tolerations "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.ingest.topologySpreadConstraints }}
topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.ingest.topologySpreadConstraints "context" $) | nindent 8 }}
{{- end }}
serviceAccountName: {{ template "elasticsearch.ingest.serviceAccountName" . }}
{{- if or .Values.ingest.podSecurityContext.enabled .Values.ingest.securityContext.enabled }}
securityContext:
{{- if .Values.ingest.podSecurityContext.enabled }}
{{- omit .Values.ingest.podSecurityContext "enabled" | toYaml | nindent 8 }}
{{- else }}
fsGroup: {{ .Values.ingest.securityContext.fsGroup }}
{{- end }}
{{- end }}
{{- if or .Values.ingest.initContainers .Values.sysctlImage.enabled }}
initContainers:
{{- if .Values.sysctlImage.enabled }}
## Image that performs the sysctl operation to modify Kernel settings (needed sometimes to avoid boot errors)
- name: sysctl
image: {{ include "elasticsearch.sysctl.image" . }}
imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
command:
- /bin/bash
- -ec
- |
{{- include "elasticsearch.sysctlIfLess" (dict "key" "vm.max_map_count" "value" "262144") | nindent 14 }}
{{- include "elasticsearch.sysctlIfLess" (dict "key" "fs.file-max" "value" "65536") | nindent 14 }}
securityContext:
privileged: true
{{- if .Values.sysctlImage.resources }}
resources: {{- toYaml .Values.sysctlImage.resources | nindent 12 }}
{{- end }}
{{- end }}
{{- if .Values.ingest.initContainers }}
{{- include "common.tplvalues.render" (dict "value" .Values.ingest.initContainers "context" $) | nindent 8 }}
{{- end }}
{{- end }}
containers:
- name: elasticsearch
image: {{ include "elasticsearch.image" . }}
imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
{{- if or .Values.ingest.containerSecurityContext.enabled .Values.ingest.securityContext.enabled }}
securityContext:
{{- if .Values.ingest.containerSecurityContext.enabled }}
{{- omit .Values.ingest.containerSecurityContext "enabled" | toYaml | nindent 12 }}
{{- else }}
runAsUser: {{ .Values.ingest.securityContext.runAsUser }}
{{- end }}
{{- end }}
{{- if .Values.diagnosticMode.enabled }}
command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }}
args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }}
{{- end }}
env:
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: BITNAMI_DEBUG
value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }}
- name: ELASTICSEARCH_CLUSTER_NAME
value: {{ .Values.name | quote }}
- name: ELASTICSEARCH_CLUSTER_HOSTS
value: {{ include "elasticsearch.hosts" . | quote }}
- name: ELASTICSEARCH_TOTAL_NODES
value: {{ add (ternary .Values.master.autoscaling.minReplicas .Values.master.replicas (eq .Values.master.autoscaling.enabled true)) (ternary .Values.data.autoscaling.minReplicas .Values.data.replicas (eq .Values.data.autoscaling.enabled true)) | quote }}
- name: ELASTICSEARCH_CLUSTER_MASTER_HOSTS
{{- $elasticsearchMasterFullname := include "elasticsearch.master.fullname" . }}
{{- $replicas := int (ternary .Values.master.autoscaling.minReplicas .Values.master.replicas (eq .Values.master.autoscaling.enabled true)) }}
value: {{range $i, $e := until $replicas }}{{ $elasticsearchMasterFullname }}-{{ $e }} {{ end }}
- name: ELASTICSEARCH_MINIMUM_MASTER_NODES
value: {{ add (div (ternary .Values.master.autoscaling.minReplicas .Values.master.replicas (eq .Values.master.autoscaling.enabled true)) 2) 1 | quote }}
- name: ELASTICSEARCH_ADVERTISED_HOSTNAME
value: "$(MY_POD_NAME).{{ include "elasticsearch.ingest.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}"
{{- if .Values.plugins }}
- name: ELASTICSEARCH_PLUGINS
value: {{ .Values.plugins | quote }}
{{- end }}
- name: ELASTICSEARCH_HEAP_SIZE
value: {{ .Values.ingest.heapSize | quote }}
- name: ELASTICSEARCH_IS_DEDICATED_NODE
value: "yes"
- name: ELASTICSEARCH_NODE_TYPE
value: "ingest"
{{- if .Values.security.enabled }}
{{- include "elasticsearch.configure.security" . | nindent 12 }}
{{- end }}
{{- if .Values.extraEnvVars }}
{{- include "common.tplvalues.render" ( dict "value" .Values.extraEnvVars "context" $ ) | nindent 12 }}
{{- end }}
{{- if or .Values.extraEnvVarsConfigMap .Values.extraEnvVarsSecret }}
envFrom:
{{- if .Values.extraEnvVarsConfigMap }}
- configMapRef:
name: {{ include "common.tplvalues.render" ( dict "value" .Values.extraEnvVarsConfigMap "context" $ ) }}
{{- end }}
{{- if .Values.extraEnvVarsSecret }}
- secretRef:
name: {{ include "common.tplvalues.render" ( dict "value" .Values.extraEnvVarsSecret "context" $ ) }}
{{- end }}
{{- end }}
ports:
- name: http
containerPort: 9200
- name: transport
containerPort: 9300
{{- if not .Values.diagnosticMode.enabled }}
{{- if .Values.ingest.startupProbe.enabled }}
startupProbe:
initialDelaySeconds: {{ .Values.ingest.startupProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.ingest.startupProbe.periodSeconds }}
timeoutSeconds: {{ .Values.ingest.startupProbe.timeoutSeconds }}
successThreshold: {{ .Values.ingest.startupProbe.successThreshold }}
failureThreshold: {{ .Values.ingest.startupProbe.failureThreshold }}
exec:
command:
- /opt/bitnami/scripts/elasticsearch/healthcheck.sh
{{- else if .Values.ingest.customStartupProbe }}
startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.ingest.customStartupProbe "context" $) | nindent 12 }}
{{- end }}
{{- if .Values.ingest.livenessProbe.enabled }}
livenessProbe:
initialDelaySeconds: {{ .Values.ingest.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.ingest.livenessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.ingest.livenessProbe.timeoutSeconds }}
successThreshold: {{ .Values.ingest.livenessProbe.successThreshold }}
failureThreshold: {{ .Values.ingest.livenessProbe.failureThreshold }}
exec:
command:
- /opt/bitnami/scripts/elasticsearch/healthcheck.sh
{{- else if .Values.ingest.customLivenessProbe }}
livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.ingest.customLivenessProbe "context" $) | nindent 12 }}
{{- end }}
{{- if .Values.ingest.readinessProbe.enabled }}
readinessProbe:
initialDelaySeconds: {{ .Values.ingest.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.ingest.readinessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.ingest.readinessProbe.timeoutSeconds }}
successThreshold: {{ .Values.ingest.readinessProbe.successThreshold }}
failureThreshold: {{ .Values.ingest.readinessProbe.failureThreshold }}
exec:
command:
- /opt/bitnami/scripts/elasticsearch/healthcheck.sh
{{- else if .Values.ingest.customReadinessProbe }}
readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.ingest.customReadinessProbe "context" $) | nindent 12 }}
{{- end }}
{{- end }}
{{- if .Values.ingest.resources }}
resources: {{- toYaml .Values.ingest.resources | nindent 12 }}
{{- end }}
volumeMounts:
{{- if .Values.config }}
- mountPath: /opt/bitnami/elasticsearch/config/elasticsearch.yml
name: config
subPath: elasticsearch.yml
{{- end }}
- name: data
mountPath: /bitnami/elasticsearch/data
{{- if .Values.security.enabled }}
- name: elasticsearch-certificates
mountPath: /opt/bitnami/elasticsearch/config/certs
readOnly: true
{{- end }}
{{- if .Values.extraConfig }}
- mountPath: /opt/bitnami/elasticsearch/config/my_elasticsearch.yml
name: config
subPath: my_elasticsearch.yml
{{- end }}
{{- if .Values.extraVolumeMounts }}
{{- toYaml .Values.extraVolumeMounts | nindent 12 }}
{{- end }}
{{- if .Values.initScripts }}
- name: custom-init-scripts
mountPath: /docker-entrypoint-initdb.d/init-scripts
{{- end }}
{{- if .Values.initScriptsCM }}
- name: custom-init-scripts-cm
mountPath: /docker-entrypoint-initdb.d/init-scripts-cm
{{- end }}
{{- if .Values.initScriptsSecret }}
- name: custom-init-scripts-secret
mountPath: /docker-entrypoint-initdb.d/init-scripts-secret
{{- end }}
{{- if .Values.ingest.sidecars }}
{{- include "common.tplvalues.render" ( dict "value" .Values.ingest.sidecars "context" $) | nindent 8 }}
{{- end }}
volumes:
{{- if or .Values.config .Values.extraConfig }}
- name: config
configMap:
name: {{ include "common.names.fullname" . }}
{{- end }}
{{- if .Values.security.enabled }}
- name: elasticsearch-certificates
secret:
secretName: {{ include "elasticsearch.ingest.tlsSecretName" . }}
defaultMode: 256
{{- end }}
{{- if .Values.extraVolumes }}
{{- toYaml .Values.extraVolumes | nindent 8 }}
{{- end }}
{{- if .Values.initScripts }}
- name: custom-init-scripts
configMap:
name: {{ template "elasticsearch.initScripts" . }}
{{- end }}
{{- if .Values.initScriptsCM }}
- name: custom-init-scripts-cm
configMap:
name: {{ template "elasticsearch.initScriptsCM" . }}
{{- end }}
{{- if .Values.initScriptsSecret }}
- name: custom-init-scripts-secret
secret:
secretName: {{ template "elasticsearch.initScriptsSecret" . }}
defaultMode: 0755
{{- end }}
- name: "data"
emptyDir: {}
{{- end }}
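A small, assumed values sketch for turning on the dedicated ingest tier defined above; ingest nodes keep no persistent data, hence the `emptyDir` at the end of the template.

```yaml
ingest:
  enabled: true
  replicas: 2
  heapSize: 128m
```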

View File

@ -0,0 +1,29 @@
{{- if .Values.ingest.enabled }}
apiVersion: v1
kind: Service
metadata:
name: {{ include "elasticsearch.ingest.fullname" . }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
app.kubernetes.io/component: ingest
annotations: {{ include "common.tplvalues.render" ( dict "value" .Values.ingest.service.annotations "context" $) | nindent 4 }}
spec:
type: {{ .Values.ingest.service.type | quote }}
{{- if and (eq .Values.ingest.service.type "LoadBalancer") (not (empty .Values.ingest.service.loadBalancerIP)) }}
loadBalancerIP: {{ .Values.ingest.service.loadBalancerIP }}
{{- end }}
publishNotReadyAddresses: true
ports:
- name: http
port: 9200
targetPort: http
- name: tcp-transport
port: {{ .Values.ingest.service.port }}
targetPort: transport
{{- if and (or (eq .Values.ingest.service.type "NodePort") (eq .Values.ingest.service.type "LoadBalancer")) (not (empty .Values.ingest.service.nodePort)) }}
nodePort: {{ .Values.ingest.service.nodePort }}
{{- else if eq .Values.ingest.service.type "ClusterIP" }}
nodePort: null
{{- end }}
selector: {{- include "common.labels.matchLabels" . | nindent 4 }}
app.kubernetes.io/component: ingest
{{- end }}

View File

@ -0,0 +1,35 @@
{{- if .Values.master.autoscaling.enabled }}
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "elasticsearch.master.fullname" . }}
namespace: {{ .Release.Namespace | quote }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
app.kubernetes.io/component: master
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 4 }}
{{- end }}
{{- if .Values.commonAnnotations }}
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
spec:
scaleTargetRef:
apiVersion: {{ template "common.capabilities.statefulset.apiVersion" . }}
kind: StatefulSet
name: {{ include "elasticsearch.master.fullname" . }}
minReplicas: {{ .Values.master.autoscaling.minReplicas }}
maxReplicas: {{ .Values.master.autoscaling.maxReplicas }}
metrics:
{{- if .Values.master.autoscaling.targetCPU }}
- type: Resource
resource:
name: cpu
targetAverageUtilization: {{ .Values.master.autoscaling.targetCPU }}
{{- end }}
{{- if .Values.master.autoscaling.targetMemory }}
- type: Resource
resource:
name: memory
targetAverageUtilization: {{ .Values.master.autoscaling.targetMemory }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,329 @@
apiVersion: {{ template "common.capabilities.statefulset.apiVersion" . }}
kind: StatefulSet
metadata:
name: {{ include "elasticsearch.master.fullname" . }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
app.kubernetes.io/component: master
## Istio Labels: https://istio.io/docs/ops/deployment/requirements/
app: master
spec:
updateStrategy:
type: {{ .Values.master.updateStrategy.type }}
{{- if (eq "OnDelete" .Values.master.updateStrategy.type) }}
rollingUpdate: null
{{- end }}
selector:
matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
app.kubernetes.io/component: master
podManagementPolicy: Parallel
{{- if not .Values.master.autoscaling.enabled }}
replicas: {{ .Values.master.replicas }}
{{- end }}
serviceName: {{ template "elasticsearch.master.fullname" . }}
template:
metadata:
labels: {{- include "common.labels.standard" . | nindent 8 }}
app.kubernetes.io/component: master
## Istio Labels: https://istio.io/docs/ops/deployment/requirements/
app: master
{{- if .Values.master.podLabels }}
{{- include "common.tplvalues.render" (dict "value" .Values.master.podLabels "context" $) | nindent 8 }}
{{- end }}
annotations:
{{- if and (include "elasticsearch.createTlsSecret" .) (not .Values.security.tls.master.existingSecret) }}
checksum/tls: {{ include (print $.Template.BasePath "/tls-secret.yaml") . | sha256sum }}
{{- end }}
{{- if .Values.master.podAnnotations }}
{{- include "common.tplvalues.render" (dict "value" .Values.master.podAnnotations "context" $) | nindent 8 }}
{{- end }}
spec:
{{- include "elasticsearch.imagePullSecrets" . | nindent 6 }}
{{- if .Values.master.hostAliases }}
hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.master.hostAliases "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.master.schedulerName }}
schedulerName: {{ .Values.master.schedulerName }}
{{- end }}
{{- if .Values.master.priorityClassName }}
priorityClassName: {{ .Values.master.priorityClassName | quote }}
{{- end }}
{{- if .Values.master.affinity }}
affinity: {{- include "common.tplvalues.render" (dict "value" .Values.master.affinity "context" $) | nindent 8 }}
{{- else }}
affinity:
podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.master.podAffinityPreset "component" "master" "context" $) | nindent 10 }}
podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.master.podAntiAffinityPreset "component" "master" "context" $) | nindent 10 }}
nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.master.nodeAffinityPreset.type "key" .Values.master.nodeAffinityPreset.key "values" .Values.master.nodeAffinityPreset.values) | nindent 10 }}
{{- end }}
{{- if .Values.master.nodeSelector }}
nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.master.nodeSelector "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.master.tolerations }}
tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.master.tolerations "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.master.topologySpreadConstraints }}
topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.master.topologySpreadConstraints "context" $) | nindent 8 }}
{{- end }}
serviceAccountName: {{ template "elasticsearch.master.serviceAccountName" . }}
{{- if or .Values.master.podSecurityContext.enabled .Values.master.securityContext.enabled }}
securityContext:
{{- if .Values.master.podSecurityContext.enabled }}
{{- omit .Values.master.podSecurityContext "enabled" | toYaml | nindent 8 }}
{{- else }}
fsGroup: {{ .Values.master.securityContext.fsGroup }}
{{- end }}
{{- end }}
{{- if or .Values.master.initContainers .Values.sysctlImage.enabled (and .Values.volumePermissions.enabled .Values.master.persistence.enabled) }}
initContainers:
{{- if .Values.sysctlImage.enabled }}
## Image that performs the sysctl operation to modify Kernel settings (needed sometimes to avoid boot errors)
- name: sysctl
image: {{ include "elasticsearch.sysctl.image" . }}
imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
command:
- /bin/bash
- -ec
- |
{{- include "elasticsearch.sysctlIfLess" (dict "key" "vm.max_map_count" "value" "262144") | nindent 14 }}
{{- include "elasticsearch.sysctlIfLess" (dict "key" "fs.file-max" "value" "65536") | nindent 14 }}
securityContext:
privileged: true
{{- if .Values.sysctlImage.resources }}
resources: {{- toYaml .Values.sysctlImage.resources | nindent 12 }}
{{- end }}
{{- end }}
{{- if and .Values.volumePermissions.enabled .Values.master.persistence.enabled }}
- name: volume-permissions
image: {{ include "elasticsearch.volumePermissions.image" . }}
imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }}
command:
- /bin/bash
- -ec
- |
chown -R {{ .Values.master.securityContext.runAsUser }}:{{ .Values.master.securityContext.fsGroup }} /bitnami/elasticsearch/data
securityContext:
runAsUser: 0
{{- if .Values.volumePermissions.resources }}
resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }}
{{- end }}
volumeMounts:
- name: data
mountPath: "/bitnami/elasticsearch/data"
{{- end }}
{{- if .Values.master.initContainers }}
{{- include "common.tplvalues.render" (dict "value" .Values.master.initContainers "context" $) | nindent 8 }}
{{- end }}
{{- end }}
containers:
- name: elasticsearch
image: {{ include "elasticsearch.image" . }}
imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
{{- if or .Values.master.containerSecurityContext.enabled .Values.master.securityContext.enabled }}
securityContext:
{{- if .Values.master.containerSecurityContext.enabled }}
{{- omit .Values.master.containerSecurityContext "enabled" | toYaml | nindent 12 }}
{{- else }}
runAsUser: {{ .Values.master.securityContext.runAsUser }}
{{- end }}
{{- end }}
{{- if .Values.diagnosticMode.enabled }}
command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }}
args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }}
{{- end }}
env:
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: BITNAMI_DEBUG
value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }}
- name: ELASTICSEARCH_CLUSTER_NAME
value: {{ .Values.name | quote }}
- name: ELASTICSEARCH_CLUSTER_HOSTS
value: {{ include "elasticsearch.hosts" . | quote }}
- name: ELASTICSEARCH_TOTAL_NODES
value: {{ add (ternary .Values.master.autoscaling.minReplicas .Values.master.replicas .Values.master.autoscaling.enabled) (ternary .Values.data.autoscaling.minReplicas .Values.data.replicas .Values.data.autoscaling.enabled) | quote }}
- name: ELASTICSEARCH_CLUSTER_MASTER_HOSTS
{{- $elasticsearchMasterFullname := include "elasticsearch.master.fullname" . }}
{{- $replicas := int (ternary .Values.master.autoscaling.minReplicas .Values.master.replicas .Values.master.autoscaling.enabled) }}
value: {{range $i, $e := until $replicas }}{{ $elasticsearchMasterFullname }}-{{ $e }} {{ end }}
- name: ELASTICSEARCH_MINIMUM_MASTER_NODES
value: {{ add (div (ternary .Values.master.autoscaling.minReplicas .Values.master.replicas .Values.master.autoscaling.enabled) 2) 1 | quote }}
- name: ELASTICSEARCH_ADVERTISED_HOSTNAME
value: "$(MY_POD_NAME).{{ include "elasticsearch.master.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}"
{{- if .Values.plugins }}
- name: ELASTICSEARCH_PLUGINS
value: {{ .Values.plugins | quote }}
{{- end }}
{{- if .Values.snapshotRepoPath }}
- name: ELASTICSEARCH_FS_SNAPSHOT_REPO_PATH
value: {{ .Values.snapshotRepoPath | quote }}
{{- end }}
- name: ELASTICSEARCH_HEAP_SIZE
value: {{ .Values.master.heapSize | quote }}
- name: ELASTICSEARCH_IS_DEDICATED_NODE
value: "yes"
- name: ELASTICSEARCH_NODE_TYPE
value: "master"
{{- if .Values.security.enabled }}
{{- include "elasticsearch.configure.security" . | nindent 12 }}
{{- end }}
{{- if .Values.extraEnvVars }}
{{- include "common.tplvalues.render" ( dict "value" .Values.extraEnvVars "context" $ ) | nindent 12 }}
{{- end }}
{{- if or .Values.extraEnvVarsConfigMap .Values.extraEnvVarsSecret }}
envFrom:
{{- if .Values.extraEnvVarsConfigMap }}
- configMapRef:
name: {{ include "common.tplvalues.render" ( dict "value" .Values.extraEnvVarsConfigMap "context" $ ) }}
{{- end }}
{{- if .Values.extraEnvVarsSecret }}
- secretRef:
name: {{ include "common.tplvalues.render" ( dict "value" .Values.extraEnvVarsSecret "context" $ ) }}
{{- end }}
{{- end }}
ports:
- name: http
containerPort: 9200
- name: transport
containerPort: 9300
{{- if not .Values.diagnosticMode.enabled }}
{{- if .Values.master.startupProbe.enabled }}
startupProbe:
initialDelaySeconds: {{ .Values.master.startupProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.master.startupProbe.periodSeconds }}
timeoutSeconds: {{ .Values.master.startupProbe.timeoutSeconds }}
successThreshold: {{ .Values.master.startupProbe.successThreshold }}
failureThreshold: {{ .Values.master.startupProbe.failureThreshold }}
exec:
command:
- /opt/bitnami/scripts/elasticsearch/healthcheck.sh
{{- else if .Values.master.customStartupProbe }}
startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.master.customStartupProbe "context" $) | nindent 12 }}
{{- end }}
{{- if .Values.master.livenessProbe.enabled }}
livenessProbe:
initialDelaySeconds: {{ .Values.master.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.master.livenessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.master.livenessProbe.timeoutSeconds }}
successThreshold: {{ .Values.master.livenessProbe.successThreshold }}
failureThreshold: {{ .Values.master.livenessProbe.failureThreshold }}
exec:
command:
- /opt/bitnami/scripts/elasticsearch/healthcheck.sh
{{- else if .Values.master.customLivenessProbe }}
livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.master.customLivenessProbe "context" $) | nindent 12 }}
{{- end }}
{{- if .Values.master.readinessProbe.enabled }}
readinessProbe:
initialDelaySeconds: {{ .Values.master.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.master.readinessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.master.readinessProbe.timeoutSeconds }}
successThreshold: {{ .Values.master.readinessProbe.successThreshold }}
failureThreshold: {{ .Values.master.readinessProbe.failureThreshold }}
exec:
command:
- /opt/bitnami/scripts/elasticsearch/healthcheck.sh
{{- else if .Values.master.customReadinessProbe }}
readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.master.customReadinessProbe "context" $) | nindent 12 }}
{{- end }}
{{- end }}
{{- if .Values.master.resources }}
resources: {{- toYaml .Values.master.resources | nindent 12 }}
{{- end }}
volumeMounts:
{{- if .Values.config }}
- mountPath: /opt/bitnami/elasticsearch/config/elasticsearch.yml
name: config
subPath: elasticsearch.yml
{{- end }}
{{- if .Values.extraConfig }}
- mountPath: /opt/bitnami/elasticsearch/config/my_elasticsearch.yml
name: config
subPath: my_elasticsearch.yml
{{- end }}
- name: data
mountPath: /bitnami/elasticsearch/data
{{- if .Values.security.enabled }}
- name: elasticsearch-certificates
mountPath: /opt/bitnami/elasticsearch/config/certs
readOnly: true
{{- end }}
{{- if .Values.extraVolumeMounts }}
{{- toYaml .Values.extraVolumeMounts | nindent 12 }}
{{- end }}
{{- if .Values.initScripts }}
- name: custom-init-scripts
mountPath: /docker-entrypoint-initdb.d/init-scripts
{{- end }}
{{- if .Values.initScriptsCM }}
- name: custom-init-scripts-cm
mountPath: /docker-entrypoint-initdb.d/init-scripts-cm
{{- end }}
{{- if .Values.initScriptsSecret }}
- name: custom-init-scripts-secret
mountPath: /docker-entrypoint-initdb.d/init-scripts-secret
{{- end }}
{{- if .Values.master.sidecars }}
{{- include "common.tplvalues.render" ( dict "value" .Values.master.sidecars "context" $) | nindent 8 }}
{{- end }}
volumes:
{{- if or .Values.config .Values.extraConfig }}
- name: config
configMap:
name: {{ include "common.names.fullname" . }}
{{- end }}
{{- if .Values.security.enabled }}
- name: elasticsearch-certificates
secret:
secretName: {{ include "elasticsearch.master.tlsSecretName" . }}
defaultMode: 256
{{- end }}
{{- if .Values.extraVolumes }}
{{- toYaml .Values.extraVolumes | nindent 8 }}
{{- end }}
{{- if .Values.initScripts }}
- name: custom-init-scripts
configMap:
name: {{ template "elasticsearch.initScripts" . }}
{{- end }}
{{- if .Values.initScriptsCM }}
- name: custom-init-scripts-cm
configMap:
name: {{ template "elasticsearch.initScriptsCM" . }}
{{- end }}
{{- if .Values.initScriptsSecret }}
- name: custom-init-scripts-secret
secret:
secretName: {{ template "elasticsearch.initScriptsSecret" . }}
defaultMode: 0755
{{- end }}
{{- if not .Values.master.persistence.enabled }}
- name: "data"
emptyDir: {}
{{- else if .Values.master.persistence.existingClaim }}
- name: "data"
persistentVolumeClaim:
claimName: {{ .Values.master.persistence.existingClaim }}
{{- else }}
volumeClaimTemplates:
- metadata:
name: "data"
{{- if .Values.master.persistence.annotations }}
annotations: {{- toYaml .Values.master.persistence.annotations | nindent 10 }}
{{- end }}
spec:
accessModes: {{- toYaml .Values.master.persistence.accessModes | nindent 10 }}
{{ $storage := dict "global" .Values.global "local" .Values.master }}
{{ include "elasticsearch.storageClass" $storage }}
resources:
requests:
storage: {{ .Values.master.persistence.size | quote }}
{{- if .Values.master.persistence.selector }}
selector: {{- include "common.tplvalues.render" (dict "value" .Values.master.persistence.selector "context" $) | nindent 10 }}
{{- else if .Values.master.persistence.existingVolume }}
selector:
matchLabels:
volume: {{ .Values.master.persistence.existingVolume }}
{{- end }}
{{- end }}
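A hedged example of master-tier values for this StatefulSet; the sizes below are assumptions. The template derives ELASTICSEARCH_MINIMUM_MASTER_NODES as `replicas/2 + 1`, so three masters yield a quorum of 2.

```yaml
master:
  replicas: 3        # minimum_master_nodes = 3/2 + 1 = 2
  heapSize: 768m
  persistence:
    enabled: true
    size: 8Gi
```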

View File

@ -0,0 +1,27 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "elasticsearch.master.fullname" . }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
app.kubernetes.io/component: master
annotations: {{ include "common.tplvalues.render" ( dict "value" .Values.master.service.annotations "context" $) | nindent 4 }}
spec:
type: {{ .Values.master.service.type | quote }}
{{- if and (eq .Values.master.service.type "LoadBalancer") (not (empty .Values.master.service.loadBalancerIP)) }}
loadBalancerIP: {{ .Values.master.service.loadBalancerIP }}
{{- end }}
publishNotReadyAddresses: true
ports:
- name: http
port: 9200
targetPort: http
- name: tcp-transport
port: {{ .Values.master.service.port }}
targetPort: transport
{{- if and (or (eq .Values.master.service.type "NodePort") (eq .Values.master.service.type "LoadBalancer")) (not (empty .Values.master.service.nodePort)) }}
nodePort: {{ .Values.master.service.nodePort }}
{{- else if eq .Values.master.service.type "ClusterIP" }}
nodePort: null
{{- end }}
selector: {{- include "common.labels.matchLabels" . | nindent 4 }}
app.kubernetes.io/component: master

View File

@ -0,0 +1,107 @@
{{- if .Values.metrics.enabled }}
apiVersion: {{ template "common.capabilities.deployment.apiVersion" . }}
kind: Deployment
metadata:
name: {{ include "elasticsearch.metrics.fullname" . }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
app.kubernetes.io/component: metrics
## Istio Labels: https://istio.io/docs/ops/deployment/requirements/
app: metrics
spec:
selector:
matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
app.kubernetes.io/component: metrics
replicas: 1
template:
metadata:
labels: {{- include "common.labels.standard" . | nindent 8 }}
app.kubernetes.io/component: metrics
## Istio Labels: https://istio.io/docs/ops/deployment/requirements/
app: metrics
{{- if .Values.metrics.podLabels }}
{{- include "common.tplvalues.render" (dict "value" .Values.metrics.podLabels "context" $) | nindent 8 }}
{{- end }}
{{- with .Values.metrics.podAnnotations }}
annotations: {{ toYaml . | nindent 8 }}
{{- end }}
spec:
{{- include "elasticsearch.imagePullSecrets" . | nindent 6 }}
{{- if .Values.metrics.hostAliases }}
hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.hostAliases "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.metrics.schedulerName }}
schedulerName: {{ .Values.metrics.schedulerName }}
{{- end }}
containers:
- name: metrics
image: {{ include "elasticsearch.metrics.image" . }}
imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }}
{{- if .Values.diagnosticMode.enabled }}
command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }}
args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }}
{{- else }}
args:
{{- if gt (int .Values.coordinating.replicas) 0 }}
# Prefer coordinating only nodes to do the initial metrics query
- --es.uri=http://{{- if .Values.security.enabled }}elastic:{{ .Values.security.elasticPassword}}@{{- end}}{{ template "elasticsearch.coordinating.fullname" . }}:{{ .Values.coordinating.service.port }}
{{- else }}
# Using master nodes as there are no coordinating only nodes
- --es.uri=http://{{- if .Values.security.enabled }}elastic:{{ .Values.security.elasticPassword}}@{{- end}}{{ include "elasticsearch.master.fullname" . }}:{{ .Values.master.service.port }}
{{- end }}
- --es.all
{{- if .Values.metrics.extraArgs }}
{{- toYaml .Values.metrics.extraArgs | nindent 12 }}
{{- end }}
{{- end }}
ports:
- name: metrics
containerPort: 9114
{{- if not .Values.diagnosticMode.enabled }}
{{- if .Values.metrics.livenessProbe.enabled }}
livenessProbe:
initialDelaySeconds: {{ .Values.metrics.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.metrics.livenessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.metrics.livenessProbe.timeoutSeconds }}
successThreshold: {{ .Values.metrics.livenessProbe.successThreshold }}
failureThreshold: {{ .Values.metrics.livenessProbe.failureThreshold }}
httpGet:
path: /metrics
port: metrics
{{- else if .Values.metrics.customLivenessProbe }}
livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customLivenessProbe "context" $) | nindent 12 }}
{{- end }}
{{- if .Values.metrics.readinessProbe.enabled }}
readinessProbe:
initialDelaySeconds: {{ .Values.metrics.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.metrics.readinessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.metrics.readinessProbe.timeoutSeconds }}
successThreshold: {{ .Values.metrics.readinessProbe.successThreshold }}
failureThreshold: {{ .Values.metrics.readinessProbe.failureThreshold }}
httpGet:
path: /metrics
port: metrics
{{- else if .Values.metrics.customReadinessProbe }}
readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customReadinessProbe "context" $) | nindent 12 }}
{{- end }}
{{- end }}
{{- if .Values.metrics.resources }}
resources: {{- toYaml .Values.metrics.resources | nindent 12 }}
{{- end }}
{{- if .Values.metrics.affinity }}
affinity: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.affinity "context" $) | nindent 8 }}
{{- else }}
affinity:
podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.metrics.podAffinityPreset "component" "metrics" "context" $) | nindent 10 }}
podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.metrics.podAntiAffinityPreset "component" "metrics" "context" $) | nindent 10 }}
nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.metrics.nodeAffinityPreset.type "key" .Values.metrics.nodeAffinityPreset.key "values" .Values.metrics.nodeAffinityPreset.values) | nindent 10 }}
{{- end }}
{{- if .Values.metrics.nodeSelector }}
nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.nodeSelector "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.metrics.tolerations }}
tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.tolerations "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.metrics.topologySpreadConstraints }}
topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.topologySpreadConstraints "context" $) | nindent 8 }}
{{- end }}
{{- end }}
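An assumed values snippet that enables this exporter Deployment and, together with the ServiceMonitor template further down, wires it into a Prometheus Operator setup:

```yaml
metrics:
  enabled: true
  serviceMonitor:
    enabled: true
    interval: 30s
```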

View File

@ -0,0 +1,17 @@
{{- if .Values.metrics.enabled }}
apiVersion: v1
kind: Service
metadata:
name: {{ include "elasticsearch.metrics.fullname" . }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
app.kubernetes.io/component: metrics
annotations: {{ include "common.tplvalues.render" ( dict "value" .Values.metrics.service.annotations "context" $) | nindent 4 }}
spec:
type: {{ .Values.metrics.service.type }}
ports:
- name: http-metrics
port: 9114
targetPort: metrics
selector: {{- include "common.labels.matchLabels" . | nindent 4 }}
app.kubernetes.io/component: metrics
{{- end }}

View File

@ -0,0 +1,34 @@
{{- $pspAvailable := (semverCompare "<1.25-0" (include "common.capabilities.kubeVersion" .)) -}}
{{- if and $pspAvailable .Values.curator.enabled .Values.curator.psp.create }}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ include "elasticsearch.curator.fullname" . }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
role: curator
spec:
privileged: true
#requiredDropCapabilities:
volumes:
- 'configMap'
- 'secret'
hostNetwork: false
hostIPC: false
hostPID: false
runAsUser:
# Require the container to run without root privileges.
rule: 'MustRunAsNonRoot'
seLinux:
rule: 'RunAsAny'
supplementalGroups:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
fsGroup:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
readOnlyRootFilesystem: false
{{- end }}

View File

@ -0,0 +1,21 @@
{{- if and .Values.curator.enabled .Values.curator.rbac.enabled }}
kind: Role
apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }}
metadata:
name: {{ include "elasticsearch.curator.fullname" . }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
role: curator
component: elasticsearch-curator-configmap
rules:
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["update", "patch"]
{{- $pspAvailable := (semverCompare "<1.25-0" (include "common.capabilities.kubeVersion" .)) -}}
{{- if and $pspAvailable .Values.curator.psp.create }}
- apiGroups: ["extensions"]
resources: ["podsecuritypolicies"]
verbs: ["use"]
resourceNames:
- {{ include "elasticsearch.curator.fullname" . }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,17 @@
{{- if and .Values.curator.enabled .Values.curator.rbac.enabled }}
kind: RoleBinding
apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }}
metadata:
name: {{ include "elasticsearch.curator.fullname" . }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
role: curator
component: elasticsearch-curator-configmap
roleRef:
kind: Role
name: {{ template "elasticsearch.curator.fullname" . }}
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
name: {{ include "elasticsearch.curator.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
{{- end }}

View File

@ -0,0 +1,54 @@
{{- if (include "elasticsearch.createSecret" . ) }}
apiVersion: v1
kind: Secret
metadata:
name: {{ template "common.names.fullname" . }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
{{- end }}
{{- if .Values.commonAnnotations }}
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
type: Opaque
data:
{{- if .Values.security.elasticPassword }}
elasticsearch-password: {{ default "" .Values.security.elasticPassword | b64enc | quote }}
{{- else }}
elasticsearch-password: {{ randAlphaNum 14 | b64enc | quote }}
{{- end }}
{{- if .Values.security.tls.keystorePassword }}
keystore-password: {{ default "" .Values.security.tls.keystorePassword | b64enc | quote }}
{{- end }}
{{- if .Values.security.tls.truststorePassword }}
truststore-password: {{ default "" .Values.security.tls.truststorePassword | b64enc | quote }}
{{- end }}
{{- if .Values.security.tls.keyPassword }}
key-password: {{ default "" .Values.security.tls.keyPassword | b64enc | quote }}
{{- end }}
---
{{- end }}
{{- if (include "elasticsearch.createTlsPasswordsSecret" . ) }}
apiVersion: v1
kind: Secret
metadata:
name: {{ template "common.names.fullname" . }}-tls-pass
labels: {{- include "common.labels.standard" . | nindent 4 }}
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
{{- end }}
{{- if .Values.commonAnnotations }}
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
type: Opaque
data:
{{- if .Values.security.tls.keystorePassword }}
keystore-password: {{ default "" .Values.security.tls.keystorePassword | b64enc | quote }}
{{- end }}
{{- if .Values.security.tls.truststorePassword }}
truststore-password: {{ default "" .Values.security.tls.truststorePassword | b64enc | quote }}
{{- end }}
{{- if .Values.security.tls.keyPassword }}
key-password: {{ default "" .Values.security.tls.keyPassword | b64enc | quote }}
{{- end }}
{{- end }}
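A hedged values sketch for the security secrets rendered above; if `elasticPassword` is left empty the template generates a random 14-character password instead. The literal passwords are placeholders.

```yaml
security:
  enabled: true
  elasticPassword: "change-me"
  tls:
    keystorePassword: "change-me-too"
```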

View File

@ -0,0 +1,44 @@
{{- if and .Values.curator.enabled .Values.curator.serviceAccount.create .Values.curator.rbac.enabled }}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "elasticsearch.curator.serviceAccountName" . }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
role: curator
{{- end }}
{{- if .Values.data.serviceAccount.create }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ template "elasticsearch.data.serviceAccountName" . }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
role: data
{{- end }}
{{- if .Values.master.serviceAccount.create }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ template "elasticsearch.master.serviceAccountName" . }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
role: master
{{- end }}
{{- if .Values.coordinating.serviceAccount.create }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ template "elasticsearch.coordinating.serviceAccountName" . }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
role: coordinating-only
{{- end }}
{{- if and .Values.ingest.enabled .Values.ingest.serviceAccount.create }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ template "elasticsearch.ingest.serviceAccountName" . }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
role: ingest
{{- end }}

View File

@ -0,0 +1,29 @@
{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ include "elasticsearch.metrics.fullname" . }}
{{- if .Values.metrics.serviceMonitor.namespace }}
namespace: {{ .Values.metrics.serviceMonitor.namespace }}
{{- end }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
app.kubernetes.io/component: metrics
spec:
selector:
matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
{{- if .Values.metrics.serviceMonitor.selector }}
{{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.selector "context" $) | nindent 6 }}
{{- end }}
app.kubernetes.io/component: metrics
endpoints:
- port: http-metrics
{{- if .Values.metrics.serviceMonitor.interval }}
interval: {{ .Values.metrics.serviceMonitor.interval }}
{{- end }}
{{- if .Values.metrics.serviceMonitor.scrapeTimeout }}
scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }}
{{- end }}
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
{{- end }}

View File

@ -0,0 +1,99 @@
{{- if (include "elasticsearch.createTlsSecret" .) }}
{{- $ca := genCA "elasticsearch-ca" 365 }}
{{- $releaseNamespace := .Release.Namespace }}
{{- $clusterDomain := .Values.clusterDomain }}
{{- if not .Values.security.tls.master.existingSecret }}
---
{{- $fullname := include "elasticsearch.master.fullname" . }}
{{- $serviceName := include "elasticsearch.master.fullname" . }}
{{- $altNames := list (printf "*.%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) $fullname "127.0.0.1" "localhost" }}
{{- $crt := genSignedCert $fullname nil $altNames 365 $ca }}
apiVersion: v1
kind: Secret
metadata:
name: {{ include "elasticsearch.master.fullname" . }}-crt
labels: {{- include "common.labels.standard" . | nindent 4 }}
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
{{- end }}
app.kubernetes.io/component: master
type: kubernetes.io/tls
data:
ca.crt: {{ $ca.Cert | b64enc | quote }}
tls.crt: {{ $crt.Cert | b64enc | quote }}
tls.key: {{ $crt.Key | b64enc | quote }}
{{- end }}
{{- if not .Values.security.tls.data.existingSecret }}
---
{{- $fullname := include "elasticsearch.data.fullname" . }}
{{- $serviceName := include "elasticsearch.data.fullname" . }}
{{- $altNames := list (printf "*.%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) $fullname "127.0.0.1" "localhost" }}
{{- $crt := genSignedCert $fullname nil $altNames 365 $ca }}
apiVersion: v1
kind: Secret
metadata:
name: {{ include "elasticsearch.data.fullname" . }}-crt
labels: {{- include "common.labels.standard" . | nindent 4 }}
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
{{- end }}
app.kubernetes.io/component: data
{{- if .Values.commonAnnotations }}
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
type: kubernetes.io/tls
data:
ca.crt: {{ $ca.Cert | b64enc | quote }}
tls.crt: {{ $crt.Cert | b64enc | quote }}
tls.key: {{ $crt.Key | b64enc | quote }}
{{- end }}
{{- if not .Values.security.tls.coordinating.existingSecret }}
---
{{- $fullname := include "elasticsearch.coordinating.fullname" . }}
{{- $serviceName := include "elasticsearch.coordinating.fullname" . }}
{{- $altNames := list (printf "*.%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) $fullname "127.0.0.1" "localhost" }}
{{- $crt := genSignedCert $fullname nil $altNames 365 $ca }}
apiVersion: v1
kind: Secret
metadata:
name: {{ include "elasticsearch.coordinating.fullname" . }}-crt
labels: {{- include "common.labels.standard" . | nindent 4 }}
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
{{- end }}
app.kubernetes.io/component: coordinating-only
{{- if .Values.commonAnnotations }}
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
type: kubernetes.io/tls
data:
ca.crt: {{ $ca.Cert | b64enc | quote }}
tls.crt: {{ $crt.Cert | b64enc | quote }}
tls.key: {{ $crt.Key | b64enc | quote }}
{{- end }}
{{- if and .Values.ingest.enabled (not .Values.security.tls.ingest.existingSecret) }}
---
{{- $fullname := include "elasticsearch.ingest.fullname" . }}
{{- $serviceName := include "elasticsearch.ingest.fullname" . }}
{{- $altNames := list (printf "*.%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) $fullname "127.0.0.1" "localhost" }}
{{- $crt := genSignedCert $fullname nil $altNames 365 $ca }}
apiVersion: v1
kind: Secret
metadata:
name: {{ include "elasticsearch.ingest.fullname" . }}-crt
labels: {{- include "common.labels.standard" . | nindent 4 }}
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
{{- end }}
app.kubernetes.io/component: ingest
{{- if .Values.commonAnnotations }}
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
type: kubernetes.io/tls
data:
ca.crt: {{ $ca.Cert | b64enc | quote }}
tls.crt: {{ $crt.Cert | b64enc | quote }}
tls.key: {{ $crt.Key | b64enc | quote }}
{{- end }}
{{- end }}

1783
elasticsearch/values.yaml Normal file

File diff suppressed because it is too large Load Diff

1783
local-values/es/crm.yaml Normal file

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

21
mongodb/.helmignore Normal file
View File

@ -0,0 +1,21 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj

6
mongodb/Chart.lock Normal file
View File

@ -0,0 +1,6 @@
dependencies:
- name: common
repository: https://charts.bitnami.com/bitnami
version: 1.10.1
digest: sha256:46a0218b2fbb421c87da91166dc5230d3ec85aa7d822dff1d479619fff8314e7
generated: "2021-10-28T13:49:42.459839301Z"

29
mongodb/Chart.yaml Normal file
View File

@ -0,0 +1,29 @@
annotations:
category: Database
apiVersion: v2
appVersion: 4.5.0
dependencies:
- name: common
repository: https://charts.bitnami.com/bitnami
tags:
- bitnami-common
version: 1.x.x
description: NoSQL document-oriented database that stores JSON-like documents with dynamic schemas, simplifying the integration of data in content-driven applications.
engine: gotpl
home: https://github.com/bitnami/charts/tree/master/bitnami/mongodb
icon: https://bitnami.com/assets/stacks/mongodb/img/mongodb-stack-220x234.png
keywords:
- mongodb
- database
- nosql
- cluster
- replicaset
- replication
maintainers:
- email: containers@bitnami.com
name: Bitnami
name: mongodb
sources:
- https://github.com/bitnami/bitnami-docker-mongodb
- https://mongodb.org
version: 10.30.8

659
mongodb/README.md Normal file
View File

@ -0,0 +1,659 @@
# MongoDB&reg; packaged by Bitnami
[MongoDB&reg;](https://www.mongodb.com/) is a cross-platform document-oriented database. Classified as a NoSQL database, MongoDB&reg; eschews the traditional table-based relational database structure in favor of JSON-like documents with dynamic schemas, making the integration of data in certain types of applications easier and faster.
Disclaimer: The respective trademarks mentioned in the offering are owned by the respective companies. We do not provide a commercial license for any of these products. This listing has an open-source license. MongoDB&reg; is run and maintained by MongoDB, which is a completely separate project from Bitnami.
## TL;DR
```bash
$ helm repo add bitnami https://charts.bitnami.com/bitnami
$ helm install my-release bitnami/mongodb
```
## Introduction
This chart bootstraps a [MongoDB&reg;](https://github.com/bitnami/bitnami-docker-mongodb) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This chart has been tested to work with Fluentd and Prometheus on top of the [BKPR](https://kubeprod.io/).
## Prerequisites
- Kubernetes 1.12+
- Helm 3.1.0
- PV provisioner support in the underlying infrastructure
## Installing the Chart
To install the chart with the release name `my-release`:
```bash
$ helm install my-release bitnami/mongodb
```
The command deploys MongoDB&reg; on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation.
> **Tip**: List all releases using `helm list`
## Uninstalling the Chart
To uninstall/delete the `my-release` deployment:
```bash
$ helm delete my-release
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
## Architecture
This chart allows installing MongoDB&reg; using two different architecture setups: `standalone` or `replicaset`. Use the `architecture` parameter to choose the one to use:
```console
architecture="standalone"
architecture="replicaset"
```
Refer to the [chart documentation for more information on each of these architectures](https://docs.bitnami.com/kubernetes/infrastructure/mongodb/get-started/understand-architecture/).
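For illustration, a minimal values sketch for a three-node replica set, using only parameters listed in the tables below (the `replicaSetKey` value is a placeholder):
```yaml
architecture: replicaset
replicaCount: 3          # number of data-bearing MongoDB(R) nodes
arbiter:
  enabled: true          # default; adds a vote-only arbiter pod
auth:
  enabled: true
  replicaSetKey: "my-replicaset-key"   # placeholder; use your own key
```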
## Parameters
### Global parameters
| Name | Description | Value |
| -------------------------- | ---------------------------------------------------------------------------------------------------------------------- | ----- |
| `global.imageRegistry` | Global Docker image registry | `""` |
| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` |
| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` |
| `global.namespaceOverride` | Override the namespace for resources deployed by the chart; can itself be overridden by the local namespaceOverride | `""` |
### Common parameters
| Name | Description | Value |
| ------------------------ | --------------------------------------------------------------------------------------------------------- | --------------- |
| `nameOverride` | String to partially override mongodb.fullname template (will maintain the release name) | `""` |
| `fullnameOverride` | String to fully override mongodb.fullname template | `""` |
| `clusterDomain` | Default Kubernetes cluster domain | `cluster.local` |
| `extraDeploy` | Array of extra objects to deploy with the release | `[]` |
| `commonLabels` | Add labels to all the deployed resources (sub-charts are not considered). Evaluated as a template | `{}` |
| `commonAnnotations` | Common annotations to add to all Mongo resources (sub-charts are not considered). Evaluated as a template | `{}` |
| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` |
| `diagnosticMode.command` | Command to override all containers in the deployment | `["sleep"]` |
| `diagnosticMode.args` | Args to override all containers in the deployment | `["infinity"]` |
### MongoDB&reg; parameters
| Name | Description | Value |
| ---------------------- | ----------------------------------------------------------------------------------------------------------------------------- | ---------------------- |
| `image.registry` | MongoDB&reg; image registry | `docker.io` |
| `image.repository` | MongoDB&reg; image registry | `bitnami/mongodb` |
| `image.tag` | MongoDB&reg; image tag (immutable tags are recommended) | `4.4.10-debian-10-r44` |
| `image.pullPolicy` | MongoDB&reg; image pull policy | `IfNotPresent` |
| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` |
| `image.debug` | Set to true if you would like to see extra information on logs | `false` |
| `schedulerName` | Name of the scheduler (other than default) to dispatch pods | `""` |
| `architecture` | MongoDB&reg; architecture (`standalone` or `replicaset`) | `standalone` |
| `useStatefulSet` | Set to true to use a StatefulSet instead of a Deployment (only when `architecture=standalone`) | `false` |
| `auth.enabled` | Enable authentication | `true` |
| `auth.rootUser` | MongoDB&reg; root user | `root` |
| `auth.rootPassword` | MongoDB&reg; root password | `""` |
| `auth.usernames` | List of custom users to be created during the initialization | `[]` |
| `auth.passwords` | List of passwords for the custom users set at `auth.usernames` | `[]` |
| `auth.databases` | List of custom databases to be created during the initialization | `[]` |
| `auth.username` | DEPRECATED: use `auth.usernames` instead | `""` |
| `auth.password` | DEPRECATED: use `auth.passwords` instead | `""` |
| `auth.database` | DEPRECATED: use `auth.databases` instead | `""` |
| `auth.replicaSetKey` | Key used for authentication in the replicaset (only when `architecture=replicaset`) | `""` |
| `auth.existingSecret` | Existing secret with MongoDB&reg; credentials (keys: `mongodb-password`, `mongodb-root-password`, `mongodb-replica-set-key`) | `""` |
| `tls.enabled` | Enable MongoDB&reg; TLS support between nodes in the cluster as well as between mongo clients and nodes | `false` |
| `tls.autoGenerated` | Generate a custom CA and self-signed certificates | `true` |
| `tls.existingSecret` | Existing secret with TLS certificates (keys: `mongodb-ca-cert`, `mongodb-ca-key`, `client-pem`) | `""` |
| `tls.caCert` | Custom CA certificate (base64 encoded) | `""` |
| `tls.caKey` | CA certificate private key (base64 encoded) | `""` |
| `tls.image.registry` | Init container TLS certs setup image registry | `docker.io` |
| `tls.image.repository` | Init container TLS certs setup image repository | `bitnami/nginx` |
| `tls.image.tag` | Init container TLS certs setup image tag (immutable tags are recommended) | `1.21.4-debian-10-r25` |
| `tls.image.pullPolicy` | Init container TLS certs setup image pull policy | `IfNotPresent` |
| `tls.extraDnsNames` | Add extra dns names to the CA, can solve x509 auth issue for pod clients | `[]` |
| `tls.mode` | Allows to set the tls mode which should be used when tls is enabled (options: `allowTLS`, `preferTLS`, `requireTLS`) | `requireTLS` |
| `hostAliases` | Add deployment host aliases | `[]` |
| `replicaSetName` | Name of the replica set (only when `architecture=replicaset`) | `rs0` |
| `replicaSetHostnames` | Enable DNS hostnames in the replicaset config (only when `architecture=replicaset`) | `true` |
| `enableIPv6` | Switch to enable/disable IPv6 on MongoDB&reg; | `false` |
| `directoryPerDB` | Switch to enable/disable DirectoryPerDB on MongoDB&reg; | `false` |
| `systemLogVerbosity` | MongoDB&reg; system log verbosity level | `0` |
| `disableSystemLog` | Switch to enable/disable MongoDB&reg; system log | `false` |
| `disableJavascript` | Switch to enable/disable MongoDB&reg; server-side JavaScript execution | `false` |
| `enableJournal` | Switch to enable/disable MongoDB&reg; Journaling | `true` |
| `configuration` | MongoDB&reg; configuration file to be used for Primary and Secondary nodes | `""` |
### replicaSetConfigurationSettings settings applied during runtime (not via configuration file)
| Name | Description | Value |
| ----------------------------------------------- | ----------------------------------------------------------------------------------------------- | ------- |
| `replicaSetConfigurationSettings.enabled` | Enable MongoDB&reg; Switch to enable/disable configuring MongoDB&reg; run time rs.conf settings | `false` |
| `replicaSetConfigurationSettings.configuration` | run-time rs.conf settings | `{}` |
| `existingConfigmap` | Name of existing ConfigMap with MongoDB&reg; configuration for Primary and Secondary nodes | `""` |
| `initdbScripts` | Dictionary of initdb scripts | `{}` |
| `initdbScriptsConfigMap` | Existing ConfigMap with custom initdb scripts | `""` |
| `command` | Override default container command (useful when using custom images) | `[]` |
| `args` | Override default container args (useful when using custom images) | `[]` |
| `extraFlags` | MongoDB&reg; additional command line flags | `[]` |
| `extraEnvVars` | Extra environment variables to add to MongoDB&reg; pods | `[]` |
| `extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars | `""` |
| `extraEnvVarsSecret` | Name of existing Secret containing extra env vars (in case of sensitive data) | `""` |
### MongoDB&reg; statefulset parameters
| Name | Description | Value |
| --------------------------------------- | ------------------------------------------------------------------------------------------------------ | --------------- |
| `annotations` | Additional annotations to be added to the MongoDB&reg; statefulset. Evaluated as a template | `{}` |
| `labels` | Additional labels to be added to the MongoDB&reg; statefulset. Evaluated as a template | `{}` |
| `replicaCount` | Number of MongoDB&reg; nodes (only when `architecture=replicaset`) | `2` |
| `strategyType` | StrategyType for MongoDB&reg; statefulset | `RollingUpdate` |
| `podManagementPolicy` | Pod management policy for MongoDB&reg; | `OrderedReady` |
| `podAffinityPreset` | MongoDB&reg; Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `podAntiAffinityPreset` | MongoDB&reg; Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` |
| `nodeAffinityPreset.type` | MongoDB&reg; Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `nodeAffinityPreset.key` | MongoDB&reg; Node label key to match. Ignored if `affinity` is set. | `""` |
| `nodeAffinityPreset.values` | MongoDB&reg; Node label values to match. Ignored if `affinity` is set. | `[]` |
| `topologySpreadConstraints` | MongoDB&reg; Spread Constraints for Pods. | `[]` |
| `affinity` | MongoDB&reg; Affinity for pod assignment | `{}` |
| `nodeSelector` | MongoDB&reg; Node labels for pod assignment | `{}` |
| `tolerations` | MongoDB&reg; Tolerations for pod assignment | `[]` |
| `podLabels` | MongoDB&reg; pod labels | `{}` |
| `podAnnotations` | MongoDB&reg; Pod annotations | `{}` |
| `priorityClassName` | Name of the existing priority class to be used by MongoDB&reg; pod(s) | `""` |
| `runtimeClassName` | Name of the runtime class to be used by MongoDB&reg; pod(s) | `""` |
| `podSecurityContext.enabled` | Enable MongoDB&reg; pod(s)' Security Context | `true` |
| `podSecurityContext.fsGroup` | Group ID for the volumes of the MongoDB&reg; pod(s) | `1001` |
| `podSecurityContext.sysctls` | sysctl settings of the MongoDB&reg; pod(s)' | `[]` |
| `containerSecurityContext.enabled` | Enable MongoDB&reg; container(s)' Security Context | `true` |
| `containerSecurityContext.runAsUser` | User ID for the MongoDB&reg; container | `1001` |
| `containerSecurityContext.runAsNonRoot` | Set MongoDB&reg; container's Security Context runAsNonRoot | `true` |
| `resources.limits` | The resources limits for MongoDB&reg; containers | `{}` |
| `resources.requests` | The requested resources for MongoDB&reg; containers | `{}` |
| `livenessProbe.enabled` | Enable livenessProbe | `true` |
| `livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `30` |
| `livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` |
| `livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` |
| `livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` |
| `livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` |
| `readinessProbe.enabled` | Enable readinessProbe | `true` |
| `readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` |
| `readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` |
| `readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` |
| `readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` |
| `readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` |
| `startupProbe.enabled` | Enable startupProbe | `false` |
| `startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `5` |
| `startupProbe.periodSeconds` | Period seconds for startupProbe | `10` |
| `startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `5` |
| `startupProbe.failureThreshold` | Failure threshold for startupProbe | `30` |
| `startupProbe.successThreshold` | Success threshold for startupProbe | `1` |
| `customLivenessProbe` | Override default liveness probe for MongoDB&reg; containers | `{}` |
| `customReadinessProbe` | Override default readiness probe for MongoDB&reg; containers | `{}` |
| `customStartupProbe` | Override default startup probe for MongoDB&reg; containers | `{}` |
| `initContainers` | Add additional init containers for the hidden node pod(s) | `[]` |
| `sidecars` | Add additional sidecar containers for the MongoDB&reg; pod(s) | `[]` |
| `extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the MongoDB&reg; container(s) | `[]` |
| `extraVolumes` | Optionally specify extra list of additional volumes to the MongoDB&reg; statefulset | `[]` |
| `pdb.create` | Enable/disable a Pod Disruption Budget creation for MongoDB&reg; pod(s) | `false` |
| `pdb.minAvailable` | Minimum number/percentage of MongoDB&reg; pods that must still be available after the eviction | `1` |
| `pdb.maxUnavailable` | Maximum number/percentage of MongoDB&reg; pods that may be made unavailable after the eviction | `""` |
### Traffic exposure parameters
| Name | Description | Value |
| -------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------- |
| `service.nameOverride` | MongoDB&reg; service name | `""` |
| `service.type` | Kubernetes Service type | `ClusterIP` |
| `service.port` | MongoDB&reg; service port | `27017` |
| `service.portName` | MongoDB&reg; service port name | `mongodb` |
| `service.nodePort` | Port to bind to for NodePort and LoadBalancer service types | `""` |
| `service.clusterIP` | MongoDB&reg; service cluster IP | `""` |
| `service.externalIPs` | Specify the externalIP value ClusterIP service type. | `[]` |
| `service.loadBalancerIP` | loadBalancerIP for MongoDB&reg; Service | `""` |
| `service.loadBalancerSourceRanges` | Address(es) that are allowed when service is LoadBalancer | `[]` |
| `service.annotations` | Provide any additional annotations that may be required | `{}` |
| `externalAccess.enabled` | Enable Kubernetes external cluster access to MongoDB&reg; nodes (only for replicaset architecture) | `false` |
| `externalAccess.autoDiscovery.enabled` | Enable using an init container to auto-detect external IPs by querying the K8s API | `false` |
| `externalAccess.autoDiscovery.image.registry` | Init container auto-discovery image registry | `docker.io` |
| `externalAccess.autoDiscovery.image.repository` | Init container auto-discovery image repository | `bitnami/kubectl` |
| `externalAccess.autoDiscovery.image.tag` | Init container auto-discovery image tag (immutable tags are recommended) | `1.19.16-debian-10-r30` |
| `externalAccess.autoDiscovery.image.pullPolicy` | Init container auto-discovery image pull policy | `IfNotPresent` |
| `externalAccess.autoDiscovery.image.pullSecrets` | Init container auto-discovery image pull secrets | `[]` |
| `externalAccess.autoDiscovery.resources.limits` | Init container auto-discovery resource limits | `{}` |
| `externalAccess.autoDiscovery.resources.requests` | Init container auto-discovery resource requests | `{}` |
| `externalAccess.service.type` | Kubernetes Service type for external access. Allowed values: NodePort, LoadBalancer or ClusterIP | `LoadBalancer` |
| `externalAccess.service.port` | MongoDB&reg; port used for external access when service type is LoadBalancer | `27017` |
| `externalAccess.service.loadBalancerIPs` | Array of load balancer IPs for MongoDB&reg; nodes | `[]` |
| `externalAccess.service.loadBalancerSourceRanges` | Address(es) that are allowed when service is LoadBalancer | `[]` |
| `externalAccess.service.nodePorts` | Array of node ports used to configure MongoDB&reg; advertised hostname when service type is NodePort | `[]` |
| `externalAccess.service.domain` | Domain or external IP used to configure MongoDB&reg; advertised hostname when service type is NodePort | `""` |
| `externalAccess.service.annotations` | Service annotations for external access | `{}` |
| `externalAccess.hidden.enabled` | Enable Kubernetes external cluster access to MongoDB&reg; hidden nodes | `false` |
| `externalAccess.hidden.service.type` | Kubernetes Service type for external access. Allowed values: NodePort or LoadBalancer | `LoadBalancer` |
| `externalAccess.hidden.service.port` | MongoDB&reg; port used for external access when service type is LoadBalancer | `27017` |
| `externalAccess.hidden.service.loadBalancerIPs` | Array of load balancer IPs for MongoDB&reg; nodes | `[]` |
| `externalAccess.hidden.service.loadBalancerSourceRanges` | Address(es) that are allowed when service is LoadBalancer | `[]` |
| `externalAccess.hidden.service.nodePorts` | Array of node ports used to configure MongoDB&reg; advertised hostname when service type is NodePort. Length must be the same as replicaCount | `[]` |
| `externalAccess.hidden.service.domain` | Domain or external IP used to configure MongoDB&reg; advertised hostname when service type is NodePort | `""` |
| `externalAccess.hidden.service.annotations` | Service annotations for external access | `{}` |
### Persistence parameters
| Name | Description | Value |
| --------------------------------------------- | ---------------------------------------------------------------------------------- | ------------------- |
| `persistence.enabled` | Enable MongoDB&reg; data persistence using PVC | `true` |
| `persistence.medium` | Provide a medium for `emptyDir` volumes. | `""` |
| `persistence.existingClaim` | Provide an existing `PersistentVolumeClaim` (only when `architecture=standalone`) | `""` |
| `persistence.storageClass` | PVC Storage Class for MongoDB&reg; data volume | `""` |
| `persistence.accessModes` | PV Access Mode | `["ReadWriteOnce"]` |
| `persistence.size` | PVC Storage Request for MongoDB&reg; data volume | `8Gi` |
| `persistence.annotations` | PVC annotations | `{}` |
| `persistence.mountPath` | Path to mount the volume at | `/bitnami/mongodb` |
| `persistence.subPath` | Subdirectory of the volume to mount at | `""` |
| `persistence.volumeClaimTemplates.selector` | A label query over volumes to consider for binding (e.g. when using local volumes) | `{}` |
| `persistence.volumeClaimTemplates.requests` | Custom PVC requests attributes | `{}` |
| `persistence.volumeClaimTemplates.dataSource` | Add dataSource to the VolumeClaimTemplate | `{}` |
### RBAC parameters
| Name | Description | Value |
| -------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
| `serviceAccount.create` | Enable creation of ServiceAccount for MongoDB&reg; pods | `true` |
| `serviceAccount.name` | Name of the created serviceAccount | `""` |
| `serviceAccount.annotations` | Additional Service Account annotations | `{}` |
| `rbac.create` | Whether to create & use RBAC resources or not | `false` |
| `rbac.role.rules` | Custom rules to create following the role specification | `[]` |
| `podSecurityPolicy.create` | Whether to create a PodSecurityPolicy. WARNING: PodSecurityPolicy is deprecated in Kubernetes v1.21 or later, unavailable in v1.25 or later | `false` |
| `podSecurityPolicy.allowPrivilegeEscalation` | Enable privilege escalation | `false` |
| `podSecurityPolicy.privileged` | Allow privileged | `false` |
| `podSecurityPolicy.spec` | Specify the full spec to use for Pod Security Policy | `{}` |
### Volume Permissions parameters
| Name | Description | Value |
| --------------------------------------------- | -------------------------------------------------------------------------------------------------------------------- | ----------------------- |
| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` | `false` |
| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` |
| `volumePermissions.image.repository` | Init container volume-permissions image repository | `bitnami/bitnami-shell` |
| `volumePermissions.image.tag` | Init container volume-permissions image tag (immutable tags are recommended) | `10-debian-10-r265` |
| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` |
| `volumePermissions.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` |
| `volumePermissions.resources.limits` | Init container volume-permissions resource limits | `{}` |
| `volumePermissions.resources.requests` | Init container volume-permissions resource requests | `{}` |
| `volumePermissions.securityContext.runAsUser` | User ID for the volumePermissions container | `0` |
### Arbiter parameters
| Name | Description | Value |
| -------------------------------------------- | ------------------------------------------------------------------------------------------------- | ------- |
| `arbiter.enabled` | Enable deploying the arbiter | `true` |
| `arbiter.configuration` | Arbiter configuration file to be used | `""` |
| `arbiter.hostAliases` | Add deployment host aliases | `[]` |
| `arbiter.existingConfigmap` | Name of existing ConfigMap with Arbiter configuration | `""` |
| `arbiter.command` | Override default container command (useful when using custom images) | `[]` |
| `arbiter.args` | Override default container args (useful when using custom images) | `[]` |
| `arbiter.extraFlags` | Arbiter additional command line flags | `[]` |
| `arbiter.extraEnvVars` | Extra environment variables to add to Arbiter pods | `[]` |
| `arbiter.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars | `""` |
| `arbiter.extraEnvVarsSecret` | Name of existing Secret containing extra env vars (in case of sensitive data) | `""` |
| `arbiter.annotations` | Additional annotations to be added to the Arbiter statefulset | `{}` |
| `arbiter.labels` | Additional labels to be added to the Arbiter statefulset | `{}` |
| `arbiter.podAffinityPreset` | Arbiter Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `arbiter.podAntiAffinityPreset` | Arbiter Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` |
| `arbiter.nodeAffinityPreset.type` | Arbiter Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `arbiter.nodeAffinityPreset.key` | Arbiter Node label key to match. Ignored if `affinity` is set. | `""` |
| `arbiter.nodeAffinityPreset.values` | Arbiter Node label values to match. Ignored if `affinity` is set. | `[]` |
| `arbiter.affinity` | Arbiter Affinity for pod assignment | `{}` |
| `arbiter.nodeSelector` | Arbiter Node labels for pod assignment | `{}` |
| `arbiter.tolerations` | Arbiter Tolerations for pod assignment | `[]` |
| `arbiter.podLabels` | Arbiter pod labels | `{}` |
| `arbiter.podAnnotations` | Arbiter Pod annotations | `{}` |
| `arbiter.priorityClassName` | Name of the existing priority class to be used by Arbiter pod(s) | `""` |
| `arbiter.runtimeClassName` | Name of the runtime class to be used by Arbiter pod(s) | `""` |
| `arbiter.podSecurityContext.enabled` | Enable Arbiter pod(s)' Security Context | `true` |
| `arbiter.podSecurityContext.fsGroup` | Group ID for the volumes of the Arbiter pod(s) | `1001` |
| `arbiter.podSecurityContext.sysctls` | sysctl settings of the Arbiter pod(s)' | `[]` |
| `arbiter.containerSecurityContext.enabled` | Enable Arbiter container(s)' Security Context | `true` |
| `arbiter.containerSecurityContext.runAsUser` | User ID for the Arbiter container | `1001` |
| `arbiter.resources.limits` | The resources limits for Arbiter containers | `{}` |
| `arbiter.resources.requests` | The requested resources for Arbiter containers | `{}` |
| `arbiter.livenessProbe.enabled` | Enable livenessProbe | `true` |
| `arbiter.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `30` |
| `arbiter.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` |
| `arbiter.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` |
| `arbiter.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` |
| `arbiter.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` |
| `arbiter.readinessProbe.enabled` | Enable readinessProbe | `true` |
| `arbiter.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` |
| `arbiter.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` |
| `arbiter.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` |
| `arbiter.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` |
| `arbiter.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` |
| `arbiter.customLivenessProbe` | Override default liveness probe for Arbiter containers | `{}` |
| `arbiter.customReadinessProbe` | Override default readiness probe for Arbiter containers | `{}` |
| `arbiter.initContainers` | Add additional init containers for the Arbiter pod(s) | `[]` |
| `arbiter.sidecars` | Add additional sidecar containers for the Arbiter pod(s) | `[]` |
| `arbiter.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Arbiter container(s) | `[]` |
| `arbiter.extraVolumes` | Optionally specify extra list of additional volumes to the Arbiter statefulset | `[]` |
| `arbiter.pdb.create` | Enable/disable a Pod Disruption Budget creation for Arbiter pod(s) | `false` |
| `arbiter.pdb.minAvailable` | Minimum number/percentage of Arbiter pods that should remain scheduled | `1` |
| `arbiter.pdb.maxUnavailable` | Maximum number/percentage of Arbiter pods that may be made unavailable | `""` |
| `arbiter.service.nameOverride` | The arbiter service name | `""` |
### Hidden Node parameters
| Name | Description | Value |
| ---------------------------------------------------- | ---------------------------------------------------------------------------------------------------- | ------------------- |
| `hidden.enabled` | Enable deploying the hidden nodes | `false` |
| `hidden.configuration` | Hidden node configuration file to be used | `""` |
| `hidden.existingConfigmap` | Name of existing ConfigMap with Hidden node configuration | `""` |
| `hidden.command` | Override default container command (useful when using custom images) | `[]` |
| `hidden.args` | Override default container args (useful when using custom images) | `[]` |
| `hidden.extraFlags` | Hidden node additional command line flags | `[]` |
| `hidden.extraEnvVars` | Extra environment variables to add to Hidden node pods | `[]` |
| `hidden.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars | `""` |
| `hidden.extraEnvVarsSecret` | Name of existing Secret containing extra env vars (in case of sensitive data) | `""` |
| `hidden.annotations` | Additional annotations to be added to the hidden node statefulset | `{}` |
| `hidden.labels` | Additional labels to be added to the hidden node statefulset | `{}` |
| `hidden.replicaCount` | Number of hidden nodes (only when `architecture=replicaset`) | `1` |
| `hidden.strategyType` | StrategyType for hidden node statefulset | `RollingUpdate` |
| `hidden.podManagementPolicy` | Pod management policy for hidden node | `OrderedReady` |
| `hidden.podAffinityPreset` | Hidden node Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `hidden.podAntiAffinityPreset` | Hidden node Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` |
| `hidden.nodeAffinityPreset.type` | Hidden Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `hidden.nodeAffinityPreset.key` | Hidden Node label key to match. Ignored if `affinity` is set. | `""` |
| `hidden.nodeAffinityPreset.values` | Hidden Node label values to match. Ignored if `affinity` is set. | `[]` |
| `hidden.affinity` | Hidden node Affinity for pod assignment | `{}` |
| `hidden.nodeSelector` | Hidden node Node labels for pod assignment | `{}` |
| `hidden.tolerations` | Hidden node Tolerations for pod assignment | `[]` |
| `hidden.podLabels` | Hidden node pod labels | `{}` |
| `hidden.podAnnotations` | Hidden node Pod annotations | `{}` |
| `hidden.priorityClassName` | Name of the existing priority class to be used by hidden node pod(s) | `""` |
| `hidden.runtimeClassName` | Name of the runtime class to be used by hidden node pod(s) | `""` |
| `hidden.resources.limits` | The resources limits for hidden node containers | `{}` |
| `hidden.resources.requests` | The requested resources for hidden node containers | `{}` |
| `hidden.livenessProbe.enabled` | Enable livenessProbe | `true` |
| `hidden.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `30` |
| `hidden.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` |
| `hidden.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` |
| `hidden.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` |
| `hidden.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` |
| `hidden.readinessProbe.enabled` | Enable readinessProbe | `true` |
| `hidden.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` |
| `hidden.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` |
| `hidden.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` |
| `hidden.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` |
| `hidden.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` |
| `hidden.customLivenessProbe` | Override default liveness probe for hidden node containers | `{}` |
| `hidden.customReadinessProbe` | Override default readiness probe for hidden node containers | `{}` |
| `hidden.initContainers` | Add init containers to the MongoDB&reg; Hidden pods. | `[]` |
| `hidden.sidecars` | Add additional sidecar containers for the hidden node pod(s) | `[]` |
| `hidden.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the hidden node container(s) | `[]` |
| `hidden.extraVolumes` | Optionally specify extra list of additional volumes to the hidden node statefulset | `[]` |
| `hidden.pdb.create` | Enable/disable a Pod Disruption Budget creation for hidden node pod(s) | `false` |
| `hidden.pdb.minAvailable` | Minimum number/percentage of hidden node pods that should remain scheduled | `1` |
| `hidden.pdb.maxUnavailable` | Maximum number/percentage of hidden node pods that may be made unavailable | `""` |
| `hidden.persistence.enabled` | Enable hidden node data persistence using PVC | `true` |
| `hidden.persistence.medium` | Provide a medium for `emptyDir` volumes. | `""` |
| `hidden.persistence.storageClass` | PVC Storage Class for hidden node data volume | `""` |
| `hidden.persistence.accessModes` | PV Access Mode | `["ReadWriteOnce"]` |
| `hidden.persistence.size` | PVC Storage Request for hidden node data volume | `8Gi` |
| `hidden.persistence.annotations` | PVC annotations | `{}` |
| `hidden.persistence.mountPath` | The path the volume will be mounted at, useful when using different MongoDB&reg; images. | `/bitnami/mongodb` |
| `hidden.persistence.subPath` | The subdirectory of the volume to mount to, useful in dev environments | `""` |
| `hidden.persistence.volumeClaimTemplates.selector` | A label query over volumes to consider for binding (e.g. when using local volumes) | `{}` |
| `hidden.persistence.volumeClaimTemplates.dataSource` | Set volumeClaimTemplate dataSource | `{}` |
### Metrics parameters
| Name | Description | Value |
| -------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | -------------------------- |
| `metrics.enabled` | Enable using a sidecar Prometheus exporter | `false` |
| `metrics.image.registry` | MongoDB&reg; Prometheus exporter image registry | `docker.io` |
| `metrics.image.repository` | MongoDB&reg; Prometheus exporter image repository | `bitnami/mongodb-exporter` |
| `metrics.image.tag` | MongoDB&reg; Prometheus exporter image tag (immutable tags are recommended) | `0.11.2-debian-10-r354` |
| `metrics.image.pullPolicy` | MongoDB&reg; Prometheus exporter image pull policy | `IfNotPresent` |
| `metrics.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` |
| `metrics.username` | String with username for the metrics exporter | `""` |
| `metrics.password` | String with password for the metrics exporter | `""` |
| `metrics.extraFlags` | String with extra flags to the metrics exporter | `""` |
| `metrics.extraUri` | Additional URI options of the metrics service | `""` |
| `metrics.resources.limits` | The resources limits for Prometheus exporter containers | `{}` |
| `metrics.resources.requests` | The requested resources for Prometheus exporter containers | `{}` |
| `metrics.containerPort` | Port of the Prometheus metrics container | `9216` |
| `metrics.service.annotations` | Annotations for Prometheus Exporter pods. Evaluated as a template. | `{}` |
| `metrics.service.type` | Type of the Prometheus metrics service | `ClusterIP` |
| `metrics.service.port` | Port of the Prometheus metrics service | `9216` |
| `metrics.livenessProbe.enabled` | Enable livenessProbe | `true` |
| `metrics.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `15` |
| `metrics.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `5` |
| `metrics.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` |
| `metrics.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `3` |
| `metrics.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` |
| `metrics.readinessProbe.enabled` | Enable readinessProbe | `true` |
| `metrics.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` |
| `metrics.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `5` |
| `metrics.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `1` |
| `metrics.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `3` |
| `metrics.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` |
| `metrics.serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using Prometheus Operator | `false` |
| `metrics.serviceMonitor.namespace` | Namespace which Prometheus is running in | `""` |
| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped | `30s` |
| `metrics.serviceMonitor.scrapeTimeout` | Specify the timeout after which the scrape is ended | `""` |
| `metrics.serviceMonitor.relabellings` | RelabelConfigs to apply to samples before scraping. | `[]` |
| `metrics.serviceMonitor.metricRelabelings` | MetricsRelabelConfigs to apply to samples before ingestion. | `[]` |
| `metrics.serviceMonitor.additionalLabels` | Used to pass Labels that are used by the Prometheus installed in your cluster to select Service Monitors to work with | `{}` |
| `metrics.prometheusRule.enabled` | Set this to true to create prometheusRules for Prometheus operator | `false` |
| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRules will be discovered by Prometheus | `{}` |
| `metrics.prometheusRule.namespace` | Namespace where prometheusRules resource should be created | `""` |
| `metrics.prometheusRule.rules` | Rules to be created, check values for an example | `{}` |
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
```bash
$ helm install my-release \
--set auth.rootPassword=secretpassword,auth.username=my-user,auth.password=my-password,auth.database=my-database \
bitnami/mongodb
```
The above command sets the MongoDB&reg; `root` account password to `secretpassword`. Additionally, it creates a standard database user named `my-user`, with the password `my-password`, who has access to a database named `my-database`.
> NOTE: Once this chart is deployed, it is not possible to change the application's access credentials, such as usernames or passwords, using Helm. To change these application credentials after deployment, delete any persistent volumes (PVs) used by the chart and re-deploy it, or use the application's built-in administrative tools if available.
Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
```bash
$ helm install my-release -f values.yaml bitnami/mongodb
```
> **Tip**: You can use the default [values.yaml](values.yaml)
## Configuration and installation details
### [Rolling vs Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/)
It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image.
Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist.
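As a sketch, the image can be pinned to an immutable tag in `values.yaml` (the tag shown is the chart default from the parameters table; substitute the exact build you have validated):
```yaml
image:
  registry: docker.io
  repository: bitnami/mongodb
  tag: 4.4.10-debian-10-r44   # immutable tag; avoid rolling tags such as "4.4" or "latest"
  pullPolicy: IfNotPresent
```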
### Customize a new MongoDB instance
The [Bitnami MongoDB&reg; image](https://github.com/bitnami/bitnami-docker-mongodb) supports the use of custom scripts to initialize a fresh instance. In order to execute the scripts, two options are available:
* Specify them using the `initdbScripts` parameter as dict.
* Define an external Kubernetes ConfigMap with all the initialization scripts by setting the `initdbScriptsConfigMap` parameter. Note that this will override the previous option.
The allowed script extensions are `.sh` and `.js`.
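For example, a hedged sketch of an inline init script passed via `initdbScripts` (the database and collection names are placeholders):
```yaml
initdbScripts:
  create-index.js: |
    // runs once on a fresh instance; placeholder database/collection names
    db = db.getSiblingDB('my-database');
    db.mycollection.createIndex({ name: 1 });
```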
### Replicaset: Access MongoDB&reg; nodes from outside the cluster
In order to access MongoDB&reg; nodes from outside the cluster when using a replicaset architecture, a specific service per MongoDB&reg; pod will be created. There are two ways of configuring external access:
- Using LoadBalancer services
- Using NodePort services.
Refer to the [chart documentation for more details and configuration examples](https://docs.bitnami.com/kubernetes/infrastructure/mongodb/configuration/configure-external-access-replicaset/).
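A minimal sketch of the LoadBalancer approach with auto-discovery, which also requires RBAC and a ServiceAccount (parameter names taken from the tables above; see the linked documentation for the authoritative examples):
```yaml
architecture: replicaset
externalAccess:
  enabled: true
  autoDiscovery:
    enabled: true        # init container queries the K8s API for the external IPs
  service:
    type: LoadBalancer
    port: 27017
rbac:
  create: true           # required by the auto-discovery init container
serviceAccount:
  create: true
```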
### Add extra environment variables
To add extra environment variables (useful for advanced operations like custom init scripts), use the `extraEnvVars` property.
```yaml
extraEnvVars:
- name: LOG_LEVEL
value: error
```
Alternatively, you can use a ConfigMap or a Secret with the environment variables. To do so, use the `extraEnvVarsCM` or the `extraEnvVarsSecret` properties.
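For instance, referencing an existing ConfigMap and Secret by name (both names below are placeholders):
```yaml
extraEnvVarsCM: mongodb-extra-env          # existing ConfigMap with extra env vars (placeholder name)
extraEnvVarsSecret: mongodb-extra-secrets  # existing Secret with sensitive env vars (placeholder name)
```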
### Use Sidecars and Init Containers
If additional containers are needed in the same pod (such as additional metrics or logging exporters), they can be defined using the `sidecars` config parameter. Similarly, extra init containers can be added using the `initContainers` parameter.
Refer to the chart documentation for more information on, and examples of, configuring and using [sidecars and init containers](https://docs.bitnami.com/kubernetes/infrastructure/mongodb/configuration/configure-sidecar-init-containers/).
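A hedged sketch of both parameters (container names, images, and commands are illustrative only):
```yaml
sidecars:
  - name: my-exporter            # illustrative sidecar container
    image: busybox:1.35
    command: ["sh", "-c", "while true; do sleep 3600; done"]
initContainers:
  - name: wait-a-bit             # illustrative init container
    image: busybox:1.35
    command: ["sh", "-c", "sleep 5"]
```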
## Persistence
The [Bitnami MongoDB&reg;](https://github.com/bitnami/bitnami-docker-mongodb) image stores the MongoDB&reg; data and configurations at the `/bitnami/mongodb` path of the container.
The chart mounts a [Persistent Volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) at this location. The volume is created using dynamic volume provisioning.
If you encounter errors when working with persistent volumes, refer to our [troubleshooting guide for persistent volumes](https://docs.bitnami.com/kubernetes/faq/troubleshooting/troubleshooting-persistence-volumes/).
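A minimal persistence sketch using the parameters from the table above (the StorageClass name is a placeholder):
```yaml
persistence:
  enabled: true
  storageClass: "standard"   # placeholder StorageClass; leave empty to use the cluster default
  accessModes:
    - ReadWriteOnce
  size: 20Gi
```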
## Use custom Prometheus rules
Custom Prometheus rules can be defined for the Prometheus Operator by using the `prometheusRule` parameter.
Refer to the [chart documentation for an example of a custom rule](https://docs.bitnami.com/kubernetes/infrastructure/mongodb/administration/use-prometheus-rules/).
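A hedged sketch enabling the exporter and a PrometheusRule (the alert expression and labels are illustrative; check `values.yaml` for the exact structure expected under `metrics.prometheusRule.rules`):
```yaml
metrics:
  enabled: true
  serviceMonitor:
    enabled: true
  prometheusRule:
    enabled: true
    additionalLabels:
      release: prometheus          # placeholder label matching your Prometheus Operator selector
    rules:
      - alert: MongodbDown         # illustrative rule
        expr: absent(mongodb_up) == 1
        for: 5m
        labels:
          severity: critical
```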
## Enable SSL/TLS
This chart supports enabling SSL/TLS between nodes in the cluster, as well as between MongoDB&reg; clients and nodes, by setting the `MONGODB_EXTRA_FLAGS` and `MONGODB_CLIENT_EXTRA_FLAGS` container environment variables, together with the correct `MONGODB_ADVERTISED_HOSTNAME`. To enable full TLS encryption, set the `tls.enabled` parameter to `true`.
Refer to the [chart documentation for more information on enabling TLS](https://docs.bitnami.com/kubernetes/infrastructure/mongodb/administration/enable-tls/).
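A minimal sketch of both options, auto-generated certificates or an existing secret (the secret name is a placeholder):
```yaml
tls:
  enabled: true
  autoGenerated: true        # let the chart create a CA and per-node certificates
  mode: requireTLS
  # Alternatively, bring your own certificates:
  # autoGenerated: false
  # existingSecret: mongodb-tls-certs   # placeholder; keys: mongodb-ca-cert, mongodb-ca-key, client-pem
```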
### Set Pod affinity
This chart allows you to set your custom affinity using the `XXX.affinity` parameter(s). Find more information about Pod affinity in the [Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity).
As an alternative, you can use the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/master/bitnami/common#affinities) chart. To do so, set the `XXX.podAffinityPreset`, `XXX.podAntiAffinityPreset`, or `XXX.nodeAffinityPreset` parameters.
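For example, a sketch that spreads pods using the presets (the node label key and values are placeholders):
```yaml
podAntiAffinityPreset: hard           # never co-locate two MongoDB pods on the same node
nodeAffinityPreset:
  type: soft
  key: topology.kubernetes.io/zone    # placeholder node label
  values:
    - zone-a
    - zone-b
arbiter:
  podAntiAffinityPreset: hard
```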
## Troubleshooting
Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues).
## Upgrading
If authentication is enabled, it's necessary to set the `auth.rootPassword` (also `auth.replicaSetKey` when using a replicaset architecture) when upgrading for readiness/liveness probes to work properly. When you install this chart for the first time, some notes will be displayed providing the credentials you must use under the 'Credentials' section. Please note down the password, and run the command below to upgrade your chart:
```bash
$ helm upgrade my-release bitnami/mongodb --set auth.rootPassword=[PASSWORD] (--set auth.replicaSetKey=[REPLICASETKEY])
```
> Note: you need to substitute the placeholders [PASSWORD] and [REPLICASETKEY] with the values obtained in the installation notes.
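Equivalently, the credentials can be kept in a values file that is reused for every upgrade (the values below are placeholders for those obtained from the installation notes):
```yaml
auth:
  enabled: true
  rootPassword: "<password from the installation notes>"   # placeholder
  replicaSetKey: "<replica set key>"                        # placeholder; only for architecture=replicaset
```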
### To 10.0.0
[On November 13, 2020, Helm v2 support formally ended](https://github.com/helm/charts#status-of-the-project). This major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.
[Learn more about this change and related upgrade considerations](https://docs.bitnami.com/kubernetes/infrastructure/mongodb/administration/upgrade-helm3/).
### To 9.0.0
MongoDB&reg; container images were updated to `4.4.x` and it can affect compatibility with older versions of MongoDB&reg;. Refer to the following guides to upgrade your applications:
- [Standalone](https://docs.mongodb.com/manual/release-notes/4.4-upgrade-standalone/)
- [Replica Set](https://docs.mongodb.com/manual/release-notes/4.4-upgrade-replica-set/)
### To 8.0.0
- Architecture used to configure MongoDB&reg; as a replicaset was completely refactored. Now, both primary and secondary nodes are part of the same statefulset.
- Chart labels were adapted to follow the Helm charts best practices.
- This version introduces `bitnami/common`, a [library chart](https://helm.sh/docs/topics/library_charts/#helm) as a dependency. More documentation about this new utility could be found [here](https://github.com/bitnami/charts/tree/master/bitnami/common#bitnami-common-library-chart). Please, make sure that you have updated the chart dependencies before executing any upgrade.
- Several parameters were renamed or disappeared in favor of new ones on this major version. These are the most important ones:
- `replicas` is renamed to `replicaCount`.
- Authentication parameters are reorganized under the `auth.*` parameter:
- `usePassword` is renamed to `auth.enabled`.
- `mongodbRootPassword`, `mongodbUsername`, `mongodbPassword`, `mongodbDatabase`, and `replicaSet.key` are now `auth.rootPassword`, `auth.username`, `auth.password`, `auth.database`, and `auth.replicaSetKey` respectively.
- `securityContext.*` is deprecated in favor of `podSecurityContext` and `containerSecurityContext`.
- Parameters prefixed with `mongodb` are renamed removing the prefix. E.g. `mongodbEnableIPv6` is renamed to `enableIPv6`.
- Parameters affecting Arbiter nodes are reorganized under the `arbiter.*` parameter.
Consequences:
- Backwards compatibility is not guaranteed. To upgrade to `8.0.0`, install a new release of the MongoDB&reg; chart, and migrate your data by creating a backup of the database, and restoring it on the new release.
### To 7.0.0
From this version, the way of setting the ingress rules has changed. Instead of using `ingress.paths` and `ingress.hosts` as separate objects, you should now define the rules as objects inside the `ingress.hosts` value, for example:
```yaml
ingress:
hosts:
- name: mongodb.local
path: /
```
### To 6.0.0
From this version, `mongodbEnableIPv6` is set to `false` by default in order to work properly in most k8s clusters. If you want to use IPv6 support, set this variable to `true` by adding `--set mongodbEnableIPv6=true` to your `helm` command.
You can find more information in the [`bitnami/mongodb` image README](https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md).
### To 5.0.0
When enabling replicaset configuration, backwards compatibility is not guaranteed unless you modify the labels used on the chart's statefulsets.
Use the workaround below to upgrade from versions previous to 5.0.0. The following example assumes that the release name is `my-release`:
```console
$ kubectl delete statefulset my-release-mongodb-arbiter my-release-mongodb-primary my-release-mongodb-secondary --cascade=false
```
### Add extra deployment options
To add extra deployments (useful for advanced features like sidecars), use the `extraDeploy` property.
The example below shows how to use a [MongoDB replica set pod labeler sidecar](https://github.com/combor/k8s-mongo-labeler-sidecar) to identify the primary pod and dynamically label it as the primary node:
```yaml
extraDeploy:
- apiVersion: v1
kind: Service
metadata:
name: mongodb-primary
namespace: default
labels:
app.kubernetes.io/component: mongodb
app.kubernetes.io/instance: mongodb
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: mongodb
spec:
type: NodePort
externalTrafficPolicy: Cluster
ports:
- name: mongodb-primary
port: 30001
nodePort: 30001
protocol: TCP
targetPort: mongodb
selector:
app.kubernetes.io/component: mongodb
app.kubernetes.io/instance: mongodb
app.kubernetes.io/name: mongodb
primary: "true"
```

View File

@ -0,0 +1,8 @@
architecture: replicaset
replicaCount: 3
pdb:
create: true
rbac:
create: true
serviceAccount:
create: true

204
mongodb/templates/NOTES.txt Normal file
View File

@ -0,0 +1,204 @@
CHART NAME: {{ .Chart.Name }}
CHART VERSION: {{ .Chart.Version }}
APP VERSION: {{ .Chart.AppVersion }}
{{- if .Values.diagnosticMode.enabled }}
The chart has been deployed in diagnostic mode. All probes have been disabled and the command has been overwritten with:
command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 4 }}
args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 4 }}
Get the list of pods by executing:
kubectl get pods --namespace {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }}
Access the pod you want to debug by executing
kubectl exec --namespace {{ .Release.Namespace }} -ti <NAME OF THE POD> -- bash
In order to replicate the container startup scripts execute this command:
/opt/bitnami/scripts/mongodb/entrypoint.sh /opt/bitnami/scripts/mongodb/run.sh
{{- else }}
{{- $replicaCount := int .Values.replicaCount }}
{{- $portNumber := int .Values.service.port }}
{{- $fullname := include "mongodb.fullname" . }}
{{- $releaseNamespace := include "mongodb.namespace" . }}
{{- $clusterDomain := .Values.clusterDomain }}
{{- $loadBalancerIPListLength := len .Values.externalAccess.service.loadBalancerIPs }}
{{- $mongoList := list }}
{{- range $e, $i := until $replicaCount }}
{{- $mongoList = append $mongoList (printf "%s-%d.%s-headless.%s.svc.%s:%d" $fullname $i $fullname $releaseNamespace $clusterDomain $portNumber) }}
{{- end }}
{{- if and (eq .Values.architecture "replicaset") .Values.externalAccess.enabled (not .Values.externalAccess.autoDiscovery.enabled) (not (eq $replicaCount $loadBalancerIPListLength )) (eq .Values.externalAccess.service.type "LoadBalancer") }}
####################################################################################
### ERROR: You enabled external access to MongoDB&reg; nodes without specifying ###
### the array of load balancer IPs for MongoDB&reg; nodes. ###
####################################################################################
This deployment will be incomplete until you configure the array of load balancer
IPs for MongoDB&reg; nodes. To complete your deployment follow the steps below:
1. Wait for the load balancer IPs (it may take a few minutes for them to be available):
kubectl get svc --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ template "mongodb.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=mongodb" -w
2. Obtain the load balancer IPs and upgrade your chart:
{{- range $e, $i := until $replicaCount }}
LOAD_BALANCER_IP_{{ add $i 1 }}="$(kubectl get svc --namespace {{ $releaseNamespace }} {{ $fullname }}-{{ $i }}-external -o jsonpath='{.status.loadBalancer.ingress[0].ip}')"
{{- end }}
3. Upgrade your chart:
helm upgrade --namespace {{ .Release.Namespace }} {{ .Release.Name }} bitnami/{{ .Chart.Name }} \
--set mongodb.replicaCount={{ $replicaCount }} \
--set mongodb.externalAccess.enabled=true \
{{- range $i, $e := until $replicaCount }}
--set mongodb.externalAccess.service.loadBalancerIPs[{{ $i }}]=$LOAD_BALANCER_IP_{{ add $i 1 }} \
{{- end }}
--set mongodb.externalAccess.service.type=LoadBalancer
{{- else }}
{{- if and (or (and (eq .Values.architecture "standalone") (or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort"))) (and (eq .Values.architecture "replicaset") .Values.externalAccess.enabled)) (not .Values.auth.enabled) }}
-------------------------------------------------------------------------------
WARNING
By not enabling "mongodb.auth.enabled" you have most likely exposed the
MongoDB&reg; service externally without any authentication mechanism.
For security reasons, we strongly suggest that you enable authentication by
setting the "mongodb.auth.enabled" parameter to "true".
-------------------------------------------------------------------------------
{{- end }}
** Please be patient while the chart is being deployed **
MongoDB&reg; can be accessed on the following DNS name(s) and ports from within your cluster:
{{- if eq .Values.architecture "replicaset" }}
{{ join "\n" $mongoList | nindent 4 }}
{{- else }}
{{ $fullname }}.{{ $releaseNamespace }}.svc.{{ .Values.clusterDomain }}
{{- end }}
{{- if .Values.auth.enabled }}
To get the root password run:
export MONGODB_ROOT_PASSWORD=$(kubectl get secret --namespace {{ template "mongodb.namespace" . }} {{ template "mongodb.secretName" . }} -o jsonpath="{.data.mongodb-root-password}" | base64 --decode)
{{- end }}
{{- $customUsers := include "mongodb.customUsers" . -}}
{{- $customDatabases := include "mongodb.customDatabases" . -}}
{{- if and (not (empty $customUsers)) (not (empty $customDatabases)) }}
{{- $customUsersList := splitList "," $customUsers }}
{{- range $index, $user := $customUsersList }}
To get the password for "{{ $user }}" run:
export MONGODB_PASSWORD=$(kubectl get secret --namespace {{ include "mongodb.namespace" $ }} {{ include "mongodb.secretName" $ }} -o jsonpath="{.data.mongodb-passwords}" | base64 --decode | awk -F',' '{print ${{ add 1 $index }}}')
{{- end }}
{{- end }}
To connect to your database, create a MongoDB&reg; client container:
kubectl run --namespace {{ template "mongodb.namespace" . }} {{ template "mongodb.fullname" . }}-client --rm --tty -i --restart='Never' --env="MONGODB_ROOT_PASSWORD=$MONGODB_ROOT_PASSWORD" --image {{ template "mongodb.image" . }} --command -- bash
Then, run the following command:
{{- if eq .Values.architecture "replicaset" }}
mongo admin --host "{{ join "," $mongoList }}" {{- if .Values.auth.enabled }} --authenticationDatabase admin -u root -p $MONGODB_ROOT_PASSWORD{{- end }}
{{- else }}
mongo admin --host "{{ template "mongodb.fullname" . }}" {{- if .Values.auth.enabled }} --authenticationDatabase admin -u root -p $MONGODB_ROOT_PASSWORD{{- end }}
{{- end }}
{{- if and (eq .Values.architecture "replicaset") .Values.externalAccess.enabled }}
To connect to your database nodes from outside, you need to add both primary and secondary nodes hostnames/IPs to your Mongo client. To obtain them, follow the instructions below:
{{- if eq "NodePort" .Values.externalAccess.service.type }}
{{- if .Values.externalAccess.service.domain }}
MongoDB&reg; nodes domain: Use your provided hostname to reach MongoDB&reg; nodes, {{ .Values.externalAccess.service.domain }}
{{- else }}
MongoDB&reg; nodes domain: you can reach MongoDB&reg; nodes on any of the K8s nodes external IPs.
kubectl get nodes -o wide
{{- end }}
MongoDB&reg; nodes port: You will have a different node port for each MongoDB&reg; node. You can get the list of configured node ports using the command below:
echo "$(kubectl get svc --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ template "mongodb.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=mongodb,pod" -o jsonpath='{.items[*].spec.ports[0].nodePort}' | tr ' ' '\n')"
{{- else if contains "LoadBalancer" .Values.externalAccess.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IPs to be available.
Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ template "mongodb.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=mongodb,pod" -w'
MongoDB&reg; nodes domain: You will have a different external IP for each MongoDB&reg; node. You can get the list of external IPs using the command below:
echo "$(kubectl get svc --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ template "mongodb.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=mongodb,pod" -o jsonpath='{.items[*].status.loadBalancer.ingress[0].ip}' | tr ' ' '\n')"
MongoDB&reg; nodes port: {{ .Values.externalAccess.service.port }}
{{- end }}
{{- else if eq .Values.architecture "standalone" }}
To connect to your database from outside the cluster execute the following commands:
{{- if contains "NodePort" .Values.service.type }}
export NODE_IP=$(kubectl get nodes --namespace {{ template "mongodb.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}")
export NODE_PORT=$(kubectl get --namespace {{ template "mongodb.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "mongodb.fullname" . }})
mongo --host $NODE_IP --port $NODE_PORT {{- if .Values.auth.enabled }} --authenticationDatabase admin -p $MONGODB_ROOT_PASSWORD{{- end }}
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
Watch the status with: 'kubectl get svc --namespace {{ template "mongodb.namespace" . }} -w {{ template "mongodb.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ template "mongodb.namespace" . }} {{ template "mongodb.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
mongo --host $SERVICE_IP --port {{ $portNumber }} {{- if .Values.auth.enabled }} --authenticationDatabase admin -p $MONGODB_ROOT_PASSWORD{{- end }}
{{- else if contains "ClusterIP" .Values.service.type }}
kubectl port-forward --namespace {{ template "mongodb.namespace" . }} svc/{{ template "mongodb.fullname" . }} {{ $portNumber }}:{{ $portNumber }} &
mongo --host 127.0.0.1 {{- if .Values.auth.enabled }} --authenticationDatabase admin -p $MONGODB_ROOT_PASSWORD{{- end }}
{{- end }}
{{- end }}
{{- end }}
{{- if .Values.metrics.enabled }}
To access the MongoDB&reg; Prometheus metrics, get the MongoDB&reg; Prometheus URL by running:
kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "mongodb.fullname" . }}-metrics {{ .Values.metrics.service.port }}:{{ .Values.metrics.service.port }} &
echo "Prometheus Metrics URL: http://127.0.0.1:{{ .Values.metrics.service.port }}/metrics"
Then, open the obtained URL in a browser.
{{- end }}
{{- end }}
{{- include "common.warnings.rollingTag" .Values.image }}
{{- include "common.warnings.rollingTag" .Values.metrics.image }}
{{- include "common.warnings.rollingTag" .Values.externalAccess.autoDiscovery.image }}
{{- include "common.warnings.rollingTag" .Values.volumePermissions.image }}
{{- include "mongodb.validateValues" . }}
{{- $secretName := include "mongodb.fullname" . -}}
{{- $passwordValidationErrors := include "common.validations.values.mongodb.passwords" (dict "secret" $secretName "context" $) -}}
{{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $passwordValidationErrors) "context" $) -}}

View File

@ -0,0 +1,444 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "mongodb.name" -}}
{{- include "common.names.name" . -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "mongodb.fullname" -}}
{{- include "common.names.fullname" . -}}
{{- end -}}
{{/*
Create a default mongo service name which can be overridden.
*/}}
{{- define "mongodb.service.nameOverride" -}}
{{- if .Values.service -}}
{{- if .Values.service.nameOverride }}
{{- .Values.service.nameOverride -}}
{{- else -}}
{{ include "mongodb.fullname" . }}-headless
{{- end -}}
{{- else -}}
{{ include "mongodb.fullname" . }}-headless
{{- end }}
{{- end }}
{{/*
Create a default mongo arbiter service name which can be overridden.
*/}}
{{- define "mongodb.arbiter.service.nameOverride" -}}
{{- if .Values.arbiter.service -}}
{{- if .Values.arbiter.service.nameOverride }}
{{- .Values.arbiter.service.nameOverride -}}
{{- else -}}
{{ include "mongodb.fullname" . }}-arbiter-headless
{{- end -}}
{{- else -}}
{{ include "mongodb.fullname" . }}-arbiter-headless
{{- end }}
{{- end }}
{{/*
Return the proper MongoDB&reg; image name
*/}}
{{- define "mongodb.image" -}}
{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }}
{{- end -}}
{{/*
Return the proper image name (for the metrics image)
*/}}
{{- define "mongodb.metrics.image" -}}
{{ include "common.images.image" (dict "imageRoot" .Values.metrics.image "global" .Values.global) }}
{{- end -}}
{{/*
Return the proper image name (for the init container volume-permissions image)
*/}}
{{- define "mongodb.volumePermissions.image" -}}
{{ include "common.images.image" (dict "imageRoot" .Values.volumePermissions.image "global" .Values.global) }}
{{- end -}}
{{/*
Return the proper image name (for the init container auto-discovery image)
*/}}
{{- define "mongodb.externalAccess.autoDiscovery.image" -}}
{{ include "common.images.image" (dict "imageRoot" .Values.externalAccess.autoDiscovery.image "global" .Values.global) }}
{{- end -}}
{{/*
Return the proper image name (for the TLS Certs image)
*/}}
{{- define "mongodb.tls.image" -}}
{{ include "common.images.image" (dict "imageRoot" .Values.tls.image "global" .Values.global) }}
{{- end -}}
{{/*
Return the proper Docker Image Registry Secret Names
*/}}
{{- define "mongodb.imagePullSecrets" -}}
{{ include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.metrics.image .Values.volumePermissions.image) "global" .Values.global) }}
{{- end -}}
{{/*
Allow the release namespace to be overridden for multi-namespace deployments in combined charts.
*/}}
{{- define "mongodb.namespace" -}}
{{- if .Values.global -}}
{{- if .Values.global.namespaceOverride }}
{{- .Values.global.namespaceOverride -}}
{{- else -}}
{{- .Release.Namespace -}}
{{- end -}}
{{- else -}}
{{- .Release.Namespace -}}
{{- end }}
{{- end -}}
{{- define "mongodb.serviceMonitor.namespace" -}}
{{- if .Values.metrics.serviceMonitor.namespace -}}
{{- .Values.metrics.serviceMonitor.namespace -}}
{{- else -}}
{{- include "mongodb.namespace" . -}}
{{- end }}
{{- end -}}
{{- define "mongodb.prometheusRule.namespace" -}}
{{- if .Values.metrics.prometheusRule.namespace -}}
{{- .Values.metrics.prometheusRule.namespace -}}
{{- else -}}
{{- include "mongodb.namespace" . -}}
{{- end }}
{{- end -}}
{{/*
Returns the proper service account name depending if an explicit service account name is set
in the values file. If the name is not set it will default to either mongodb.fullname if serviceAccount.create
is true or default otherwise.
*/}}
{{- define "mongodb.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "mongodb.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}
{{/*
Return the list of custom users to create during the initialization (string format)
*/}}
{{- define "mongodb.customUsers" -}}
{{- $customUsers := list -}}
{{- if .Values.auth.username -}}
{{- $customUsers = append $customUsers .Values.auth.username }}
{{- end }}
{{- range .Values.auth.usernames }}
{{- $customUsers = append $customUsers . }}
{{- end }}
{{- printf "%s" (default "" (join "," $customUsers)) -}}
{{- end -}}
{{/*
Return the list of passwords for the custom users (string format)
*/}}
{{- define "mongodb.customPasswords" -}}
{{- $customPasswords := list -}}
{{- if .Values.auth.password -}}
{{- $customPasswords = append $customPasswords .Values.auth.password }}
{{- end }}
{{- range .Values.auth.passwords }}
{{- $customPasswords = append $customPasswords . }}
{{- end }}
{{- printf "%s" (default "" (join "," $customPasswords)) -}}
{{- end -}}
{{/*
Return the list of custom databases to create during the initialization (string format)
*/}}
{{- define "mongodb.customDatabases" -}}
{{- $customDatabases := list -}}
{{- if .Values.auth.database -}}
{{- $customDatabases = append $customDatabases .Values.auth.database }}
{{- end }}
{{- range .Values.auth.databases }}
{{- $customDatabases = append $customDatabases . }}
{{- end }}
{{- printf "%s" (default "" (join "," $customDatabases)) -}}
{{- end -}}
{{/*
Return the configmap with the MongoDB&reg; configuration
*/}}
{{- define "mongodb.configmapName" -}}
{{- if .Values.existingConfigmap -}}
{{- printf "%s" (tpl .Values.existingConfigmap $) -}}
{{- else -}}
{{- printf "%s" (include "mongodb.fullname" .) -}}
{{- end -}}
{{- end -}}
{{/*
Return true if a configmap object should be created for MongoDB&reg;
*/}}
{{- define "mongodb.createConfigmap" -}}
{{- if and .Values.configuration (not .Values.existingConfigmap) }}
{{- true -}}
{{- else -}}
{{- end -}}
{{- end -}}
{{/*
Return the secret with MongoDB&reg; credentials
*/}}
{{- define "mongodb.secretName" -}}
{{- if .Values.auth.existingSecret -}}
{{- printf "%s" (tpl .Values.auth.existingSecret $) -}}
{{- else -}}
{{- printf "%s" (include "mongodb.fullname" .) -}}
{{- end -}}
{{- end -}}
{{/*
Return true if a secret object should be created for MongoDB&reg;
*/}}
{{- define "mongodb.createSecret" -}}
{{- if and .Values.auth.enabled (not .Values.auth.existingSecret) }}
{{- true -}}
{{- else -}}
{{- end -}}
{{- end -}}
{{/*
Get the initialization scripts ConfigMap name.
*/}}
{{- define "mongodb.initdbScriptsCM" -}}
{{- if .Values.initdbScriptsConfigMap -}}
{{- printf "%s" .Values.initdbScriptsConfigMap -}}
{{- else -}}
{{- printf "%s-init-scripts" (include "mongodb.fullname" .) -}}
{{- end -}}
{{- end -}}
{{/*
Return true if the Arbiter should be deployed
*/}}
{{- define "mongodb.arbiter.enabled" -}}
{{- if and (eq .Values.architecture "replicaset") .Values.arbiter.enabled }}
{{- true -}}
{{- else -}}
{{- end -}}
{{- end -}}
{{/*
Return the configmap with the MongoDB&reg; configuration for the Arbiter
*/}}
{{- define "mongodb.arbiter.configmapName" -}}
{{- if .Values.arbiter.existingConfigmap -}}
{{- printf "%s" (tpl .Values.arbiter.existingConfigmap $) -}}
{{- else -}}
{{- printf "%s-arbiter" (include "mongodb.fullname" .) -}}
{{- end -}}
{{- end -}}
{{/*
Return true if a configmap object should be created for MongoDB&reg; Arbiter
*/}}
{{- define "mongodb.arbiter.createConfigmap" -}}
{{- if and (eq .Values.architecture "replicaset") .Values.arbiter.enabled .Values.arbiter.configuration (not .Values.arbiter.existingConfigmap) }}
{{- true -}}
{{- else -}}
{{- end -}}
{{- end -}}
{{/*
Return true if the Hidden should be deployed
*/}}
{{- define "mongodb.hidden.enabled" -}}
{{- if and (eq .Values.architecture "replicaset") .Values.hidden.enabled }}
{{- true -}}
{{- end -}}
{{- end -}}
{{/*
Return the configmap with the MongoDB&reg; configuration for the Hidden
*/}}
{{- define "mongodb.hidden.configmapName" -}}
{{- if .Values.hidden.existingConfigmap -}}
{{- printf "%s" (tpl .Values.hidden.existingConfigmap $) -}}
{{- else -}}
{{- printf "%s-hidden" (include "mongodb.fullname" .) -}}
{{- end -}}
{{- end -}}
{{/*
Return true if a configmap object should be created for MongoDB&reg; Hidden
*/}}
{{- define "mongodb.hidden.createConfigmap" -}}
{{- if and (include "mongodb.hidden.enabled" .) .Values.hidden.enabled .Values.hidden.configuration (not .Values.hidden.existingConfigmap) }}
{{- true -}}
{{- end -}}
{{- end -}}
{{/*
Compile all warnings into a single message, and call fail.
*/}}
{{- define "mongodb.validateValues" -}}
{{- $messages := list -}}
{{- $messages := append $messages (include "mongodb.validateValues.pspAndRBAC" .) -}}
{{- $messages := append $messages (include "mongodb.validateValues.architecture" .) -}}
{{- $messages := append $messages (include "mongodb.validateValues.customUsersDBs" .) -}}
{{- $messages := append $messages (include "mongodb.validateValues.customUsersDBsLength" .) -}}
{{- $messages := append $messages (include "mongodb.validateValues.externalAccessServiceType" .) -}}
{{- $messages := append $messages (include "mongodb.validateValues.loadBalancerIPsListLength" .) -}}
{{- $messages := append $messages (include "mongodb.validateValues.nodePortListLength" .) -}}
{{- $messages := append $messages (include "mongodb.validateValues.externalAccessAutoDiscoveryRBAC" .) -}}
{{- $messages := without $messages "" -}}
{{- $message := join "\n" $messages -}}
{{- if $message -}}
{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}}
{{- end -}}
{{- end -}}
{{/* Validate RBAC is created when using PSP */}}
{{- define "mongodb.validateValues.pspAndRBAC" -}}
{{- if and (.Values.podSecurityPolicy.create) (not .Values.rbac.create) -}}
mongodb: podSecurityPolicy.create, rbac.create
Both podSecurityPolicy.create and rbac.create must be true if you want
to create podSecurityPolicy
{{- end -}}
{{- end -}}
{{/* Validate values of MongoDB&reg; - must provide a valid architecture */}}
{{- define "mongodb.validateValues.architecture" -}}
{{- if and (ne .Values.architecture "standalone") (ne .Values.architecture "replicaset") -}}
mongodb: architecture
Invalid architecture selected. Valid values are "standalone" and
"replicaset". Please set a valid architecture (--set mongodb.architecture="xxxx")
{{- end -}}
{{- end -}}
{{/*
Validate values of MongoDB&reg; - both auth.usernames and auth.databases are necessary
to create a custom user and database during 1st initialization
*/}}
{{- define "mongodb.validateValues.customUsersDBs" -}}
{{- $customUsers := include "mongodb.customUsers" . -}}
{{- $customDatabases := include "mongodb.customDatabases" . -}}
{{- if or (and (empty $customUsers) (not (empty $customDatabases))) (and (not (empty $customUsers)) (empty $customDatabases)) }}
mongodb: auth.usernames, auth.databases
Both auth.usernames and auth.databases must be provided to create
custom users and databases during 1st initialization.
Please set both of them (--set auth.usernames[0]="xxxx",auth.databases[0]="yyyy")
{{- end -}}
{{- end -}}
{{/*
Validate values of MongoDB&reg; - both auth.usernames and auth.databases arrays should have the same length
to create a custom user and database during 1st initialization
*/}}
{{- define "mongodb.validateValues.customUsersDBsLength" -}}
{{- if ne (len .Values.auth.usernames) (len .Values.auth.databases) }}
mongodb: auth.usernames, auth.databases
Both auth.usernames and auth.databases arrays should have the same length
{{- end -}}
{{- end -}}
{{/*
Validate values of MongoDB&reg; - service type for external access
*/}}
{{- define "mongodb.validateValues.externalAccessServiceType" -}}
{{- if and (eq .Values.architecture "replicaset") (not (eq .Values.externalAccess.service.type "NodePort")) (not (eq .Values.externalAccess.service.type "LoadBalancer")) (not (eq .Values.externalAccess.service.type "ClusterIP")) -}}
mongodb: externalAccess.service.type
Available service type for external access are NodePort, LoadBalancer or ClusterIP.
{{- end -}}
{{- end -}}
{{/*
Validate values of MongoDB&reg; - the number of replicas must match the length of the LoadBalancer IPs list
*/}}
{{- define "mongodb.validateValues.loadBalancerIPsListLength" -}}
{{- $replicaCount := int .Values.replicaCount }}
{{- $loadBalancerListLength := len .Values.externalAccess.service.loadBalancerIPs }}
{{- if and (eq .Values.architecture "replicaset") .Values.externalAccess.enabled (not .Values.externalAccess.autoDiscovery.enabled ) (eq .Values.externalAccess.service.type "LoadBalancer") (not (eq $replicaCount $loadBalancerListLength )) -}}
mongodb: .Values.externalAccess.service.loadBalancerIPs
Number of replicas and loadBalancerIPs array length must be the same.
{{- end -}}
{{- end -}}
{{/*
Validate values of MongoDB&reg; - the number of replicas must match the length of the NodePort list
*/}}
{{- define "mongodb.validateValues.nodePortListLength" -}}
{{- $replicaCount := int .Values.replicaCount }}
{{- $nodePortListLength := len .Values.externalAccess.service.nodePorts }}
{{- if and (eq .Values.architecture "replicaset") .Values.externalAccess.enabled (eq .Values.externalAccess.service.type "NodePort") (not (eq $replicaCount $nodePortListLength )) -}}
mongodb: .Values.externalAccess.service.nodePorts
Number of replicas and nodePorts array length must be the same.
{{- end -}}
{{- end -}}
{{/*
Validate values of MongoDB&reg; - RBAC should be enabled when autoDiscovery is enabled
*/}}
{{- define "mongodb.validateValues.externalAccessAutoDiscoveryRBAC" -}}
{{- if and (eq .Values.architecture "replicaset") .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled (not .Values.rbac.create )}}
mongodb: rbac.create
By specifying "externalAccess.enabled=true" and "externalAccess.autoDiscovery.enabled=true"
an initContainer will be used to autodetect the external IPs/ports by querying the
K8s API. Please note this initContainer requires specific RBAC resources. You can create them
by specifying "--set rbac.create=true".
{{- end -}}
{{- end -}}
{{/*
Validate values of MongoDB&reg; exporter URI string - auth.enabled and/or tls.enabled must be enabled or it defaults
*/}}
{{- define "mongodb.mongodb_exporter.uri" -}}
{{- $uriTlsArgs := ternary "tls=true&tlsCertificateKeyFile=/certs/mongodb.pem&tlsCAFile=/certs/mongodb-ca-cert" "" .Values.tls.enabled -}}
{{- if .Values.metrics.username }}
{{- $uriAuth := ternary "$(echo $MONGODB_METRICS_USERNAME | sed -r \"s/@/%40/g;s/:/%3A/g\"):$(echo $MONGODB_METRICS_PASSWORD | sed -r \"s/@/%40/g;s/:/%3A/g\")@" "" .Values.auth.enabled -}}
{{- printf "mongodb://%slocalhost:27017/admin?%s" $uriAuth $uriTlsArgs -}}
{{- else -}}
{{- $uriAuth := ternary "$MONGODB_ROOT_USER:$(echo $MONGODB_ROOT_PASSWORD | sed -r \"s/@/%40/g;s/:/%3A/g\")@" "" .Values.auth.enabled -}}
{{- printf "mongodb://%slocalhost:27017/admin?%s" $uriAuth $uriTlsArgs -}}
{{- end -}}
{{- end -}}
{{/*
Return the appropriate apiGroup for PodSecurityPolicy.
*/}}
{{- define "podSecurityPolicy.apiGroup" -}}
{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
{{- print "policy" -}}
{{- else -}}
{{- print "extensions" -}}
{{- end -}}
{{- end -}}
{{/*
Return true if a TLS secret object should be created
*/}}
{{- define "mongodb.createTlsSecret" -}}
{{- if and .Values.tls.enabled (not .Values.tls.existingSecret) }}
{{- true -}}
{{- end -}}
{{- end -}}
{{/*
Return the secret containing MongoDB&reg; TLS certificates
*/}}
{{- define "mongodb.tlsSecretName" -}}
{{- $secretName := .Values.tls.existingSecret -}}
{{- if $secretName -}}
{{- printf "%s" (tpl $secretName $) -}}
{{- else -}}
{{- printf "%s-ca" (include "mongodb.fullname" .) -}}
{{- end -}}
{{- end -}}

View File

@ -0,0 +1,18 @@
{{- if (include "mongodb.arbiter.createConfigmap" .) }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "mongodb.fullname" . }}-arbiter
namespace: {{ include "mongodb.namespace" . }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
app.kubernetes.io/component: arbiter
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
{{- end }}
{{- if .Values.commonAnnotations }}
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
data:
mongodb.conf: |-
{{- include "common.tplvalues.render" (dict "value" .Values.arbiter.configuration "context" $) | nindent 4 }}
{{- end }}

View File

@ -0,0 +1,30 @@
{{- if (include "mongodb.arbiter.enabled" .) }}
apiVersion: v1
kind: Service
metadata:
name: {{ include "mongodb.arbiter.service.nameOverride" . }}
namespace: {{ include "mongodb.namespace" . }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
app.kubernetes.io/component: arbiter
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
{{- end }}
{{- if or .Values.service.annotations .Values.commonAnnotations }}
annotations:
{{- if .Values.service.annotations }}
{{- include "common.tplvalues.render" ( dict "value" .Values.service.annotations "context" $) | nindent 4 }}
{{- end }}
{{- if .Values.commonAnnotations }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
{{- end }}
spec:
type: ClusterIP
clusterIP: None
ports:
- name: tcp-mongodb
port: {{ .Values.service.port }}
targetPort: mongodb
selector: {{- include "common.labels.matchLabels" . | nindent 4 }}
app.kubernetes.io/component: arbiter
{{- end }}

View File

@ -0,0 +1,25 @@
{{- if and (include "mongodb.arbiter.enabled" .) .Values.arbiter.pdb.create }}
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
name: {{ include "mongodb.fullname" . }}-arbiter
namespace: {{ include "mongodb.namespace" . }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
app.kubernetes.io/component: arbiter
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
{{- end }}
{{- if .Values.commonAnnotations }}
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
spec:
{{- if .Values.arbiter.pdb.minAvailable }}
minAvailable: {{ .Values.arbiter.pdb.minAvailable }}
{{- end }}
{{- if .Values.arbiter.pdb.maxUnavailable }}
maxUnavailable: {{ .Values.arbiter.pdb.maxUnavailable }}
{{- end }}
selector:
matchLabels: {{ include "common.labels.matchLabels" . | nindent 6 }}
app.kubernetes.io/component: arbiter
{{- end }}

View File

@ -0,0 +1,291 @@
{{- if (include "mongodb.arbiter.enabled" .) }}
apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }}
kind: StatefulSet
metadata:
name: {{ include "mongodb.fullname" . }}-arbiter
namespace: {{ include "mongodb.namespace" . }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
app.kubernetes.io/component: arbiter
{{- if .Values.arbiter.labels }}
{{- include "common.tplvalues.render" (dict "value" .Values.arbiter.labels "context" $) | nindent 4 }}
{{- end }}
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
{{- end }}
{{- if or .Values.arbiter.annotations .Values.commonAnnotations }}
annotations:
{{- if .Values.arbiter.annotations }}
{{- include "common.tplvalues.render" ( dict "value" .Values.arbiter.annotations "context" $) | nindent 4 }}
{{- end }}
{{- if .Values.commonAnnotations }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
{{- end }}
spec:
serviceName: {{ include "mongodb.arbiter.service.nameOverride" . }}
selector:
matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
app.kubernetes.io/component: arbiter
template:
metadata:
labels: {{- include "common.labels.standard" . | nindent 8 }}
app.kubernetes.io/component: arbiter
{{- if .Values.arbiter.podLabels }}
{{- include "common.tplvalues.render" (dict "value" .Values.arbiter.podLabels "context" $) | nindent 8 }}
{{- end }}
{{- if or (include "mongodb.arbiter.createConfigmap" .) .Values.arbiter.podAnnotations }}
annotations:
{{- if (include "mongodb.arbiter.createConfigmap" .) }}
checksum/configuration: {{ include (print $.Template.BasePath "/arbiter/configmap.yaml") . | sha256sum }}
{{- end }}
{{- if .Values.arbiter.podAnnotations }}
{{- include "common.tplvalues.render" (dict "value" .Values.arbiter.podAnnotations "context" $) | nindent 8 }}
{{- end }}
{{- end }}
spec:
{{- include "mongodb.imagePullSecrets" . | nindent 6 }}
{{- if .Values.schedulerName }}
schedulerName: "{{ .Values.schedulerName }}"
{{- end }}
serviceAccountName: {{ template "mongodb.serviceAccountName" . }}
{{- if .Values.arbiter.affinity }}
affinity: {{- include "common.tplvalues.render" (dict "value" .Values.arbiter.affinity "context" $) | nindent 8 }}
{{- else }}
affinity:
podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.arbiter.podAffinityPreset "component" "arbiter" "context" $) | nindent 10 }}
podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.arbiter.podAntiAffinityPreset "component" "arbiter" "context" $) | nindent 10 }}
nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.arbiter.nodeAffinityPreset.type "key" .Values.arbiter.nodeAffinityPreset.key "values" .Values.arbiter.nodeAffinityPreset.values) | nindent 10 }}
{{- end }}
{{- if .Values.arbiter.nodeSelector }}
nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.arbiter.nodeSelector "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.arbiter.hostAliases }}
hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.arbiter.hostAliases "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.arbiter.tolerations }}
tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.arbiter.tolerations "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.arbiter.priorityClassName }}
priorityClassName: {{ .Values.arbiter.priorityClassName }}
{{- end }}
{{- if .Values.arbiter.runtimeClassName }}
runtimeClassName: {{ .Values.arbiter.runtimeClassName }}
{{- end }}
{{- if .Values.arbiter.podSecurityContext.enabled }}
securityContext: {{- omit .Values.arbiter.podSecurityContext "enabled" | toYaml | nindent 8 }}
{{- end }}
initContainers:
{{- if .Values.arbiter.initContainers }}
{{- include "common.tplvalues.render" (dict "value" .Values.arbiter.initContainers "context" $) | nindent 8 }}
{{- end }}
{{- if and .Values.tls.enabled .Values.arbiter.enabled }}
- name: generate-client
image: {{ include "mongodb.tls.image" . }}
imagePullPolicy: {{ .Values.tls.image.pullPolicy | quote }}
env:
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: certs-volume
mountPath: /certs/CAs
- name: certs
mountPath: /certs
command:
- sh
- "-c"
- |
/bin/bash <<'EOF'
my_hostname=$(hostname)
svc=$(echo -n "$my_hostname" | sed s/-[0-9]*$//)-headless
cp /certs/CAs/* /certs/
cat >/certs/openssl.cnf <<EOL
[req]
req_extensions = v3_req
distinguished_name = req_distinguished_name
[req_distinguished_name]
[ v3_req ]
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
subjectAltName = @alt_names
[alt_names]
DNS.1 = $svc
DNS.2 = $my_hostname
DNS.3 = $my_hostname.$svc.$MY_POD_NAMESPACE.svc.cluster.local
DNS.4 = localhost
DNS.5 = 127.0.0.1
EOL
export RANDFILE=/certs/.rnd && openssl genrsa -out /certs/mongo.key 2048
#Create the client/server cert
openssl req -new -key /certs/mongo.key -out /certs/mongo.csr -subj "/C=US/O=My Organisations/OU=IT/CN=$my_hostname" -config /certs/openssl.cnf
#Signing the server cert with the CA cert and key
openssl x509 -req -in /certs/mongo.csr -CA /certs/mongodb-ca-cert -CAkey /certs/mongodb-ca-key -CAcreateserial -out /certs/mongo.crt -days 3650 -extensions v3_req -extfile /certs/openssl.cnf
rm /certs/mongo.csr
#Concatenate to a pem file for use as the client PEM file which can be used for both member and client authentication.
cat /certs/mongo.crt /certs/mongo.key > /certs/mongodb.pem
cd /certs/
shopt -s extglob
rm -rf !(mongodb-ca-cert|mongodb.pem|CAs|openssl.cnf)
chmod 0600 mongodb-ca-cert mongodb.pem
EOF
{{- end }}
containers:
- name: mongodb-arbiter
image: {{ include "mongodb.image" . }}
imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
{{- if .Values.arbiter.containerSecurityContext.enabled }}
securityContext: {{- omit .Values.arbiter.containerSecurityContext "enabled" | toYaml | nindent 12 }}
{{- end }}
{{- if .Values.diagnosticMode.enabled }}
command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }}
{{- else if .Values.arbiter.command }}
command: {{- include "common.tplvalues.render" (dict "value" .Values.arbiter.command "context" $) | nindent 12 }}
{{- end }}
{{- if .Values.diagnosticMode.enabled }}
args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }}
{{- else if .Values.arbiter.args }}
args: {{- include "common.tplvalues.render" (dict "value" .Values.arbiter.args "context" $) | nindent 12 }}
{{- end }}
env:
- name: BITNAMI_DEBUG
value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }}
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: K8S_SERVICE_NAME
value: "{{ include "mongodb.arbiter.service.nameOverride" . }}"
- name: MONGODB_REPLICA_SET_MODE
value: "arbiter"
- name: MONGODB_INITIAL_PRIMARY_HOST
value: "{{ include "mongodb.fullname" . }}-0.{{ include "mongodb.service.nameOverride" . }}.$(MY_POD_NAMESPACE).svc.{{ .Values.clusterDomain }}"
- name: MONGODB_REPLICA_SET_NAME
value: {{ .Values.replicaSetName | quote }}
- name: MONGODB_ADVERTISED_HOSTNAME
value: "$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.{{ .Values.clusterDomain }}"
{{- if .Values.auth.enabled }}
- name: MONGODB_INITIAL_PRIMARY_ROOT_USER
value: {{ .Values.auth.rootUser | quote }}
- name: MONGODB_INITIAL_PRIMARY_ROOT_PASSWORD
valueFrom:
secretKeyRef:
name: {{ include "mongodb.secretName" . }}
key: mongodb-root-password
- name: MONGODB_REPLICA_SET_KEY
valueFrom:
secretKeyRef:
name: {{ include "mongodb.secretName" . }}
key: mongodb-replica-set-key
{{- end }}
- name: ALLOW_EMPTY_PASSWORD
value: {{ ternary "no" "yes" .Values.auth.enabled | quote }}
{{- if and .Values.tls.enabled .Values.arbiter.enabled }}
- name: MONGODB_EXTRA_FLAGS
value: --tlsMode={{ .Values.tls.mode }} --tlsCertificateKeyFile=/certs/mongodb.pem --tlsCAFile=/certs/mongodb-ca-cert
{{- end }}
{{- if .Values.arbiter.extraFlags }}
- name: MONGODB_EXTRA_FLAGS
value: {{ .Values.arbiter.extraFlags | join " " | quote }}
{{- end }}
{{- if and .Values.tls.enabled .Values.arbiter.enabled }}
- name: MONGODB_CLIENT_EXTRA_FLAGS
value: --tls --tlsCertificateKeyFile=/certs/mongodb.pem --tlsCAFile=/certs/mongodb-ca-cert
{{- end }}
{{- if .Values.arbiter.extraEnvVars }}
{{- include "common.tplvalues.render" (dict "value" .Values.arbiter.extraEnvVars "context" $) | nindent 12 }}
{{- end }}
{{- if or .Values.arbiter.extraEnvVarsCM .Values.arbiter.extraEnvVarsSecret }}
envFrom:
{{- if .Values.arbiter.extraEnvVarsCM }}
- configMapRef:
name: {{ tpl .Values.arbiter.extraEnvVarsCM . | quote }}
{{- end }}
{{- if .Values.arbiter.extraEnvVarsSecret }}
- secretRef:
name: {{ tpl .Values.arbiter.extraEnvVarsSecret . | quote }}
{{- end }}
{{- end }}
ports:
- containerPort: 27017
name: mongodb
{{- if not .Values.diagnosticMode.enabled }}
{{- if .Values.arbiter.livenessProbe.enabled }}
livenessProbe:
tcpSocket:
port: mongodb
initialDelaySeconds: {{ .Values.arbiter.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.arbiter.livenessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.arbiter.livenessProbe.timeoutSeconds }}
successThreshold: {{ .Values.arbiter.livenessProbe.successThreshold }}
failureThreshold: {{ .Values.arbiter.livenessProbe.failureThreshold }}
{{- else if .Values.arbiter.customLivenessProbe }}
livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.arbiter.customLivenessProbe "context" $) | nindent 12 }}
{{- end }}
{{- if .Values.arbiter.readinessProbe.enabled }}
readinessProbe:
tcpSocket:
port: mongodb
initialDelaySeconds: {{ .Values.arbiter.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.arbiter.readinessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.arbiter.readinessProbe.timeoutSeconds }}
successThreshold: {{ .Values.arbiter.readinessProbe.successThreshold }}
failureThreshold: {{ .Values.arbiter.readinessProbe.failureThreshold }}
{{- else if .Values.arbiter.customReadinessProbe }}
readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.arbiter.customReadinessProbe "context" $) | nindent 12 }}
{{- end }}
{{- end }}
{{- if .Values.arbiter.resources }}
resources: {{- toYaml .Values.arbiter.resources | nindent 12 }}
{{- end }}
{{- if or .Values.arbiter.configuration .Values.arbiter.existingConfigmap .Values.arbiter.extraVolumeMounts .Values.tls.enabled }}
volumeMounts:
{{- if or .Values.arbiter.configuration .Values.arbiter.existingConfigmap }}
- name: config
mountPath: /opt/bitnami/mongodb/conf/mongodb.conf
subPath: mongodb.conf
{{- end }}
{{- if and .Values.tls.enabled .Values.arbiter.enabled }}
- name: certs
mountPath: /certs
{{- end }}
{{- if .Values.arbiter.extraVolumeMounts }}
{{- toYaml .Values.arbiter.extraVolumeMounts | nindent 12 }}
{{- end }}
{{- end }}
{{- if .Values.arbiter.sidecars }}
{{- include "common.tplvalues.render" (dict "value" .Values.arbiter.sidecars "context" $) | nindent 8 }}
{{- end }}
{{- if or .Values.arbiter.configuration .Values.arbiter.existingConfigmap .Values.arbiter.extraVolumes .Values.tls.enabled }}
volumes:
{{- if or .Values.arbiter.configuration .Values.arbiter.existingConfigmap }}
- name: config
configMap:
name: {{ include "mongodb.arbiter.configmapName" . }}
{{- end }}
{{- if and .Values.tls.enabled .Values.arbiter.enabled }}
- name: certs
emptyDir: {}
- name: certs-volume
secret:
secretName: {{ template "mongodb.tlsSecretName" . }}
items:
- key: mongodb-ca-cert
path: mongodb-ca-cert
mode: 0600
- key: mongodb-ca-key
path: mongodb-ca-key
mode: 0600
{{- end }}
{{- if .Values.arbiter.extraVolumes }}
{{- toYaml .Values.arbiter.extraVolumes | nindent 8 }}
{{- end }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,18 @@
{{- if (include "mongodb.createConfigmap" .) }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "mongodb.fullname" . }}
namespace: {{ include "mongodb.namespace" . }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
app.kubernetes.io/component: mongodb
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
{{- end }}
{{- if .Values.commonAnnotations }}
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
data:
mongodb.conf: |-
{{- include "common.tplvalues.render" (dict "value" .Values.configuration "context" $) | nindent 4 }}
{{- end }}

View File

@ -0,0 +1,4 @@
{{- range .Values.extraDeploy }}
---
{{ include "common.tplvalues.render" (dict "value" . "context" $) }}
{{- end }}

View File

@ -0,0 +1,15 @@
{{- if (include "mongodb.hidden.createConfigmap" .) }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "mongodb.fullname" . }}-hidden
namespace: {{ include "mongodb.namespace" . }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
app.kubernetes.io/component: hidden
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
{{- end }}
data:
mongodb.conf: |-
{{- include "common.tplvalues.render" (dict "value" .Values.hidden.configuration "context" $) | nindent 4 }}
{{- end }}

View File

@ -0,0 +1,49 @@
{{- if and (include "mongodb.hidden.enabled" .) .Values.externalAccess.hidden.enabled }}
{{- $fullName := include "mongodb.fullname" . }}
{{- $replicaCount := .Values.hidden.replicaCount | int }}
{{- $root := . }}
{{- range $i, $e := until $replicaCount }}
{{- $targetPod := printf "%s-hidden-%d" (printf "%s" $fullName) $i }}
{{- $_ := set $ "targetPod" $targetPod }}
---
apiVersion: v1
kind: Service
metadata:
name: {{ $fullName }}-hidden-{{ $i }}-external
namespace: {{ include "mongodb.namespace" $ }}
labels: {{- include "common.labels.standard" $ | nindent 4 }}
app.kubernetes.io/component: hidden
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
{{- end }}
pod: {{ $targetPod }}
{{- if $root.Values.externalAccess.hidden.service.annotations }}
annotations: {{- include "common.tplvalues.render" ( dict "value" $root.Values.externalAccess.hidden.service.annotations "context" $) | nindent 4 }}
{{- end }}
spec:
type: {{ $root.Values.externalAccess.hidden.service.type }}
{{- if eq $root.Values.externalAccess.hidden.service.type "LoadBalancer" }}
{{- if not (empty $root.Values.externalAccess.hidden.service.loadBalancerIPs) }}
loadBalancerIP: {{ index $root.Values.externalAccess.hidden.service.loadBalancerIPs $i }}
{{- end }}
{{- if $root.Values.externalAccess.hidden.service.loadBalancerSourceRanges }}
loadBalancerSourceRanges: {{- toYaml $root.Values.externalAccess.hidden.service.loadBalancerSourceRanges | nindent 4 }}
{{- end }}
{{- end }}
publishNotReadyAddresses: true
ports:
- name: {{ $root.Values.service.portName }}
port: {{ $root.Values.externalAccess.hidden.service.port }}
{{- if not (empty $root.Values.externalAccess.hidden.service.nodePorts) }}
nodePort: {{ index $root.Values.externalAccess.hidden.service.nodePorts $i }}
{{- else }}
nodePort: null
{{- end }}
targetPort: mongodb
selector: {{- include "common.labels.matchLabels" $ | nindent 4 }}
app.kubernetes.io/component: hidden
statefulset.kubernetes.io/pod-name: {{ $targetPod }}
---
{{- end }}
{{- end }}

View File

@ -0,0 +1,25 @@
{{- if (include "mongodb.hidden.enabled" .) }}
apiVersion: v1
kind: Service
metadata:
name: {{ include "mongodb.fullname" . }}-hidden-headless
namespace: {{ include "mongodb.namespace" . }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
app.kubernetes.io/component: hidden
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
{{- end }}
{{- if .Values.service.annotations }}
annotations: {{- include "common.tplvalues.render" (dict "value" .Values.service.annotations "context" $) | nindent 4 }}
{{- end }}
spec:
type: ClusterIP
clusterIP: None
publishNotReadyAddresses: true
ports:
- name: {{ .Values.service.portName }}
port: {{ .Values.service.port }}
targetPort: mongodb
selector: {{- include "common.labels.matchLabels" . | nindent 4 }}
app.kubernetes.io/component: hidden
{{- end }}

View File

@ -0,0 +1,22 @@
{{- if and (include "mongodb.hidden.enabled" .) .Values.hidden.pdb.create }}
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
name: {{ include "mongodb.fullname" . }}-hidden
namespace: {{ include "mongodb.namespace" . }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
app.kubernetes.io/component: hidden
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
{{- end }}
spec:
{{- if .Values.hidden.pdb.minAvailable }}
minAvailable: {{ .Values.hidden.pdb.minAvailable }}
{{- end }}
{{- if .Values.hidden.pdb.maxUnavailable }}
maxUnavailable: {{ .Values.hidden.pdb.maxUnavailable }}
{{- end }}
selector:
matchLabels: {{ include "common.labels.matchLabels" . | nindent 6 }}
app.kubernetes.io/component: hidden
{{- end }}

View File

@ -0,0 +1,547 @@
{{- if (include "mongodb.hidden.enabled" .) }}
{{- $replicaCount := int .Values.hidden.replicaCount }}
{{- $loadBalancerIPListLength := len .Values.externalAccess.hidden.service.loadBalancerIPs }}
{{- if not (and .Values.externalAccess.hidden.enabled (not .Values.externalAccess.autoDiscovery.enabled) (not (eq $replicaCount $loadBalancerIPListLength )) (eq .Values.externalAccess.hidden.service.type "LoadBalancer")) }}
apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }}
kind: StatefulSet
metadata:
name: {{ include "mongodb.fullname" . }}-hidden
namespace: {{ include "mongodb.namespace" . }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
app.kubernetes.io/component: hidden
{{- if .Values.hidden.labels }}
{{- include "common.tplvalues.render" (dict "value" .Values.hidden.labels "context" $) | nindent 4 }}
{{- end }}
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
{{- end }}
{{- if .Values.hidden.annotations }}
annotations: {{- include "common.tplvalues.render" (dict "value" .Values.hidden.annotations "context" $) | nindent 4 }}
{{- end }}
spec:
serviceName: {{ include "mongodb.fullname" . }}-hidden-headless
podManagementPolicy: {{ .Values.hidden.podManagementPolicy }}
replicas: {{ .Values.hidden.replicaCount }}
updateStrategy:
type: {{ .Values.hidden.strategyType }}
{{- if (eq "OnDelete" .Values.hidden.strategyType) }}
rollingUpdate: null
{{- end }}
selector:
matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
app.kubernetes.io/component: hidden
template:
metadata:
labels: {{- include "common.labels.standard" . | nindent 8 }}
app.kubernetes.io/component: hidden
{{- if .Values.hidden.podLabels }}
{{- include "common.tplvalues.render" (dict "value" .Values.hidden.podLabels "context" $) | nindent 8 }}
{{- end }}
{{- if or (include "mongodb.hidden.createConfigmap" .) .Values.hidden.podAnnotations }}
annotations:
{{- if (include "mongodb.hidden.createConfigmap" .) }}
checksum/configuration: {{ include (print $.Template.BasePath "/hidden/configmap.yaml") . | sha256sum }}
{{- end }}
{{- if .Values.hidden.podAnnotations }}
{{- include "common.tplvalues.render" (dict "value" .Values.hidden.podAnnotations "context" $) | nindent 8 }}
{{- end }}
{{- end }}
spec:
{{- include "mongodb.imagePullSecrets" . | nindent 6 }}
{{- if .Values.schedulerName }}
schedulerName: "{{ .Values.schedulerName }}"
{{- end }}
serviceAccountName: {{ template "mongodb.serviceAccountName" . }}
{{- if .Values.hidden.affinity }}
affinity: {{- include "common.tplvalues.render" (dict "value" .Values.hidden.affinity "context" $) | nindent 8 }}
{{- else }}
affinity:
podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.hidden.podAffinityPreset "component" "" "context" $) | nindent 10 }}
podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.hidden.podAntiAffinityPreset "component" "" "context" $) | nindent 10 }}
nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.hidden.nodeAffinityPreset.type "key" .Values.nodeAffinityPreset.key "values" .Values.nodeAffinityPreset.values) | nindent 10 }}
{{- end }}
{{- if .Values.hidden.nodeSelector }}
nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.hidden.nodeSelector "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.hidden.tolerations }}
tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.hidden.tolerations "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.hidden.priorityClassName }}
priorityClassName: {{ .Values.hidden.priorityClassName }}
{{- end }}
{{- if .Values.hidden.runtimeClassName }}
runtimeClassName: {{ .Values.hidden.runtimeClassName }}
{{- end }}
{{- if .Values.podSecurityContext.enabled }}
securityContext: {{- omit .Values.podSecurityContext "enabled" | toYaml | nindent 8 }}
{{- end }}
{{- if or .Values.hidden.initContainers (and .Values.volumePermissions.enabled .Values.hidden.persistence.enabled) (and .Values.externalAccess.hidden.enabled .Values.externalAccess.autoDiscovery.enabled) .Values.tls.enabled }}
initContainers:
{{- if .Values.hidden.initContainers }}
{{- include "common.tplvalues.render" (dict "value" .Values.hidden.initContainers "context" $) | nindent 8 }}
{{- end }}
{{- if and .Values.volumePermissions.enabled .Values.hidden.persistence.enabled }}
- name: volume-permissions
image: {{ include "mongodb.volumePermissions.image" . }}
imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }}
command:
- /bin/bash
- -ec
args:
- |
mkdir -p {{ .Values.hidden.persistence.mountPath }}{{- if .Values.hidden.persistence.subPath }}/{{ .Values.hidden.persistence.subPath }}{{- end }}
{{- if and .Values.podSecurityContext.enabled .Values.containerSecurityContext.enabled }}
chown -R "{{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }}" "{{ .Values.hidden.persistence.mountPath }}{{- if .Values.hidden.persistence.subPath }}/{{ .Values.hidden.persistence.subPath }}{{- end }}"
{{- end }}
{{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }}
securityContext: {{- omit .Values.volumePermissions.securityContext "runAsUser" | toYaml | nindent 12 }}
{{- else }}
securityContext: {{- .Values.volumePermissions.securityContext | toYaml | nindent 12 }}
{{- end }}
{{- if .Values.volumePermissions.resources }}
resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }}
{{- end }}
volumeMounts:
- name: datadir
mountPath: {{ .Values.hidden.persistence.mountPath }}
{{- end }}
{{- if .Values.tls.enabled }}
- name: generate-tls-certs
image: {{ include "mongodb.tls.image" . }}
imagePullPolicy: {{ .Values.tls.image.pullPolicy | quote }}
env:
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: certs-volume
mountPath: /certs/CAs
- name: certs
mountPath: /certs
command:
- sh
- "-c"
- |
/bin/bash <<'EOF'
my_hostname=$(hostname)
svc=$(echo -n "$my_hostname" | sed s/-[0-9]*$//)-headless
cp /certs/CAs/* /certs/
cat >/certs/openssl.cnf <<EOL
[req]
req_extensions = v3_req
distinguished_name = req_distinguished_name
[req_distinguished_name]
[ v3_req ]
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
subjectAltName = @alt_names
[alt_names]
DNS.1 = $svc
DNS.2 = $my_hostname
DNS.3 = $my_hostname.$svc.$MY_POD_NAMESPACE.svc.cluster.local
DNS.4 = localhost
DNS.5 = 127.0.0.1
{{- if .Values.externalAccess.hidden.service.loadBalancerIPs }}
{{- range $key, $val := .Values.externalAccess.hidden.service.loadBalancerIPs }}
IP.{{ $key }} = {{ $val | quote }}
{{- end }}
{{- end }}
EOL
export RANDFILE=/certs/.rnd && openssl genrsa -out /certs/mongo.key 2048
#Create the client/server certificate
openssl req -new -key /certs/mongo.key -out /certs/mongo.csr -subj "/C=US/O=My Organisations/OU=IT/CN=$my_hostname" -config /certs/openssl.cnf
#Signing the server certificate with the CA cert and key
openssl x509 -req -in /certs/mongo.csr -CA /certs/mongodb-ca-cert -CAkey /certs/mongodb-ca-key -CAcreateserial -out /certs/mongo.crt -days 3650 -extensions v3_req -extfile /certs/openssl.cnf
rm /certs/mongo.csr
#Concatenate to a pem file for use as the client PEM file which can be used for both member and client authentication.
cat /certs/mongo.crt /certs/mongo.key > /certs/mongodb.pem
cd /certs/
shopt -s extglob
rm -rf !(mongodb-ca-cert|mongodb.pem|CAs|openssl.cnf)
chmod 0600 mongodb-ca-cert mongodb.pem
EOF
{{- end }}
{{- if and .Values.externalAccess.hidden.enabled .Values.externalAccess.autoDiscovery.enabled (eq .Values.externalAccess.hidden.service.type "LoadBalancer") }}
- name: auto-discovery
image: {{ include "mongodb.externalAccess.autoDiscovery.image" . }}
imagePullPolicy: {{ .Values.externalAccess.autoDiscovery.image.pullPolicy | quote }}
command:
- /scripts/auto-discovery.sh
env:
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: SHARED_FILE
value: "/shared/info.txt"
{{- if .Values.externalAccess.autoDiscovery.resources }}
resources: {{- toYaml .Values.externalAccess.autoDiscovery.resources | nindent 12 }}
{{- end }}
volumeMounts:
- name: shared
mountPath: /shared
- name: scripts
mountPath: /scripts/auto-discovery.sh
subPath: auto-discovery.sh
{{- end }}
{{- end }}
containers:
- name: mongodb
image: {{ include "mongodb.image" . }}
imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
{{- if .Values.containerSecurityContext.enabled }}
securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }}
{{- end }}
{{- if .Values.diagnosticMode.enabled }}
command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }}
{{- else if .Values.hidden.command }}
command: {{- include "common.tplvalues.render" (dict "value" .Values.hidden.command "context" $) | nindent 12 }}
{{- else }}
command:
- /scripts/setup-hidden.sh
{{- end }}
{{- if .Values.diagnosticMode.enabled }}
args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }}
{{- else if .Values.hidden.args }}
args: {{- include "common.tplvalues.render" (dict "value" .Values.hidden.args "context" $) | nindent 12 }}
{{- end }}
env:
- name: BITNAMI_DEBUG
value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }}
{{- if and .Values.externalAccess.hidden.enabled .Values.externalAccess.autoDiscovery.enabled (eq .Values.externalAccess.hidden.service.type "LoadBalancer") }}
- name: SHARED_FILE
value: "/shared/info.txt"
{{- end }}
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: K8S_SERVICE_NAME
value: "{{ include "mongodb.service.nameOverride" . }}"
- name: K8S_HIDDEN_NODE_SERVICE_NAME
value: "{{ include "mongodb.fullname" . }}-hidden-headless"
- name: MONGODB_REPLICA_SET_MODE
value: "hidden"
- name: MONGODB_INITIAL_PRIMARY_HOST
value: "{{ include "mongodb.fullname" . }}-0.$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.{{ .Values.clusterDomain }}"
- name: MONGODB_REPLICA_SET_NAME
value: {{ .Values.replicaSetName | quote }}
{{- if and .Values.replicaSetHostnames (not .Values.externalAccess.hidden.enabled) }}
- name: MONGODB_ADVERTISED_HOSTNAME
value: "$(MY_POD_NAME).$(K8S_HIDDEN_NODE_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.{{ .Values.clusterDomain }}"
{{- end }}
{{- $customUsers := include "mongodb.customUsers" . -}}
{{- $customDatabases := include "mongodb.customDatabases" . -}}
{{- if not (empty $customUsers) }}
- name: MONGODB_EXTRA_USERNAMES
value: {{ $customUsers | quote }}
{{- end }}
{{- if not (empty $customDatabases) }}
- name: MONGODB_EXTRA_DATABASES
value: {{ $customDatabases | quote }}
{{- end }}
{{- if .Values.auth.enabled }}
{{- if and (not (empty $customUsers)) (not (empty $customDatabases)) }}
- name: MONGODB_EXTRA_PASSWORDS
valueFrom:
secretKeyRef:
name: {{ include "mongodb.secretName" . }}
key: mongodb-passwords
{{- end }}
- name: MONGODB_ROOT_USER
value: {{ .Values.auth.rootUser | quote }}
- name: MONGODB_ROOT_PASSWORD
valueFrom:
secretKeyRef:
name: {{ include "mongodb.secretName" . }}
key: mongodb-root-password
- name: MONGODB_REPLICA_SET_KEY
valueFrom:
secretKeyRef:
name: {{ include "mongodb.secretName" . }}
key: mongodb-replica-set-key
{{- end }}
{{- if and .Values.metrics.enabled (not (empty .Values.metrics.username)) }}
- name: MONGODB_METRICS_USERNAME
value: {{ .Values.metrics.username | quote }}
{{- if .Values.auth.enabled }}
- name: MONGODB_METRICS_PASSWORD
valueFrom:
secretKeyRef:
name: {{ include "mongodb.secretName" . }}
key: mongodb-metrics-password
{{- end }}
{{- end }}
- name: ALLOW_EMPTY_PASSWORD
value: {{ ternary "no" "yes" .Values.auth.enabled | quote }}
- name: MONGODB_SYSTEM_LOG_VERBOSITY
value: {{ .Values.systemLogVerbosity | quote }}
- name: MONGODB_DISABLE_SYSTEM_LOG
value: {{ ternary "yes" "no" .Values.disableSystemLog | quote }}
- name: MONGODB_DISABLE_JAVASCRIPT
value: {{ ternary "yes" "no" .Values.disableJavascript | quote }}
- name: MONGODB_ENABLE_JOURNAL
value: {{ ternary "yes" "no" .Values.enableJournal | quote }}
- name: MONGODB_ENABLE_IPV6
value: {{ ternary "yes" "no" .Values.enableIPv6 | quote }}
- name: MONGODB_ENABLE_DIRECTORY_PER_DB
value: {{ ternary "yes" "no" .Values.directoryPerDB | quote }}
{{- $extraFlags := .Values.hidden.extraFlags | join " " -}}
{{- if .Values.tls.enabled }}
{{- $extraFlags = printf "--tlsMode=%s --tlsCertificateKeyFile=/certs/mongodb.pem --tlsCAFile=/certs/mongodb-ca-cert %s" .Values.tls.mode $extraFlags }}
{{- end }}
{{- if ne $extraFlags ""}}
- name: MONGODB_EXTRA_FLAGS
value: {{ $extraFlags | quote }}
{{- end }}
{{- if .Values.tls.enabled }}
- name: MONGODB_CLIENT_EXTRA_FLAGS
value: --tls --tlsCertificateKeyFile=/certs/mongodb.pem --tlsCAFile=/certs/mongodb-ca-cert
{{- end }}
{{- if .Values.hidden.extraEnvVars }}
{{- include "common.tplvalues.render" (dict "value" .Values.hidden.extraEnvVars "context" $) | nindent 12 }}
{{- end }}
{{- if or .Values.hidden.extraEnvVarsCM .Values.hidden.extraEnvVarsSecret }}
envFrom:
{{- if .Values.hidden.extraEnvVarsCM }}
- configMapRef:
name: {{ tpl .Values.hidden.extraEnvVarsCM . | quote }}
{{- end }}
{{- if .Values.hidden.extraEnvVarsSecret }}
- secretRef:
name: {{ tpl .Values.hidden.extraEnvVarsSecret . | quote }}
{{- end }}
{{- end }}
ports:
- containerPort: 27017
name: mongodb
{{- if not .Values.diagnosticMode.enabled }}
{{- if .Values.hidden.livenessProbe.enabled }}
livenessProbe:
exec:
command:
- mongo
{{- if .Values.tls.enabled }}
- --tls
- --tlsCertificateKeyFile=/certs/mongodb.pem
- --tlsCAFile=/certs/mongodb-ca-cert
{{- end }}
- --eval
- "db.adminCommand('ping')"
initialDelaySeconds: {{ .Values.hidden.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.hidden.livenessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.hidden.livenessProbe.timeoutSeconds }}
successThreshold: {{ .Values.hidden.livenessProbe.successThreshold }}
failureThreshold: {{ .Values.hidden.livenessProbe.failureThreshold }}
{{- else if .Values.hidden.customLivenessProbe }}
livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.hidden.customLivenessProbe "context" $) | nindent 12 }}
{{- end }}
{{- if .Values.hidden.readinessProbe.enabled }}
readinessProbe:
exec:
command:
- mongo
{{- if .Values.tls.enabled }}
- --tls
- --tlsCertificateKeyFile=/certs/mongodb.pem
- --tlsCAFile=/certs/mongodb-ca-cert
{{- end }}
- --eval
- "db.adminCommand('ping')"
initialDelaySeconds: {{ .Values.hidden.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.hidden.readinessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.hidden.readinessProbe.timeoutSeconds }}
successThreshold: {{ .Values.hidden.readinessProbe.successThreshold }}
failureThreshold: {{ .Values.hidden.readinessProbe.failureThreshold }}
{{- else if .Values.hidden.customReadinessProbe }}
readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.hidden.customReadinessProbe "context" $) | nindent 12 }}
{{- end }}
{{- end }}
{{- if .Values.hidden.resources }}
resources: {{- toYaml .Values.hidden.resources | nindent 12 }}
{{- end }}
volumeMounts:
- name: datadir
mountPath: {{ .Values.hidden.persistence.mountPath }}
subPath: {{ .Values.hidden.persistence.subPath }}
{{- if or .Values.initdbScriptsConfigMap .Values.initdbScripts }}
- name: custom-init-scripts
mountPath: /docker-entrypoint-initdb.d
{{- end }}
{{- if or .Values.hidden.configuration .Values.hidden.existingConfigmap }}
- name: config
mountPath: /opt/bitnami/mongodb/conf/mongodb.conf
subPath: mongodb.conf
{{- end }}
- name: scripts
mountPath: /scripts/setup-hidden.sh
subPath: setup-hidden.sh
{{- if and .Values.externalAccess.hidden.enabled .Values.externalAccess.autoDiscovery.enabled (eq .Values.externalAccess.hidden.service.type "LoadBalancer") }}
- name: shared
mountPath: /shared
{{- end }}
{{- if .Values.tls.enabled }}
- name: certs
mountPath: /certs
{{- end }}
{{- if .Values.hidden.extraVolumeMounts }}
{{- toYaml .Values.hidden.extraVolumeMounts | nindent 12 }}
{{- end }}
{{- if .Values.metrics.enabled }}
- name: metrics
image: {{ template "mongodb.metrics.image" . }}
imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }}
{{- if .Values.containerSecurityContext.enabled }}
securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }}
{{- end }}
{{- if .Values.diagnosticMode.enabled }}
command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }}
{{- else }}
command:
- /bin/bash
- -ec
{{- end }}
{{- if .Values.diagnosticMode.enabled }}
args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }}
{{- else }}
args:
- |
/bin/mongodb_exporter --mongodb.uri "{{ include "mongodb.mongodb_exporter.uri" . }}" {{ .Values.metrics.extraFlags }}
{{- end }}
env:
{{- if .Values.auth.enabled }}
{{- if not .Values.metrics.username }}
- name: MONGODB_ROOT_USER
value: {{ .Values.auth.rootUser | quote }}
- name: MONGODB_ROOT_PASSWORD
valueFrom:
secretKeyRef:
name: {{ include "mongodb.secretName" . }}
key: mongodb-root-password
{{- else }}
- name: MONGODB_METRICS_USERNAME
value: {{ .Values.metrics.username | quote }}
- name: MONGODB_METRICS_PASSWORD
valueFrom:
secretKeyRef:
name: {{ include "mongodb.secretName" . }}
key: mongodb-metrics-password
{{- end }}
{{- end }}
volumeMounts:
{{- if .Values.tls.enabled }}
- name: certs
mountPath: /certs
{{- end }}
ports:
- name: metrics
containerPort: 9216
{{- if not .Values.diagnosticMode.enabled }}
{{- if .Values.metrics.livenessProbe.enabled }}
livenessProbe:
httpGet:
path: /
port: metrics
initialDelaySeconds: {{ .Values.metrics.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.metrics.livenessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.metrics.livenessProbe.timeoutSeconds }}
failureThreshold: {{ .Values.metrics.livenessProbe.failureThreshold }}
successThreshold: {{ .Values.metrics.livenessProbe.successThreshold }}
{{- end }}
{{- if .Values.metrics.readinessProbe.enabled }}
readinessProbe:
httpGet:
path: /
port: metrics
initialDelaySeconds: {{ .Values.metrics.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.metrics.readinessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.metrics.readinessProbe.timeoutSeconds }}
failureThreshold: {{ .Values.metrics.readinessProbe.failureThreshold }}
successThreshold: {{ .Values.metrics.readinessProbe.successThreshold }}
{{- end }}
{{- end }}
{{- if .Values.metrics.resources }}
resources: {{- toYaml .Values.metrics.resources | nindent 12 }}
{{- end }}
{{- end }}
{{- if .Values.hidden.sidecars }}
{{- include "common.tplvalues.render" (dict "value" .Values.hidden.sidecars "context" $) | nindent 8 }}
{{- end }}
volumes:
{{- if or .Values.initdbScriptsConfigMap .Values.initdbScripts }}
- name: custom-init-scripts
configMap:
name: {{ template "mongodb.initdbScriptsCM" . }}
{{- end }}
{{- if or .Values.hidden.configuration .Values.hidden.existingConfigmap }}
- name: config
configMap:
name: {{ include "mongodb.hidden.configmapName" . }}
{{- end }}
{{- if and .Values.externalAccess.hidden.enabled .Values.externalAccess.autoDiscovery.enabled (eq .Values.externalAccess.hidden.service.type "LoadBalancer") }}
- name: shared
emptyDir: {}
{{- end }}
- name: scripts
configMap:
name: {{ include "mongodb.fullname" . }}-scripts
defaultMode: 0755
{{- if .Values.hidden.extraVolumes }}
{{- toYaml .Values.hidden.extraVolumes | nindent 8 }}
{{- end }}
{{- if .Values.tls.enabled }}
- name: certs
emptyDir: {}
- name: certs-volume
secret:
secretName: {{ template "mongodb.tlsSecretName" . }}
items:
- key: mongodb-ca-cert
path: mongodb-ca-cert
mode: 0600
- key: mongodb-ca-key
path: mongodb-ca-key
mode: 0600
{{- end }}
{{- if not .Values.hidden.persistence.enabled }}
- name: datadir
{{- if .Values.hidden.persistence.medium }}
emptyDir:
medium: {{ .Values.hidden.persistence.medium | quote }}
{{- else }}
emptyDir: {}
{{- end }}
{{- else }}
volumeClaimTemplates:
- metadata:
name: datadir
{{- if .Values.hidden.persistence.annotations }}
annotations: {{- include "common.tplvalues.render" (dict "value" .Values.hidden.persistence.annotations "context" $) | nindent 10 }}
{{- end }}
spec:
accessModes:
{{- range .Values.hidden.persistence.accessModes }}
- {{ . | quote }}
{{- end }}
resources:
requests:
storage: {{ .Values.hidden.persistence.size | quote }}
{{- if .Values.hidden.persistence.volumeClaimTemplates.requests }}
{{- include "common.tplvalues.render" (dict "value" .Values.hidden.persistence.volumeClaimTemplates.requests "context" $) | nindent 12 }}
{{- end }}
{{- if .Values.hidden.persistence.volumeClaimTemplates.dataSource }}
dataSource: {{- include "common.tplvalues.render" (dict "value" .Values.hidden.persistence.volumeClaimTemplates.dataSource "context" $) | nindent 10 }}
{{- end }}
{{- if .Values.hidden.persistence.volumeClaimTemplates.selector }}
selector: {{- include "common.tplvalues.render" (dict "value" .Values.hidden.persistence.volumeClaimTemplates.selector "context" $) | nindent 10 }}
{{- end }}
{{ include "common.storage.class" (dict "persistence" .Values.hidden.persistence "global" .Values.global) }}
{{- end }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,17 @@
{{- if and .Values.initdbScripts (not .Values.initdbScriptsConfigMap) }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "mongodb.fullname" . }}-init-scripts
namespace: {{ include "mongodb.namespace" . }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
app.kubernetes.io/component: mongodb
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
{{- end }}
{{- if .Values.commonAnnotations }}
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
data:
{{- include "common.tplvalues.render" (dict "value" .Values.initdbScripts "context" .) | nindent 2 }}
{{- end }}

View File

@ -0,0 +1,24 @@
{{- if .Values.metrics.enabled }}
apiVersion: v1
kind: Service
metadata:
name: {{ include "mongodb.fullname" . }}-metrics
namespace: {{ include "mongodb.namespace" . }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
app.kubernetes.io/component: metrics
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
{{- end }}
{{- if .Values.metrics.service.annotations }}
annotations: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.service.annotations "context" $) | nindent 4 }}
{{- end }}
spec:
type: {{ .Values.metrics.service.type }}
ports:
- port: {{ .Values.metrics.service.port }}
targetPort: metrics
protocol: TCP
name: http-metrics
selector: {{- include "common.labels.matchLabels" $ | nindent 4 }}
app.kubernetes.io/component: mongodb
{{- end }}

View File

@ -0,0 +1,17 @@
{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: {{ include "mongodb.fullname" . }}
namespace: {{ include "mongodb.prometheusRule.namespace" . }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
{{- end }}
{{- if .Values.metrics.prometheusRule.additionalLabels }}
{{- include "common.tplvalues.render" (dict "value" .Values.metrics.prometheusRule.additionalLabels "context" $) | nindent 4 }}
{{- end }}
spec:
groups:
{{- include "common.tplvalues.render" (dict "value" .Values.metrics.prometheusRule.rules "context" $) | nindent 2 }}
{{- end }}

View File

@ -0,0 +1,49 @@
{{- $pspAvailable := (semverCompare "<1.25-0" (include "common.capabilities.kubeVersion" .)) -}}
{{- if and $pspAvailable .Values.podSecurityPolicy.create }}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ include "mongodb.fullname" . }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
{{- end }}
{{- if .Values.commonAnnotations }}
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
spec:
{{- if .Values.podSecurityPolicy.spec }}
{{ include "common.tplvalues.render" ( dict "value" .Values.podSecurityPolicy.spec "context" $ ) | nindent 2 }}
{{- else }}
allowPrivilegeEscalation: {{ .Values.podSecurityPolicy.allowPrivilegeEscalation }}
fsGroup:
rule: 'MustRunAs'
ranges:
- min: {{ .Values.podSecurityContext.fsGroup }}
max: {{ .Values.podSecurityContext.fsGroup }}
hostIPC: false
hostNetwork: false
hostPID: false
privileged: {{ .Values.podSecurityPolicy.privileged }}
readOnlyRootFilesystem: false
requiredDropCapabilities:
- ALL
runAsUser:
rule: 'MustRunAs'
ranges:
- min: {{ .Values.containerSecurityContext.runAsUser }}
max: {{ .Values.containerSecurityContext.runAsUser }}
seLinux:
rule: 'RunAsAny'
supplementalGroups:
rule: 'MustRunAs'
ranges:
- min: {{ .Values.containerSecurityContext.runAsUser }}
max: {{ .Values.containerSecurityContext.runAsUser }}
volumes:
- 'configMap'
- 'secret'
- 'emptyDir'
- 'persistentVolumeClaim'
{{- end }}
{{- end }}

View File

@ -0,0 +1,47 @@
{{- if and (eq .Values.architecture "replicaset") .Values.externalAccess.enabled (not (eq .Values.externalAccess.service.type "ClusterIP"))}}
{{- $fullName := include "mongodb.fullname" . }}
{{- $replicaCount := .Values.replicaCount | int }}
{{- $root := . }}
{{- range $i, $e := until $replicaCount }}
{{- $targetPod := printf "%s-%d" (printf "%s" $fullName) $i }}
{{- $_ := set $ "targetPod" $targetPod }}
---
apiVersion: v1
kind: Service
metadata:
name: {{ $fullName }}-{{ $i }}-external
namespace: {{ include "mongodb.namespace" $ }}
labels: {{- include "common.labels.standard" $ | nindent 4 }}
app.kubernetes.io/component: mongodb
{{- if $root.Values.commonLabels }}
{{- include "common.tplvalues.render" ( dict "value" $root.Values.commonLabels "context" $ ) | nindent 4 }}
{{- end }}
pod: {{ $targetPod }}
{{- if $root.Values.externalAccess.service.annotations }}
annotations: {{- include "common.tplvalues.render" ( dict "value" $root.Values.externalAccess.service.annotations "context" $) | nindent 4 }}
{{- end }}
spec:
type: {{ $root.Values.externalAccess.service.type }}
{{- if eq $root.Values.externalAccess.service.type "LoadBalancer" }}
{{- if not (empty $root.Values.externalAccess.service.loadBalancerIPs) }}
loadBalancerIP: {{ index $root.Values.externalAccess.service.loadBalancerIPs $i }}
{{- end }}
{{- if $root.Values.externalAccess.service.loadBalancerSourceRanges }}
loadBalancerSourceRanges: {{- toYaml $root.Values.externalAccess.service.loadBalancerSourceRanges | nindent 4 }}
{{- end }}
{{- end }}
publishNotReadyAddresses: true
ports:
- name: {{ $root.Values.service.portName }}
port: {{ $root.Values.externalAccess.service.port }}
{{- if not (empty $root.Values.externalAccess.service.nodePorts) }}
nodePort: {{ index $root.Values.externalAccess.service.nodePorts $i }}
{{- end }}
targetPort: mongodb
selector: {{- include "common.labels.matchLabels" $ | nindent 4 }}
app.kubernetes.io/component: mongodb
statefulset.kubernetes.io/pod-name: {{ $targetPod }}
---
{{- end }}
{{- end }}

View File

@ -0,0 +1,31 @@
{{- if eq .Values.architecture "replicaset" }}
apiVersion: v1
kind: Service
metadata:
name: {{ include "mongodb.service.nameOverride" . }}
namespace: {{ include "mongodb.namespace" . }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
app.kubernetes.io/component: mongodb
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
{{- end }}
{{- if or .Values.service.annotations .Values.commonAnnotations }}
annotations:
{{- if .Values.service.annotations }}
{{- include "common.tplvalues.render" ( dict "value" .Values.service.annotations "context" $) | nindent 4 }}
{{- end }}
{{- if .Values.commonAnnotations }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
{{- end }}
spec:
type: ClusterIP
clusterIP: None
publishNotReadyAddresses: true
ports:
- name: {{ .Values.service.portName }}
port: {{ .Values.service.port }}
targetPort: mongodb
selector: {{- include "common.labels.matchLabels" . | nindent 4 }}
app.kubernetes.io/component: mongodb
{{- end }}

View File

@ -0,0 +1,25 @@
{{- if and (eq .Values.architecture "replicaset") .Values.pdb.create }}
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
name: {{ include "mongodb.fullname" . }}
namespace: {{ include "mongodb.namespace" . }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
app.kubernetes.io/component: mongodb
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
{{- end }}
{{- if .Values.commonAnnotations }}
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
spec:
{{- if .Values.pdb.minAvailable }}
minAvailable: {{ .Values.pdb.minAvailable }}
{{- end }}
{{- if .Values.pdb.maxUnavailable }}
maxUnavailable: {{ .Values.pdb.maxUnavailable }}
{{- end }}
selector:
matchLabels: {{ include "common.labels.matchLabels" . | nindent 6 }}
app.kubernetes.io/component: mongodb
{{- end }}

View File

@ -0,0 +1,253 @@
{{- if eq .Values.architecture "replicaset" }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "mongodb.fullname" . }}-scripts
namespace: {{ include "mongodb.namespace" . }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
app.kubernetes.io/component: mongodb
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
{{- end }}
{{- if .Values.commonAnnotations }}
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
data:
{{- $fullname := include "mongodb.fullname" . }}
{{- $releaseNamespace := include "mongodb.namespace" . }}
{{- if and .Values.externalAccess.autoDiscovery.enabled (eq .Values.externalAccess.service.type "LoadBalancer") }}
auto-discovery.sh: |-
#!/bin/bash
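# Wait until the ${MY_POD_NAME}-external LoadBalancer Service has an external IP or hostname,
# then write it to $SHARED_FILE so setup.sh can advertise it to the rest of the replica set.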
SVC_NAME="${MY_POD_NAME}-external"
# Auxiliary functions
retry_while() {
local -r cmd="${1:?cmd is missing}"
local -r retries="${2:-12}"
local -r sleep_time="${3:-5}"
local return_value=1
read -r -a command <<< "$cmd"
for ((i = 1 ; i <= retries ; i+=1 )); do
"${command[@]}" && return_value=0 && break
sleep "$sleep_time"
done
return $return_value
}
k8s_svc_lb_ip() {
local namespace=${1:?namespace is missing}
local service=${2:?service is missing}
local service_ip=$(kubectl get svc "$service" -n "$namespace" -o jsonpath="{.status.loadBalancer.ingress[0].ip}")
local service_hostname=$(kubectl get svc "$service" -n "$namespace" -o jsonpath="{.status.loadBalancer.ingress[0].hostname}")
if [[ -n ${service_ip} ]]; then
echo "${service_ip}"
else
echo "${service_hostname}"
fi
}
k8s_svc_lb_ip_ready() {
local namespace=${1:?namespace is missing}
local service=${2:?service is missing}
[[ -n "$(k8s_svc_lb_ip "$namespace" "$service")" ]]
}
# Wait until LoadBalancer IP is ready
retry_while "k8s_svc_lb_ip_ready {{ $releaseNamespace }} $SVC_NAME" || exit 1
# Obtain LoadBalancer external IP
k8s_svc_lb_ip "{{ $releaseNamespace }}" "$SVC_NAME" | tee "$SHARED_FILE"
{{- end }}
setup.sh: |-
#!/bin/bash
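# Entry point for data-bearing members: the first pod (-0) bootstraps as the primary,
# all other pods join the replica set as secondaries.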
. /opt/bitnami/scripts/mongodb-env.sh
{{- if .Values.externalAccess.enabled }}
{{- if eq .Values.externalAccess.service.type "LoadBalancer" }}
{{- if .Values.externalAccess.autoDiscovery.enabled }}
export MONGODB_ADVERTISED_HOSTNAME="$(<${SHARED_FILE})"
{{- else }}
ID="${MY_POD_NAME#"{{ $fullname }}-"}"
export MONGODB_ADVERTISED_HOSTNAME=$(echo '{{ .Values.externalAccess.service.loadBalancerIPs }}' | tr -d '[]' | cut -d ' ' -f "$(($ID + 1))")
{{- end }}
{{- else if eq .Values.externalAccess.service.type "NodePort" }}
{{- if .Values.externalAccess.service.domain }}
export MONGODB_ADVERTISED_HOSTNAME={{ .Values.externalAccess.service.domain }}
{{- else }}
export MONGODB_ADVERTISED_HOSTNAME=$(curl -s https://ipinfo.io/ip)
{{- end }}
{{- end }}
{{- end }}
{{- if .Values.replicaSetConfigurationSettings.enabled }}
# placed here before root password env is overwritten
# makes no assumption about starting state
# ensures that any stepDown or non-default starting state is handled
/scripts/replicaSetConfigurationSettings.sh &
{{- end }}
echo "Advertised Hostname: $MONGODB_ADVERTISED_HOSTNAME"
if [[ "$MY_POD_NAME" = "{{ $fullname }}-0" ]]; then
echo "Pod name matches initial primary pod name, configuring node as a primary"
export MONGODB_REPLICA_SET_MODE="primary"
else
echo "Pod name doesn't match initial primary pod name, configuring node as a secondary"
export MONGODB_REPLICA_SET_MODE="secondary"
export MONGODB_INITIAL_PRIMARY_ROOT_USER="$MONGODB_ROOT_USER"
export MONGODB_INITIAL_PRIMARY_ROOT_PASSWORD="$MONGODB_ROOT_PASSWORD"
export MONGODB_INITIAL_PRIMARY_PORT_NUMBER="$MONGODB_PORT_NUMBER"
export MONGODB_ROOT_PASSWORD=""
export MONGODB_EXTRA_USERNAMES=""
export MONGODB_EXTRA_DATABASES=""
export MONGODB_EXTRA_PASSWORDS=""
export MONGODB_ROOT_PASSWORD_FILE=""
export MONGODB_EXTRA_USERNAMES_FILE=""
export MONGODB_EXTRA_DATABASES_FILE=""
export MONGODB_EXTRA_PASSWORDS_FILE=""
fi
exec /opt/bitnami/scripts/mongodb/entrypoint.sh /opt/bitnami/scripts/mongodb/run.sh
setup-hidden.sh: |-
#!/bin/bash
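# Entry point for hidden members: every pod joins the existing replica set as a hidden node,
# authenticating against the initial primary.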
. /opt/bitnami/scripts/mongodb-env.sh
{{- if .Values.externalAccess.hidden.enabled }}
{{- if eq .Values.externalAccess.hidden.service.type "LoadBalancer" }}
{{- if .Values.externalAccess.autoDiscovery.enabled }}
export MONGODB_ADVERTISED_HOSTNAME="$(<${SHARED_FILE})"
{{- else }}
ID="${MY_POD_NAME#"{{ $fullname }}-hidden-"}"
export MONGODB_ADVERTISED_HOSTNAME=$(echo '{{ .Values.externalAccess.hidden.service.loadBalancerIPs }}' | tr -d '[]' | cut -d ' ' -f "$(($ID + 1))")
{{- end }}
{{- else if eq .Values.externalAccess.hidden.service.type "NodePort" }}
{{- if .Values.externalAccess.hidden.service.domain }}
export MONGODB_ADVERTISED_HOSTNAME={{ .Values.externalAccess.hidden.service.domain }}
{{- else }}
export MONGODB_ADVERTISED_HOSTNAME=$(curl -s https://ipinfo.io/ip)
{{- end }}
{{- end }}
{{- end }}
{{- if .Values.replicaSetConfigurationSettings.enabled }}
# placed here before root password env is overwritten
# makes no assumption about starting state
# ensures that any stepDown or non-default starting state is handled
/scripts/replicaSetConfigurationSettings.sh &
{{- end }}
echo "Advertised Hostname: $MONGODB_ADVERTISED_HOSTNAME"
echo "Configuring node as a hidden node"
export MONGODB_REPLICA_SET_MODE="hidden"
export MONGODB_INITIAL_PRIMARY_ROOT_USER="$MONGODB_ROOT_USER"
export MONGODB_INITIAL_PRIMARY_ROOT_PASSWORD="$MONGODB_ROOT_PASSWORD"
export MONGODB_INITIAL_PRIMARY_PORT_NUMBER="$MONGODB_PORT_NUMBER"
export MONGODB_ROOT_PASSWORD=""
export MONGODB_EXTRA_USERNAMES=""
export MONGODB_EXTRA_DATABASES=""
export MONGODB_EXTRA_PASSWORDS=""
export MONGODB_ROOT_PASSWORD_FILE=""
export MONGODB_EXTRA_USERNAMES_FILE=""
export MONGODB_EXTRA_DATABASES_FILE=""
export MONGODB_EXTRA_PASSWORDS_FILE=""
exec /opt/bitnami/scripts/mongodb/entrypoint.sh /opt/bitnami/scripts/mongodb/run.sh
{{- if .Values.replicaSetConfigurationSettings.enabled }}
replicaSetConfigurationSettings.sh: |-
#!/bin/bash
# This script is called when the pod starts.
# It sets replica set (rs) settings that cannot be applied via the config file.
function logger ()
#$1 is the line to be logged
{
echo "replicaSetConfigurationSettings.sh -- ${1}" >&1
}
SLEEP_PERIOD=10
{{- if and .Values.auth.enabled .Values.auth.rootPassword}}
usernameAndPassword="-u root -p ${MONGODB_ROOT_PASSWORD}"
{{- else }}
usernameAndPassword=""
{{- end }}
# load Values.replicaSetConfigurationSettings.configuration into an associative array, which makes iterating and string manipulation easy
declare -A desiredRsConf
{{ range $setting, $value := .Values.replicaSetConfigurationSettings.configuration -}}
{{ printf "desiredRsConf[%s]='%v'" $setting $value }}
{{ end }}
rsConfWriteAttempts=0
rs_conf_configured_ok=unknown
while [[ "${rs_conf_configured_ok}" != "true" ]]; do
# give the rs setup a chance to succeed before attempting to read or configure
sleep ${SLEEP_PERIOD}
counter=0
while ! mongo ${usernameAndPassword} --eval 'rs.conf()'; do
counter=$((${counter} +1))
logger "not yet able to read rs.conf settings from the currently running rs (after ${counter} attempts)"
sleep ${SLEEP_PERIOD}
done
counter=$((${counter} +1))
logger "rs.conf settings have been read from the currently running rs (after ${counter} attempts)"
# read rs.conf again and store it. settings format is '"<key>" : <value>,'
currentRsConf=$(mongo ${usernameAndPassword} --eval 'rs.conf()')
desiredEqualsactual=unknown
settingsToConfigure=""
for key in ${!desiredRsConf[@]}; do
value=${desiredRsConf[$key]}
if ! $(echo "\"${currentRsConf}"\" | grep -q -e "\"${key}\" : ${value},"); then
logger "rs conf setting: ${key} value will be set to: ${value}"
settingsToConfigure="${settingsToConfigure}cfg.settings.${key} = ${value}; "
desiredEqualsactual=false
else
logger "rs conf: ${key} is already at desired value: ${value}"
fi
done
if [[ "${desiredEqualsactual}" != "false" ]]; then
logger "replicaSetConfigurationSettings match the settings of the currently running rs"
desiredEqualsactual=true
rs_conf_configured_ok=true
logger "Current settings match desired settings (There have been ${rsConfWriteAttempts} attempts to write to mongoDB rs configuration)"
exit
fi
# apply the settings only if this member is currently the mongo replicaset PRIMARY
# it might take a little time before any pod is PRIMARY
isMaster=unknown
if ! mongo ${usernameAndPassword} --eval 'rs.isMaster()' | grep -q "ismaster\" : true"; then
isMaster=false
logger "This node is not yet PRIMARY - replicaSetConfigurationSettings will only be set on the member that is currently PRIMARY"
else
isMaster=true
logger "This node is PRIMARY"
fi
if [[ "${isMaster}" == "true" ]]; then
logger "This node is currently PRIMARY - will apply rs.conf settings"
# avoiding tricky string substitution with single quotes by making the eval string a set of vars
rsconf="cfg = rs.conf();"
rsreconf="rs.reconfig(cfg);"
rsCommand="${rsconf} ${settingsToConfigure} ${rsreconf}"
mongo ${usernameAndPassword} --eval "${rsCommand}"
if [ $? -ne 0 ]; then
logger "Failed to apply mongodb cfg.settings configuration"
else
logger "mongodb replicaset cfg.settings configuration applied"
logger "Will check rs conf"
# don't exit just yet - the settings will be checked in the next loop
fi
rsConfWriteAttempts=$((${rsConfWriteAttempts} + 1 ))
fi
done
{{- end }}
{{- end }}

View File

@ -0,0 +1,594 @@
{{- if eq .Values.architecture "replicaset" }}
{{- $replicaCount := int .Values.replicaCount }}
{{- $loadBalancerIPListLength := len .Values.externalAccess.service.loadBalancerIPs }}
{{- if not (and .Values.externalAccess.enabled (not .Values.externalAccess.autoDiscovery.enabled) (not (eq $replicaCount $loadBalancerIPListLength )) (eq .Values.externalAccess.service.type "LoadBalancer")) }}
apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }}
kind: StatefulSet
metadata:
name: {{ include "mongodb.fullname" . }}
namespace: {{ include "mongodb.namespace" . }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
app.kubernetes.io/component: mongodb
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
{{- end }}
{{- if .Values.labels }}
{{- include "common.tplvalues.render" (dict "value" .Values.labels "context" $) | nindent 4 }}
{{- end }}
{{- if or .Values.annotations .Values.commonAnnotations }}
annotations:
{{- if .Values.annotations }}
{{- include "common.tplvalues.render" ( dict "value" .Values.annotations "context" $) | nindent 4 }}
{{- end }}
{{- if .Values.commonAnnotations }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
{{- end }}
spec:
serviceName: {{ include "mongodb.service.nameOverride" . }}
podManagementPolicy: {{ .Values.podManagementPolicy }}
replicas: {{ .Values.replicaCount }}
updateStrategy:
type: {{ .Values.strategyType }}
{{- if (eq "OnDelete" .Values.strategyType) }}
rollingUpdate: null
{{- end }}
selector:
matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
app.kubernetes.io/component: mongodb
template:
metadata:
labels: {{- include "common.labels.standard" . | nindent 8 }}
app.kubernetes.io/component: mongodb
{{- if .Values.podLabels }}
{{- include "common.tplvalues.render" (dict "value" .Values.podLabels "context" $) | nindent 8 }}
{{- end }}
{{- if or (include "mongodb.createConfigmap" .) .Values.podAnnotations }}
annotations:
{{- if (include "mongodb.createConfigmap" .) }}
checksum/configuration: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
{{- end }}
{{- if .Values.podAnnotations }}
{{- include "common.tplvalues.render" (dict "value" .Values.podAnnotations "context" $) | nindent 8 }}
{{- end }}
{{- end }}
spec:
{{- include "mongodb.imagePullSecrets" . | nindent 6 }}
{{- if .Values.schedulerName }}
schedulerName: "{{ .Values.schedulerName }}"
{{- end }}
serviceAccountName: {{ template "mongodb.serviceAccountName" . }}
{{- if .Values.hostAliases }}
hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.hostAliases "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.affinity }}
affinity: {{- include "common.tplvalues.render" (dict "value" .Values.affinity "context" $) | nindent 8 }}
{{- else }}
affinity:
podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAffinityPreset "component" "mongodb" "context" $) | nindent 10 }}
podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAntiAffinityPreset "component" "mongodb" "context" $) | nindent 10 }}
nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.nodeAffinityPreset.type "key" .Values.nodeAffinityPreset.key "values" .Values.nodeAffinityPreset.values) | nindent 10 }}
{{- end }}
{{- if .Values.nodeSelector }}
nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.nodeSelector "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.tolerations }}
tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.tolerations "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.topologySpreadConstraints }}
topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.topologySpreadConstraints "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.priorityClassName }}
priorityClassName: {{ .Values.priorityClassName }}
{{- end }}
{{- if .Values.runtimeClassName }}
runtimeClassName: {{ .Values.runtimeClassName }}
{{- end}}
{{- if .Values.podSecurityContext.enabled }}
securityContext: {{- omit .Values.podSecurityContext "enabled" | toYaml | nindent 8 }}
{{- end }}
{{- if or .Values.initContainers (and .Values.volumePermissions.enabled .Values.persistence.enabled) (and .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled) .Values.tls.enabled }}
initContainers:
{{- if .Values.initContainers }}
{{- include "common.tplvalues.render" (dict "value" .Values.initContainers "context" $) | nindent 8 }}
{{- end }}
{{- if and .Values.volumePermissions.enabled .Values.persistence.enabled }}
- name: volume-permissions
image: {{ include "mongodb.volumePermissions.image" . }}
imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }}
command:
- /bin/bash
- -ec
args:
- |
mkdir -p {{ .Values.persistence.mountPath }}{{- if .Values.persistence.subPath }}/{{ .Values.persistence.subPath }}{{- end }}
{{- if and .Values.podSecurityContext.enabled .Values.containerSecurityContext.enabled }}
chown -R "{{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }}" "{{ .Values.persistence.mountPath }}{{- if .Values.persistence.subPath }}/{{ .Values.persistence.subPath }}{{- end }}"
{{- end }}
{{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }}
securityContext: {{- omit .Values.volumePermissions.securityContext "runAsUser" | toYaml | nindent 12 }}
{{- else }}
securityContext: {{- .Values.volumePermissions.securityContext | toYaml | nindent 12 }}
{{- end }}
{{- if .Values.volumePermissions.resources }}
resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }}
{{- end }}
volumeMounts:
- name: datadir
mountPath: {{ .Values.persistence.mountPath }}
{{- end }}
{{- if .Values.tls.enabled }}
- name: generate-tls-certs
image: {{ include "mongodb.tls.image" . }}
imagePullPolicy: {{ .Values.tls.image.pullPolicy | quote }}
env:
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: certs-volume
mountPath: /certs/CAs
- name: certs
mountPath: /certs
command:
- sh
- "-c"
- |
/bin/bash <<'EOF'
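# Generate a per-pod certificate signed by the chart CA; the SANs cover the headless
# service, the pod hostname, and localhost.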
my_hostname=$(hostname)
svc={{ include "mongodb.service.nameOverride" . }}
cp /certs/CAs/* /certs/
cat >/certs/openssl.cnf <<EOL
[req]
req_extensions = v3_req
distinguished_name = req_distinguished_name
[req_distinguished_name]
[ v3_req ]
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
subjectAltName = @alt_names
[alt_names]
DNS.1 = $svc
DNS.2 = $my_hostname
DNS.3 = $my_hostname.$svc.$MY_POD_NAMESPACE.svc.{{ .Values.clusterDomain }}
DNS.4 = localhost
DNS.5 = 127.0.0.1
{{- if .Values.tls.extraDnsNames }}
{{- range $key, $dnsName := .Values.tls.extraDnsNames }}
{{ $key }} = {{ $dnsName }}
{{- end }}
{{- end }}
{{- if .Values.externalAccess.service.loadBalancerIPs }}
{{- range $key, $val := .Values.externalAccess.service.loadBalancerIPs }}
IP.{{ $key }} = {{ $val | quote }}
{{- end }}
{{- end }}
EOL
export RANDFILE=/certs/.rnd && openssl genrsa -out /certs/mongo.key 2048
# Create the client/server certificate
openssl req -new -key /certs/mongo.key -out /certs/mongo.csr -subj "/C=US/O=My Organisations/OU=IT/CN=$my_hostname" -config /certs/openssl.cnf
# Sign the server certificate with the CA cert and key
openssl x509 -req -in /certs/mongo.csr -CA /certs/mongodb-ca-cert -CAkey /certs/mongodb-ca-key -CAcreateserial -out /certs/mongo.crt -days 3650 -extensions v3_req -extfile /certs/openssl.cnf
rm /certs/mongo.csr
# Concatenate the certificate and key into a single PEM file that can be used for both member and client authentication.
cat /certs/mongo.crt /certs/mongo.key > /certs/mongodb.pem
cd /certs/
shopt -s extglob
rm -rf !(mongodb-ca-cert|mongodb.pem|CAs|openssl.cnf)
chmod 0600 mongodb-ca-cert mongodb.pem
EOF
{{- end }}
{{- if and .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled (eq .Values.externalAccess.service.type "LoadBalancer") }}
- name: auto-discovery
image: {{ include "mongodb.externalAccess.autoDiscovery.image" . }}
imagePullPolicy: {{ .Values.externalAccess.autoDiscovery.image.pullPolicy | quote }}
command:
- /scripts/auto-discovery.sh
env:
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: SHARED_FILE
value: "/shared/info.txt"
{{- if .Values.externalAccess.autoDiscovery.resources }}
resources: {{- toYaml .Values.externalAccess.autoDiscovery.resources | nindent 12 }}
{{- end }}
volumeMounts:
- name: shared
mountPath: /shared
- name: scripts
mountPath: /scripts/auto-discovery.sh
subPath: auto-discovery.sh
{{- end }}
{{- end }}
containers:
- name: mongodb
image: {{ include "mongodb.image" . }}
imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
{{- if .Values.containerSecurityContext.enabled }}
securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }}
{{- end }}
{{- if .Values.diagnosticMode.enabled }}
command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }}
{{- else if .Values.command }}
command: {{- include "common.tplvalues.render" (dict "value" .Values.command "context" $) | nindent 12 }}
{{- else }}
command:
- /scripts/setup.sh
{{- end }}
{{- if .Values.diagnosticMode.enabled }}
args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }}
{{- else if .Values.args }}
args: {{- include "common.tplvalues.render" (dict "value" .Values.args "context" $) | nindent 12 }}
{{- end }}
env:
- name: BITNAMI_DEBUG
value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }}
{{- if and .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled (eq .Values.externalAccess.service.type "LoadBalancer") }}
- name: SHARED_FILE
value: "/shared/info.txt"
{{- end }}
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: K8S_SERVICE_NAME
value: "{{ include "mongodb.service.nameOverride" . }}"
- name: MONGODB_INITIAL_PRIMARY_HOST
value: "{{ include "mongodb.fullname" . }}-0.$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.{{ .Values.clusterDomain }}"
- name: MONGODB_REPLICA_SET_NAME
value: {{ .Values.replicaSetName | quote }}
{{- if and .Values.replicaSetHostnames (not .Values.externalAccess.enabled) }}
- name: MONGODB_ADVERTISED_HOSTNAME
value: "$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.{{ .Values.clusterDomain }}"
{{- end }}
{{- $customUsers := include "mongodb.customUsers" . -}}
{{- $customDatabases := include "mongodb.customDatabases" . -}}
{{- if not (empty $customUsers) }}
- name: MONGODB_EXTRA_USERNAMES
value: {{ $customUsers | quote }}
{{- end }}
{{- if not (empty $customDatabases) }}
- name: MONGODB_EXTRA_DATABASES
value: {{ $customDatabases | quote }}
{{- end }}
{{- if .Values.auth.enabled }}
{{- if and (not (empty $customUsers)) (not (empty $customDatabases)) }}
- name: MONGODB_EXTRA_PASSWORDS
valueFrom:
secretKeyRef:
name: {{ include "mongodb.secretName" . }}
key: mongodb-passwords
{{- end }}
- name: MONGODB_ROOT_USER
value: {{ .Values.auth.rootUser | quote }}
- name: MONGODB_ROOT_PASSWORD
valueFrom:
secretKeyRef:
name: {{ include "mongodb.secretName" . }}
key: mongodb-root-password
- name: MONGODB_REPLICA_SET_KEY
valueFrom:
secretKeyRef:
name: {{ include "mongodb.secretName" . }}
key: mongodb-replica-set-key
{{- end }}
{{- if and .Values.metrics.enabled (not (empty .Values.metrics.username)) }}
- name: MONGODB_METRICS_USERNAME
value: {{ .Values.metrics.username | quote }}
{{- if .Values.auth.enabled }}
- name: MONGODB_METRICS_PASSWORD
valueFrom:
secretKeyRef:
name: {{ include "mongodb.secretName" . }}
key: mongodb-metrics-password
{{- end }}
{{- end }}
- name: ALLOW_EMPTY_PASSWORD
value: {{ ternary "no" "yes" .Values.auth.enabled | quote }}
- name: MONGODB_SYSTEM_LOG_VERBOSITY
value: {{ .Values.systemLogVerbosity | quote }}
- name: MONGODB_DISABLE_SYSTEM_LOG
value: {{ ternary "yes" "no" .Values.disableSystemLog | quote }}
- name: MONGODB_DISABLE_JAVASCRIPT
value: {{ ternary "yes" "no" .Values.disableJavascript | quote }}
- name: MONGODB_ENABLE_JOURNAL
value: {{ ternary "yes" "no" .Values.enableJournal | quote }}
- name: MONGODB_ENABLE_IPV6
value: {{ ternary "yes" "no" .Values.enableIPv6 | quote }}
- name: MONGODB_ENABLE_DIRECTORY_PER_DB
value: {{ ternary "yes" "no" .Values.directoryPerDB | quote }}
{{- $extraFlags := .Values.extraFlags | join " " -}}
{{- if .Values.tls.enabled }}
{{- $extraFlags = printf "--tlsMode=%s --tlsCertificateKeyFile=/certs/mongodb.pem --tlsCAFile=/certs/mongodb-ca-cert %s" .Values.tls.mode $extraFlags }}
{{- end }}
{{- if ne $extraFlags ""}}
- name: MONGODB_EXTRA_FLAGS
value: {{ $extraFlags | quote }}
{{- end }}
{{- if .Values.tls.enabled }}
- name: MONGODB_CLIENT_EXTRA_FLAGS
value: --tls --tlsCertificateKeyFile=/certs/mongodb.pem --tlsCAFile=/certs/mongodb-ca-cert
{{- end }}
{{- if .Values.extraEnvVars }}
{{- include "common.tplvalues.render" (dict "value" .Values.extraEnvVars "context" $) | nindent 12 }}
{{- end }}
{{- if or .Values.extraEnvVarsCM .Values.extraEnvVarsSecret }}
envFrom:
{{- if .Values.extraEnvVarsCM }}
- configMapRef:
name: {{ tpl .Values.extraEnvVarsCM . | quote }}
{{- end }}
{{- if .Values.extraEnvVarsSecret }}
- secretRef:
name: {{ tpl .Values.extraEnvVarsSecret . | quote }}
{{- end }}
{{- end }}
ports:
- containerPort: 27017
name: mongodb
{{- if not .Values.diagnosticMode.enabled }}
{{- if .Values.livenessProbe.enabled }}
livenessProbe:
exec:
command:
- mongo
- --disableImplicitSessions
{{- if .Values.tls.enabled }}
- --tls
- --tlsCertificateKeyFile=/certs/mongodb.pem
- --tlsCAFile=/certs/mongodb-ca-cert
{{- end }}
- --eval
- "db.adminCommand('ping')"
initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.livenessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}
successThreshold: {{ .Values.livenessProbe.successThreshold }}
failureThreshold: {{ .Values.livenessProbe.failureThreshold }}
{{- else if .Values.customLivenessProbe }}
livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customLivenessProbe "context" $) | nindent 12 }}
{{- end }}
{{- if .Values.readinessProbe.enabled }}
readinessProbe:
exec:
command:
- bash
- -ec
- |
{{- if .Values.tls.enabled }}
TLS_OPTIONS='--tls --tlsCertificateKeyFile=/certs/mongodb.pem --tlsCAFile=/certs/mongodb-ca-cert'
{{- end }}
# Run the proper check depending on the version
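# db.hello() is only available in newer shells (4.4.2+); older shells fall back to db.isMaster()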
[[ $(mongo --version | grep "MongoDB shell") =~ ([0-9]+\.[0-9]+\.[0-9]+) ]] && VERSION=${BASH_REMATCH[1]}
. /opt/bitnami/scripts/libversion.sh
VERSION_MAJOR="$(get_sematic_version "$VERSION" 1)"
VERSION_MINOR="$(get_sematic_version "$VERSION" 2)"
VERSION_PATCH="$(get_sematic_version "$VERSION" 3)"
if [[ "$VERSION_MAJOR" -ge 4 ]] && [[ "$VERSION_MINOR" -ge 4 ]] && [[ "$VERSION_PATCH" -ge 2 ]]; then
mongo --disableImplicitSessions $TLS_OPTIONS --eval 'db.hello().isWritablePrimary || db.hello().secondary' | grep -q 'true'
else
mongo --disableImplicitSessions $TLS_OPTIONS --eval 'db.isMaster().ismaster || db.isMaster().secondary' | grep -q 'true'
fi
initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.readinessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}
successThreshold: {{ .Values.readinessProbe.successThreshold }}
failureThreshold: {{ .Values.readinessProbe.failureThreshold }}
{{- else if .Values.customReadinessProbe }}
readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customReadinessProbe "context" $) | nindent 12 }}
{{- end }}
{{- if .Values.startupProbe.enabled }}
startupProbe:
exec:
command:
- bash
- -ec
- |
{{- if .Values.tls.enabled }}
TLS_OPTIONS='--tls --tlsCertificateKeyFile=/certs/mongodb.pem --tlsCAFile=/certs/mongodb-ca-cert'
{{- end }}
mongo --disableImplicitSessions $TLS_OPTIONS --eval 'db.hello().isWritablePrimary || db.hello().secondary' | grep -q 'true'
initialDelaySeconds: {{ .Values.startupProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.startupProbe.periodSeconds }}
timeoutSeconds: {{ .Values.startupProbe.timeoutSeconds }}
successThreshold: {{ .Values.startupProbe.successThreshold }}
failureThreshold: {{ .Values.startupProbe.failureThreshold }}
{{- else if .Values.customStartupProbe }}
startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customStartupProbe "context" $) | nindent 12 }}
{{- end }}
{{- end }}
{{- if .Values.resources }}
resources: {{- toYaml .Values.resources | nindent 12 }}
{{- end }}
volumeMounts:
- name: datadir
mountPath: {{ .Values.persistence.mountPath }}
subPath: {{ .Values.persistence.subPath }}
{{- if or .Values.initdbScriptsConfigMap .Values.initdbScripts }}
- name: custom-init-scripts
mountPath: /docker-entrypoint-initdb.d
{{- end }}
{{- if or .Values.configuration .Values.existingConfigmap }}
- name: config
mountPath: /opt/bitnami/mongodb/conf/mongodb.conf
subPath: mongodb.conf
{{- end }}
- name: scripts
mountPath: /scripts/setup.sh
subPath: setup.sh
{{ if .Values.replicaSetConfigurationSettings.enabled }}
- name: scripts
mountPath: /scripts/replicaSetConfigurationSettings.sh
subPath: replicaSetConfigurationSettings.sh
{{- end }}
{{- if and .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled (eq .Values.externalAccess.service.type "LoadBalancer") }}
- name: shared
mountPath: /shared
{{- end }}
{{- if .Values.tls.enabled }}
- name: certs
mountPath: /certs
{{- end }}
{{- if .Values.extraVolumeMounts }}
{{- toYaml .Values.extraVolumeMounts | nindent 12 }}
{{- end }}
{{- if .Values.metrics.enabled }}
- name: metrics
image: {{ template "mongodb.metrics.image" . }}
imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }}
{{- if .Values.containerSecurityContext.enabled }}
securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }}
{{- end }}
{{- if .Values.diagnosticMode.enabled }}
command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }}
{{- else }}
command:
- /bin/bash
- -ec
{{- end }}
{{- if .Values.diagnosticMode.enabled }}
args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }}
{{- else }}
args:
- |
/bin/mongodb_exporter --web.listen-address ":{{ .Values.metrics.containerPort }}" --mongodb.uri "{{ include "mongodb.mongodb_exporter.uri" . }}" {{ .Values.metrics.extraFlags }}
{{- end }}
env:
{{- if .Values.auth.enabled }}
{{- if not .Values.metrics.username }}
- name: MONGODB_ROOT_USER
value: {{ .Values.auth.rootUser | quote }}
- name: MONGODB_ROOT_PASSWORD
valueFrom:
secretKeyRef:
name: {{ include "mongodb.secretName" . }}
key: mongodb-root-password
{{- else }}
- name: MONGODB_METRICS_USERNAME
value: {{ .Values.metrics.username | quote }}
- name: MONGODB_METRICS_PASSWORD
valueFrom:
secretKeyRef:
name: {{ include "mongodb.secretName" . }}
key: mongodb-metrics-password
{{- end }}
{{- end }}
volumeMounts:
{{- if .Values.tls.enabled }}
- name: certs
mountPath: /certs
{{- end }}
ports:
- name: metrics
containerPort: {{ .Values.metrics.containerPort }}
{{- if not .Values.diagnosticMode.enabled }}
{{- if .Values.metrics.livenessProbe.enabled }}
livenessProbe:
httpGet:
path: /
port: metrics
initialDelaySeconds: {{ .Values.metrics.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.metrics.livenessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.metrics.livenessProbe.timeoutSeconds }}
failureThreshold: {{ .Values.metrics.livenessProbe.failureThreshold }}
successThreshold: {{ .Values.metrics.livenessProbe.successThreshold }}
{{- end }}
{{- if .Values.metrics.readinessProbe.enabled }}
readinessProbe:
httpGet:
path: /
port: metrics
initialDelaySeconds: {{ .Values.metrics.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.metrics.readinessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.metrics.readinessProbe.timeoutSeconds }}
failureThreshold: {{ .Values.metrics.readinessProbe.failureThreshold }}
successThreshold: {{ .Values.metrics.readinessProbe.successThreshold }}
{{- end }}
{{- end }}
{{- if .Values.metrics.resources }}
resources: {{- toYaml .Values.metrics.resources | nindent 12 }}
{{- end }}
{{- end }}
{{- if .Values.sidecars }}
{{- include "common.tplvalues.render" (dict "value" .Values.sidecars "context" $) | nindent 8 }}
{{- end }}
volumes:
{{- if or .Values.initdbScriptsConfigMap .Values.initdbScripts }}
- name: custom-init-scripts
configMap:
name: {{ template "mongodb.initdbScriptsCM" . }}
{{- end }}
{{- if or .Values.configuration .Values.existingConfigmap }}
- name: config
configMap:
name: {{ include "mongodb.configmapName" . }}
{{- end }}
{{- if and .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled (eq .Values.externalAccess.service.type "LoadBalancer") }}
- name: shared
emptyDir: {}
{{- end }}
- name: scripts
configMap:
name: {{ include "mongodb.fullname" . }}-scripts
defaultMode: 0755
{{- if .Values.extraVolumes }}
{{- toYaml .Values.extraVolumes | nindent 8 }}
{{- end }}
{{- if .Values.tls.enabled }}
- name: certs
emptyDir: {}
- name: certs-volume
secret:
secretName: {{ template "mongodb.tlsSecretName" . }}
items:
- key: mongodb-ca-cert
path: mongodb-ca-cert
mode: 0600
- key: mongodb-ca-key
path: mongodb-ca-key
mode: 0600
{{- end }}
{{- if not .Values.persistence.enabled }}
- name: datadir
{{- if .Values.persistence.medium }}
emptyDir:
medium: {{ .Values.persistence.medium | quote }}
{{- else }}
emptyDir: {}
{{- end }}
{{- else }}
volumeClaimTemplates:
- metadata:
name: datadir
{{- if .Values.persistence.annotations }}
annotations: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.annotations "context" $) | nindent 10 }}
{{- end }}
spec:
accessModes:
{{- range .Values.persistence.accessModes }}
- {{ . | quote }}
{{- end }}
resources:
requests:
storage: {{ .Values.persistence.size | quote }}
{{- if .Values.persistence.volumeClaimTemplates.requests }}
{{- include "common.tplvalues.render" (dict "value" .Values.persistence.volumeClaimTemplates.requests "context" $) | nindent 12 }}
{{- end }}
{{- if .Values.persistence.volumeClaimTemplates.dataSource }}
dataSource: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.volumeClaimTemplates.dataSource "context" $) | nindent 10 }}
{{- end }}
{{- if .Values.persistence.volumeClaimTemplates.selector }}
selector: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.volumeClaimTemplates.selector "context" $) | nindent 10 }}
{{- end }}
{{ include "common.storage.class" (dict "persistence" .Values.persistence "global" .Values.global) }}
{{- end }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,35 @@
{{- if and (eq .Values.architecture "replicaset") .Values.externalAccess.enabled (eq .Values.externalAccess.service.type "ClusterIP") }}
{{- $fullName := include "mongodb.fullname" . }}
{{- $replicaCount := .Values.replicaCount | int }}
{{- $root := . }}
{{- range $i, $e := until $replicaCount }}
{{- $targetPod := printf "%s-%d" (printf "%s" $fullName) $i }}
{{- $_ := set $ "targetPod" $targetPod }}
---
apiVersion: v1
kind: Service
metadata:
name: {{ $fullName }}-{{ $i }}
namespace: {{ include "mongodb.namespace" $ }}
labels: {{- include "common.labels.standard" $ | nindent 4 }}
app.kubernetes.io/component: mongodb
{{- if $root.Values.commonLabels }}
{{- include "common.tplvalues.render" ( dict "value" $root.Values.commonLabels "context" $ ) | nindent 4 }}
{{- end }}
{{- if $root.Values.service.annotations }}
annotations: {{- include "common.tplvalues.render" (dict "value" $root.Values.service.annotations "context" $) | nindent 4 }}
{{- end }}
spec:
type: ClusterIP
ports:
- name: {{ $root.Values.service.portName }}
port: {{ $root.Values.service.port }}
targetPort: mongodb
selector: {{- include "common.labels.matchLabels" $ | nindent 4 }}
app.kubernetes.io/component: mongodb
statefulset.kubernetes.io/pod-name: {{ $targetPod }}
---
{{- end }}
{{- end }}

View File

@ -0,0 +1,30 @@
{{- if .Values.rbac.create }}
apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }}
kind: Role
metadata:
name: {{ include "mongodb.fullname" . }}
namespace: {{ include "mongodb.namespace" . }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
{{- end }}
rules:
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
{{- if .Values.rbac.role.rules }}
{{- toYaml .Values.rbac.role.rules | nindent 2 }}
{{- end -}}
{{- $pspAvailable := (semverCompare "<1.25-0" (include "common.capabilities.kubeVersion" .)) -}}
{{- if and $pspAvailable .Values.podSecurityPolicy.create }}
- apiGroups: ['{{ template "podSecurityPolicy.apiGroup" . }}']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames: [{{ include "mongodb.fullname" . }}]
{{- end -}}
{{- end }}

View File

@ -0,0 +1,19 @@
{{- if and .Values.serviceAccount.create .Values.rbac.create }}
apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }}
kind: RoleBinding
metadata:
name: {{ include "mongodb.fullname" . }}
namespace: {{ include "mongodb.namespace" . }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
{{- end }}
roleRef:
kind: Role
name: {{ include "mongodb.fullname" . }}
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
name: {{ include "mongodb.serviceAccountName" . }}
namespace: {{ include "mongodb.namespace" . }}
{{- end }}

View File

@ -0,0 +1,37 @@
{{- if (include "mongodb.createTlsSecret" .) }}
{{- $fullname := include "mongodb.fullname" . }}
{{- $releaseNamespace := .Release.Namespace }}
{{- $clusterDomain := .Values.clusterDomain }}
{{- $cn := printf "%s.%s.svc.%s" $fullname .Release.Namespace $clusterDomain}}
apiVersion: v1
kind: Secret
metadata:
name: {{ template "mongodb.tlsSecretName" . }}
namespace: {{ template "mongodb.namespace" . }}
labels:
{{- include "common.labels.standard" . | nindent 4 }}
app.kubernetes.io/component: mongodb
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
{{- end }}
{{- if .Values.commonAnnotations }}
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
type: Opaque
data:
{{- if or .Values.tls.caCert .Values.tls.caKey (not .Values.tls.autoGenerated) }}
{{- $ca := buildCustomCert (required "A valid .Values.tls.caCert is required!" .Values.tls.caCert) (required "A valid .Values.tls.caKey is required!" .Values.tls.caKey) }}
{{- $cert := genSignedCert $cn nil nil 3650 $ca }}
{{- $pem := printf "%s%s" $cert.Cert $cert.Key }}
mongodb-ca-cert: {{ b64enc $ca.Cert }}
mongodb-ca-key: {{ b64enc $ca.Key }}
client-pem: {{ b64enc $pem }}
{{- else }}
{{- $ca:= genCA "myMongo-ca" 3650 }}
{{- $cert := genSignedCert $cn nil nil 3650 $ca }}
{{- $pem := printf "%s%s" $cert.Cert $cert.Key }}
mongodb-ca-cert: {{ b64enc $ca.Cert }}
mongodb-ca-key: {{ b64enc $ca.Key }}
client-pem: {{ b64enc $pem }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,51 @@
{{- if (include "mongodb.createSecret" .) }}
apiVersion: v1
kind: Secret
metadata:
name: {{ include "mongodb.fullname" . }}
namespace: {{ template "mongodb.namespace" . }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
app.kubernetes.io/component: mongodb
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
{{- end }}
{{- if .Values.commonAnnotations }}
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
type: Opaque
data:
{{- if .Values.auth.rootPassword }}
mongodb-root-password: {{ .Values.auth.rootPassword | toString | b64enc | quote }}
{{- else }}
mongodb-root-password: {{ randAlphaNum 10 | b64enc | quote }}
{{- end }}
{{- $customUsers := include "mongodb.customUsers" . -}}
{{- $customDatabases := include "mongodb.customDatabases" . -}}
{{- $customPasswords := include "mongodb.customPasswords" . -}}
{{- if and (not (empty $customUsers)) (not (empty $customDatabases)) }}
{{- if not (empty $customPasswords) }}
mongodb-passwords: {{ $customPasswords | toString | b64enc | quote }}
{{- else }}
{{- $customUsersList := splitList "," $customUsers }}
{{- $customPasswordsList := list }}
{{- range $customUsersList }}
{{- $customPasswordsList = append $customPasswordsList (randAlphaNum 10) }}
{{- end }}
mongodb-passwords: {{ (join "," $customPasswordsList) | b64enc | quote }}
{{- end }}
{{- end }}
{{- if .Values.metrics.username }}
{{- if .Values.metrics.password }}
mongodb-metrics-password: {{ .Values.metrics.password | toString | b64enc | quote }}
{{- else }}
mongodb-metrics-password: {{ randAlphaNum 10 | b64enc | quote }}
{{- end }}
{{- end }}
{{- if eq .Values.architecture "replicaset" }}
{{- if .Values.auth.replicaSetKey }}
mongodb-replica-set-key: {{ .Values.auth.replicaSetKey | toString | b64enc | quote }}
{{- else }}
mongodb-replica-set-key: {{ randAlphaNum 10 | b64enc | quote }}
{{- end }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,22 @@
{{- if .Values.serviceAccount.create }}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "mongodb.serviceAccountName" . }}
namespace: {{ include "mongodb.namespace" . }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
{{- end }}
{{- if or .Values.serviceAccount.annotations .Values.commonAnnotations }}
annotations:
{{- if .Values.serviceAccount.annotations }}
{{ toYaml .Values.serviceAccount.annotations | nindent 4 }}
{{- end }}
{{- if .Values.commonAnnotations }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
{{- end }}
secrets:
- name: {{ template "mongodb.fullname" . }}
{{- end }}
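
If you attach RBAC or cloud IAM bindings yourself, the generated ServiceAccount can be annotated from values; the annotation key below is only an illustration:

```
serviceAccount:
  create: true
  annotations:
    example.com/owner: crm-team      # illustrative annotation, not required by the chart
```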

View File

@ -0,0 +1,38 @@
{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ include "mongodb.fullname" . }}
namespace: {{ include "mongodb.serviceMonitor.namespace" . }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
{{- if .Values.metrics.serviceMonitor.additionalLabels }}
{{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.additionalLabels "context" $) | nindent 4 }}
{{- end }}
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
{{- end }}
{{- if .Values.commonAnnotations }}
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
spec:
endpoints:
- port: http-metrics
{{- if .Values.metrics.serviceMonitor.interval }}
interval: {{ .Values.metrics.serviceMonitor.interval }}
{{- end }}
{{- if .Values.metrics.serviceMonitor.scrapeTimeout }}
scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }}
{{- end }}
{{- if .Values.metrics.serviceMonitor.relabelings }}
relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.relabelings "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.metrics.serviceMonitor.metricRelabelings }}
metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 8 }}
{{- end }}
namespaceSelector:
matchNames:
- "{{ include "mongodb.namespace" . }}"
selector:
matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
app.kubernetes.io/component: metrics
{{- end }}
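
The ServiceMonitor is only rendered when both the exporter and the monitor are switched on. A sketch of the relevant values (the interval, timeout and selector label are examples):

```
metrics:
  enabled: true
  serviceMonitor:
    enabled: true
    interval: 30s
    scrapeTimeout: 10s
    additionalLabels:
      release: prometheus            # example label your Prometheus Operator instance selects on
```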

View File

@ -0,0 +1,517 @@
{{- if not (eq .Values.architecture "replicaset") }}
apiVersion: {{ if .Values.useStatefulSet }}{{ include "common.capabilities.statefulset.apiVersion" . }}{{- else }}{{ include "common.capabilities.deployment.apiVersion" . }}{{- end }}
kind: {{ if .Values.useStatefulSet }}StatefulSet{{- else }}Deployment{{- end }}
metadata:
name: {{ include "mongodb.fullname" . }}
namespace: {{ include "mongodb.namespace" . }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
app.kubernetes.io/component: mongodb
{{- if .Values.labels }}
{{- include "common.tplvalues.render" (dict "value" .Values.labels "context" $) | nindent 4 }}
{{- end }}
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
{{- end }}
{{- if or .Values.annotations .Values.commonAnnotations }}
annotations:
{{- if .Values.annotations }}
{{- include "common.tplvalues.render" ( dict "value" .Values.annotations "context" $) | nindent 4 }}
{{- end }}
{{- if .Values.commonAnnotations }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
{{- end }}
spec:
replicas: 1
{{- if .Values.useStatefulSet }}
serviceName: {{ include "mongodb.fullname" . }}
updateStrategy:
{{- else }}
strategy:
{{- end }}
{{- if .Values.useStatefulSet }}
type: {{ .Values.strategyType }}
{{- else }}
type: Recreate
{{- end -}}
{{- if or (and (not .Values.useStatefulSet) (eq "Recreate" .Values.strategyType)) (and .Values.useStatefulSet (eq "OnDelete" .Values.strategyType)) }}
rollingUpdate: null
{{- end }}
selector:
matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
app.kubernetes.io/component: mongodb
template:
metadata:
labels: {{- include "common.labels.standard" . | nindent 8 }}
app.kubernetes.io/component: mongodb
{{- if .Values.podLabels }}
{{- include "common.tplvalues.render" (dict "value" .Values.podLabels "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 8 }}
{{- end }}
{{- if or (include "mongodb.createConfigmap" .) .Values.podAnnotations }}
annotations:
{{- if (include "mongodb.createConfigmap" .) }}
checksum/configuration: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
{{- end }}
{{- if .Values.podAnnotations }}
{{- include "common.tplvalues.render" (dict "value" .Values.podAnnotations "context" $) | nindent 8 }}
{{- end }}
{{- end }}
spec:
{{- include "mongodb.imagePullSecrets" . | nindent 6 }}
{{- if .Values.schedulerName }}
schedulerName: "{{ .Values.schedulerName }}"
{{- end }}
serviceAccountName: {{ template "mongodb.serviceAccountName" . }}
{{- if .Values.affinity }}
affinity: {{- include "common.tplvalues.render" (dict "value" .Values.affinity "context" $) | nindent 8 }}
{{- else }}
affinity:
podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAffinityPreset "component" "mongodb" "context" $) | nindent 10 }}
podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAntiAffinityPreset "component" "mongodb" "context" $) | nindent 10 }}
nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.nodeAffinityPreset.type "key" .Values.nodeAffinityPreset.key "values" .Values.nodeAffinityPreset.values) | nindent 10 }}
{{- end }}
{{- if .Values.nodeSelector }}
nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.nodeSelector "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.hostAliases }}
hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.hostAliases "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.tolerations }}
tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.tolerations "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.priorityClassName }}
priorityClassName: {{ .Values.priorityClassName }}
{{- end }}
{{- if .Values.runtimeClassName }}
runtimeClassName: {{ .Values.runtimeClassName }}
{{- end }}
{{- if .Values.podSecurityContext.enabled }}
securityContext: {{- omit .Values.podSecurityContext "enabled" | toYaml | nindent 8 }}
{{- end }}
{{- if or .Values.initContainers (and .Values.volumePermissions.enabled .Values.persistence.enabled) .Values.tls.enabled }}
initContainers:
{{- if .Values.initContainers }}
{{- include "common.tplvalues.render" (dict "value" .Values.initContainers "context" $) | nindent 8 }}
{{- end }}
{{- if and .Values.volumePermissions.enabled .Values.persistence.enabled }}
- name: volume-permissions
image: {{ include "mongodb.volumePermissions.image" . }}
imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }}
command:
- /bin/bash
- -ec
args:
- |
mkdir -p {{ .Values.persistence.mountPath }}{{- if .Values.persistence.subPath }}/{{ .Values.persistence.subPath }}{{- end }}
{{- if and .Values.podSecurityContext.enabled .Values.containerSecurityContext.enabled }}
chown -R "{{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }}" "{{ .Values.persistence.mountPath }}{{- if .Values.persistence.subPath }}/{{ .Values.persistence.subPath }}{{- end }}"
{{- end }}
{{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }}
securityContext: {{- omit .Values.volumePermissions.securityContext "runAsUser" | toYaml | nindent 12 }}
{{- else }}
securityContext: {{- .Values.volumePermissions.securityContext | toYaml | nindent 12 }}
{{- end }}
{{- if .Values.volumePermissions.resources }}
resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }}
{{- end }}
volumeMounts:
- name: datadir
mountPath: {{ .Values.persistence.mountPath }}
{{- end }}
{{- if .Values.tls.enabled }}
- name: generate-tls-certs
image: {{ include "mongodb.tls.image" . }}
imagePullPolicy: {{ .Values.tls.image.pullPolicy | quote }}
env:
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: certs-volume
mountPath: /certs/CAs
- name: certs
mountPath: /certs
command:
- sh
- "-c"
- |
/bin/bash <<'EOF'
my_hostname=$(hostname)
svc=$(echo -n "$my_hostname" | sed s/-[0-9]*$//)-headless
cp /certs/CAs/* /certs/
cat >/certs/openssl.cnf <<EOL
[req]
req_extensions = v3_req
distinguished_name = req_distinguished_name
[req_distinguished_name]
[ v3_req ]
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
subjectAltName = @alt_names
[alt_names]
DNS.1 = $svc
DNS.2 = $my_hostname
DNS.3 = $my_hostname.$svc.$MY_POD_NAMESPACE.svc.cluster.local
DNS.4 = localhost
DNS.5 = 127.0.0.1
{{- if .Values.tls.extraDnsNames }}
{{- range $key, $dnsName := .Values.tls.extraDnsNames }}
{{ $key }} = {{ $dnsName }}
{{- end }}
{{- end }}
EOL
export RANDFILE=/certs/.rnd && openssl genrsa -out /certs/mongo.key 2048
#Create the client/server cert
openssl req -new -key /certs/mongo.key -out /certs/mongo.csr -subj "/C=US/O=My Organisations/OU=IT/CN=$my_hostname" -config /certs/openssl.cnf
#Signing the server cert with the CA cert and key
openssl x509 -req -in /certs/mongo.csr -CA /certs/mongodb-ca-cert -CAkey /certs/mongodb-ca-key -CAcreateserial -out /certs/mongo.crt -days 3650 -extensions v3_req -extfile /certs/openssl.cnf
rm /certs/mongo.csr
#Concatenate to a pem file for use as the client PEM file which can be used for both member and client authentication.
cat /certs/mongo.crt /certs/mongo.key > /certs/mongodb.pem
cd /certs/
shopt -s extglob
rm -rf !(mongodb-ca-cert|mongodb.pem|CAs|openssl.cnf)
chmod 0600 mongodb-ca-cert mongodb.pem
EOF
{{- end }}
{{- end }}
containers:
- name: mongodb
image: {{ include "mongodb.image" . }}
imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
{{- if .Values.containerSecurityContext.enabled }}
securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }}
{{- end }}
{{- if .Values.diagnosticMode.enabled }}
command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }}
{{- else if .Values.command }}
command: {{- include "common.tplvalues.render" (dict "value" .Values.command "context" $) | nindent 12 }}
{{- end }}
{{- if .Values.diagnosticMode.enabled }}
args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }}
{{- else if .Values.args }}
args: {{- include "common.tplvalues.render" (dict "value" .Values.args "context" $) | nindent 12 }}
{{- end }}
env:
- name: BITNAMI_DEBUG
value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }}
{{- $customUsers := include "mongodb.customUsers" . -}}
{{- $customDatabases := include "mongodb.customDatabases" . -}}
{{- if not (empty $customUsers) }}
- name: MONGODB_EXTRA_USERNAMES
value: {{ $customUsers | quote }}
{{- end }}
{{- if not (empty $customDatabases) }}
- name: MONGODB_EXTRA_DATABASES
value: {{ $customDatabases | quote }}
{{- end }}
{{- if .Values.auth.enabled }}
{{- if and (not (empty $customUsers)) (not (empty $customDatabases)) }}
- name: MONGODB_EXTRA_PASSWORDS
valueFrom:
secretKeyRef:
name: {{ include "mongodb.secretName" . }}
key: mongodb-passwords
{{- end }}
- name: MONGODB_ROOT_USER
value: {{ .Values.auth.rootUser | quote }}
- name: MONGODB_ROOT_PASSWORD
valueFrom:
secretKeyRef:
name: {{ include "mongodb.secretName" . }}
key: mongodb-root-password
{{- end }}
{{- if and .Values.metrics.enabled (not (empty .Values.metrics.username)) }}
- name: MONGODB_METRICS_USERNAME
value: {{ .Values.metrics.username | quote }}
{{- if .Values.auth.enabled }}
- name: MONGODB_METRICS_PASSWORD
valueFrom:
secretKeyRef:
name: {{ include "mongodb.secretName" . }}
key: mongodb-metrics-password
{{- end }}
{{- end }}
- name: ALLOW_EMPTY_PASSWORD
value: {{ ternary "no" "yes" .Values.auth.enabled | quote }}
- name: MONGODB_SYSTEM_LOG_VERBOSITY
value: {{ .Values.systemLogVerbosity | quote }}
- name: MONGODB_DISABLE_SYSTEM_LOG
value: {{ ternary "yes" "no" .Values.disableSystemLog | quote }}
- name: MONGODB_DISABLE_JAVASCRIPT
value: {{ ternary "yes" "no" .Values.disableJavascript | quote }}
- name: MONGODB_ENABLE_JOURNAL
value: {{ ternary "yes" "no" .Values.enableJournal | quote }}
- name: MONGODB_ENABLE_IPV6
value: {{ ternary "yes" "no" .Values.enableIPv6 | quote }}
- name: MONGODB_ENABLE_DIRECTORY_PER_DB
value: {{ ternary "yes" "no" .Values.directoryPerDB | quote }}
{{- $extraFlags := .Values.extraFlags | join " " -}}
{{- if .Values.tls.enabled }}
{{- $extraFlags = printf "--tlsMode=%s --tlsCertificateKeyFile=/certs/mongodb.pem --tlsCAFile=/certs/mongodb-ca-cert %s" .Values.tls.mode $extraFlags }}
{{- end }}
{{- if ne $extraFlags ""}}
- name: MONGODB_EXTRA_FLAGS
value: {{ $extraFlags | quote }}
{{- end }}
{{- if .Values.tls.enabled }}
- name: MONGODB_CLIENT_EXTRA_FLAGS
value: --tls --tlsCertificateKeyFile=/certs/mongodb.pem --tlsCAFile=/certs/mongodb-ca-cert
{{- end }}
{{- if .Values.extraEnvVars }}
{{- include "common.tplvalues.render" (dict "value" .Values.extraEnvVars "context" $) | nindent 12 }}
{{- end }}
{{- if or .Values.extraEnvVarsCM .Values.extraEnvVarsSecret }}
envFrom:
{{- if .Values.extraEnvVarsCM }}
- configMapRef:
name: {{ tpl .Values.extraEnvVarsCM . | quote }}
{{- end }}
{{- if .Values.extraEnvVarsSecret }}
- secretRef:
name: {{ tpl .Values.extraEnvVarsSecret . | quote }}
{{- end }}
{{- end }}
ports:
- name: mongodb
containerPort: 27017
{{- if not .Values.diagnosticMode.enabled }}
{{- if .Values.livenessProbe.enabled }}
livenessProbe:
exec:
command:
- mongo
- --disableImplicitSessions
{{- if .Values.tls.enabled }}
- --tls
- --tlsCertificateKeyFile=/certs/mongodb.pem
- --tlsCAFile=/certs/mongodb-ca-cert
{{- end }}
- --eval
- "db.adminCommand('ping')"
initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.livenessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}
successThreshold: {{ .Values.livenessProbe.successThreshold }}
failureThreshold: {{ .Values.livenessProbe.failureThreshold }}
{{- else if .Values.customLivenessProbe }}
livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customLivenessProbe "context" $) | nindent 12 }}
{{- end }}
{{- end }}
{{- if not .Values.diagnosticMode.enabled }}
{{- if .Values.readinessProbe.enabled }}
readinessProbe:
exec:
command:
- bash
- -ec
- |
{{- if .Values.tls.enabled }}
TLS_OPTIONS='--tls --tlsCertificateKeyFile=/certs/mongodb.pem --tlsCAFile=/certs/mongodb-ca-cert'
{{- end }}
# Run the proper check depending on the version
[[ $(mongo --version | grep "MongoDB shell") =~ ([0-9]+\.[0-9]+\.[0-9]+) ]] && VERSION=${BASH_REMATCH[1]}
. /opt/bitnami/scripts/libversion.sh
VERSION_MAJOR="$(get_sematic_version "$VERSION" 1)"
VERSION_MINOR="$(get_sematic_version "$VERSION" 2)"
VERSION_PATCH="$(get_sematic_version "$VERSION" 3)"
if [[ "$VERSION_MAJOR" -ge 4 ]] && [[ "$VERSION_MINOR" -ge 4 ]] && [[ "$VERSION_PATCH" -ge 2 ]]; then
mongo --disableImplicitSessions $TLS_OPTIONS --eval 'db.hello().isWritablePrimary || db.hello().secondary' | grep -q 'true'
else
mongo --disableImplicitSessions $TLS_OPTIONS --eval 'db.isMaster().ismaster || db.isMaster().secondary' | grep -q 'true'
fi
initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.readinessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}
successThreshold: {{ .Values.readinessProbe.successThreshold }}
failureThreshold: {{ .Values.readinessProbe.failureThreshold }}
{{- else if .Values.customReadinessProbe }}
readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customReadinessProbe "context" $) | nindent 12 }}
{{- end }}
{{- end }}
{{- if not .Values.diagnosticMode.enabled }}
{{- if .Values.startupProbe.enabled }}
startupProbe:
exec:
command:
- bash
- -ec
- |
{{- if .Values.tls.enabled }}
TLS_OPTIONS='--tls --tlsCertificateKeyFile=/certs/mongodb.pem --tlsCAFile=/certs/mongodb-ca-cert'
{{- end }}
mongo --disableImplicitSessions $TLS_OPTIONS --eval 'db.hello().isWritablePrimary || db.hello().secondary' | grep -q 'true'
initialDelaySeconds: {{ .Values.startupProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.startupProbe.periodSeconds }}
timeoutSeconds: {{ .Values.startupProbe.timeoutSeconds }}
successThreshold: {{ .Values.startupProbe.successThreshold }}
failureThreshold: {{ .Values.startupProbe.failureThreshold }}
{{- else if .Values.customStartupProbe }}
startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customStartupProbe "context" $) | nindent 12 }}
{{- end }}
{{- end }}
{{- if .Values.resources }}
resources: {{- toYaml .Values.resources | nindent 12 }}
{{- end }}
volumeMounts:
- name: datadir
mountPath: {{ .Values.persistence.mountPath }}
subPath: {{ .Values.persistence.subPath }}
{{- if or .Values.initdbScriptsConfigMap .Values.initdbScripts }}
- name: custom-init-scripts
mountPath: /docker-entrypoint-initdb.d
{{- end }}
{{- if or .Values.configuration .Values.existingConfigmap }}
- name: config
mountPath: /opt/bitnami/mongodb/conf/mongodb.conf
subPath: mongodb.conf
{{- end }}
{{- if .Values.tls.enabled }}
- name: certs
mountPath: /certs
{{- end }}
{{- if .Values.extraVolumeMounts }}
{{- toYaml .Values.extraVolumeMounts | nindent 12 }}
{{- end }}
{{- if .Values.metrics.enabled }}
- name: metrics
image: {{ template "mongodb.metrics.image" . }}
imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }}
{{- if .Values.containerSecurityContext.enabled }}
securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }}
{{- end }}
command:
- /bin/bash
- -ec
args:
- |
/bin/mongodb_exporter --web.listen-address ":{{ .Values.metrics.containerPort }}" --mongodb.uri "{{ include "mongodb.mongodb_exporter.uri" . }}" {{ .Values.metrics.extraFlags }}
env:
{{- if .Values.auth.enabled }}
{{- if not .Values.metrics.username }}
- name: MONGODB_ROOT_USER
value: {{ .Values.auth.rootUser | quote }}
- name: MONGODB_ROOT_PASSWORD
valueFrom:
secretKeyRef:
name: {{ include "mongodb.secretName" . }}
key: mongodb-root-password
{{- else }}
- name: MONGODB_METRICS_USERNAME
value: {{ .Values.metrics.username | quote }}
- name: MONGODB_METRICS_PASSWORD
valueFrom:
secretKeyRef:
name: {{ include "mongodb.secretName" . }}
key: mongodb-metrics-password
{{- end }}
{{- end }}
volumeMounts:
{{- if .Values.tls.enabled }}
- name: certs
mountPath: /certs
{{- end }}
ports:
- name: metrics
containerPort: {{ .Values.metrics.containerPort }}
{{- if not .Values.diagnosticMode.enabled }}
{{- if .Values.metrics.livenessProbe.enabled }}
livenessProbe:
httpGet:
path: /
port: metrics
initialDelaySeconds: {{ .Values.metrics.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.metrics.livenessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.metrics.livenessProbe.timeoutSeconds }}
failureThreshold: {{ .Values.metrics.livenessProbe.failureThreshold }}
successThreshold: {{ .Values.metrics.livenessProbe.successThreshold }}
{{- end }}
{{- if .Values.metrics.readinessProbe.enabled }}
readinessProbe:
httpGet:
path: /
port: metrics
initialDelaySeconds: {{ .Values.metrics.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.metrics.readinessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.metrics.readinessProbe.timeoutSeconds }}
failureThreshold: {{ .Values.metrics.readinessProbe.failureThreshold }}
successThreshold: {{ .Values.metrics.readinessProbe.successThreshold }}
{{- end }}
{{- end }}
{{- if .Values.metrics.resources }}
resources: {{- toYaml .Values.metrics.resources | nindent 12 }}
{{- end }}
{{- end }}
{{- if .Values.sidecars }}
{{- include "common.tplvalues.render" (dict "value" .Values.sidecars "context" $) | nindent 8 }}
{{- end }}
volumes:
{{- if or .Values.initdbScriptsConfigMap .Values.initdbScripts }}
- name: custom-init-scripts
configMap:
name: {{ template "mongodb.initdbScriptsCM" . }}
{{- end }}
{{- if or .Values.configuration .Values.existingConfigmap }}
- name: config
configMap:
name: {{ include "mongodb.configmapName" . }}
{{- end }}
{{- if .Values.extraVolumes }}
{{- toYaml .Values.extraVolumes | nindent 8 }}
{{- end }}
{{- if .Values.tls.enabled }}
- name: certs
emptyDir: {}
- name: certs-volume
secret:
secretName: {{ template "mongodb.tlsSecretName" . }}
items:
- key: mongodb-ca-cert
path: mongodb-ca-cert
mode: 0600
- key: mongodb-ca-key
path: mongodb-ca-key
mode: 0600
{{- end }}
{{- if not .Values.persistence.enabled }}
- name: datadir
{{- if .Values.persistence.medium }}
emptyDir:
medium: {{ .Values.persistence.medium | quote }}
{{- else }}
emptyDir: {}
{{- end }}
{{- else if .Values.persistence.existingClaim }}
- name: datadir
persistentVolumeClaim:
claimName: {{ printf "%s" (tpl .Values.persistence.existingClaim .) }}
{{- else if not .Values.useStatefulSet }}
- name: datadir
persistentVolumeClaim:
claimName: {{ template "mongodb.fullname" . }}
{{- else }}
volumeClaimTemplates:
- metadata:
name: datadir
{{- if .Values.persistence.annotations }}
annotations: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.annotations "context" $) | nindent 10 }}
{{- end }}
spec:
accessModes:
{{- range .Values.persistence.accessModes }}
- {{ . | quote }}
{{- end }}
resources:
requests:
storage: {{ .Values.persistence.size | quote }}
{{- if .Values.persistence.volumeClaimTemplates.selector }}
selector: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.volumeClaimTemplates.selector "context" $) | nindent 10 }}
{{- end }}
{{ include "common.storage.class" (dict "persistence" .Values.persistence "global" .Values.global) }}
{{- end }}
{{- end }}
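
This manifest only applies to the standalone topology: a single replica rendered as a Deployment, or as a one-member StatefulSet with its own volumeClaimTemplate when `useStatefulSet` is true. A minimal standalone values sketch (the extra mongod flag is just an example):

```
architecture: standalone
useStatefulSet: false        # true switches the workload to a StatefulSet with a volumeClaimTemplate
persistence:
  enabled: true
  size: 8Gi
tls:
  enabled: false
extraFlags:
  - "--quiet"                # example extra mongod flag, appended via MONGODB_EXTRA_FLAGS
```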

View File

@ -0,0 +1,30 @@
{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) (not (eq .Values.architecture "replicaset")) (not .Values.useStatefulSet) }}
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: {{ include "mongodb.fullname" . }}
namespace: {{ include "mongodb.namespace" . }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
app.kubernetes.io/component: mongodb
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
{{- end }}
{{- if or .Values.persistence.annotations .Values.commonAnnotations }}
annotations:
{{- if .Values.commonAnnotations }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
{{- if .Values.persistence.annotations }}
{{- include "common.tplvalues.render" ( dict "value" .Values.persistence.annotations "context" $) | nindent 4 }}
{{- end }}
{{- end }}
spec:
accessModes:
{{- range .Values.persistence.accessModes }}
- {{ . | quote }}
{{- end }}
resources:
requests:
storage: {{ .Values.persistence.size | quote }}
{{ include "common.storage.class" (dict "persistence" .Values.persistence "global" .Values.global) }}
{{- end }}
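
This standalone-only PVC is skipped when persistence is disabled, when an existing claim is supplied, or when the chart renders a StatefulSet (which carries its own volumeClaimTemplate). A values sketch covering both paths (size and claim name are placeholders):

```
persistence:
  enabled: true
  size: 20Gi
  accessModes:
    - ReadWriteOnce
  # Alternatively, reuse a pre-provisioned claim instead of letting the chart create one:
  # existingClaim: mongodb-data
```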

View File

@ -0,0 +1,46 @@
{{- if not (eq .Values.architecture "replicaset") }}
apiVersion: v1
kind: Service
metadata:
name: {{ include "mongodb.fullname" . }}
namespace: {{ include "mongodb.namespace" . }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
app.kubernetes.io/component: mongodb
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
{{- end }}
{{- if or .Values.service.annotations .Values.commonAnnotations }}
annotations:
{{- if .Values.service.annotations }}
{{- include "common.tplvalues.render" ( dict "value" .Values.service.annotations "context" $) | nindent 4 }}
{{- end }}
{{- if .Values.commonAnnotations }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
{{- end }}
spec:
type: {{ .Values.service.type }}
{{- if and (eq .Values.service.type "ClusterIP") .Values.service.clusterIP }}
clusterIP: {{ .Values.service.clusterIP }}
{{- end }}
{{- if and (eq .Values.service.type "LoadBalancer") .Values.service.loadBalancerIP }}
loadBalancerIP: {{ .Values.service.loadBalancerIP }}
{{- end }}
{{- if .Values.service.externalIPs }}
externalIPs: {{ toYaml .Values.service.externalIPs | nindent 4 }}
{{- end }}
{{- if .Values.service.loadBalancerSourceRanges }}
loadBalancerSourceRanges: {{- toYaml .Values.service.loadBalancerSourceRanges | nindent 4 }}
{{- end }}
ports:
- name: {{ .Values.service.portName }}
port: {{ .Values.service.port }}
targetPort: mongodb
{{- if and (or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort")) .Values.service.nodePort }}
nodePort: {{ .Values.service.nodePort }}
{{- else if eq .Values.service.type "ClusterIP" }}
nodePort: null
{{- end }}
selector: {{- include "common.labels.matchLabels" . | nindent 4 }}
app.kubernetes.io/component: mongodb
{{- end }}
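
The standalone Service forwards `service.port` to the container's `mongodb` port under whatever name and type the values request. A sketch for a NodePort exposure (the node port number is a placeholder):

```
service:
  type: NodePort
  portName: mongodb
  port: 27017
  nodePort: 30017              # placeholder; only honoured for NodePort and LoadBalancer services
```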

173
mongodb/values.schema.json Normal file
View File

@ -0,0 +1,173 @@
{
"$schema": "http://json-schema.org/schema#",
"type": "object",
"properties": {
"architecture": {
"type": "string",
"title": "MongoDB&reg; architecture",
"form": true,
"description": "Allowed values: `standalone` or `replicaset`"
},
"auth": {
"type": "object",
"title": "Authentication configuration",
"form": true,
"properties": {
"enabled": {
"type": "boolean",
"title": "Enable Authentication",
"form": true
},
"rootUser": {
"type": "string",
"title": "MongoDB&reg; admin user",
"form": true,
"description": "Name of the admin user. Default is root"
},
"rootPassword": {
"type": "string",
"title": "MongoDB&reg; admin password",
"form": true,
"description": "Defaults to a random 10-character alphanumeric string if not set",
"hidden": {
"value": false,
"path": "auth/enabled"
}
},
"database": {
"type": "string",
"title": "MongoDB&reg; custom database",
"description": "Name of the custom database to be created during the 1st initialization of MongoDB&reg;",
"form": true
},
"username": {
"type": "string",
"title": "MongoDB&reg; custom user",
"description": "Name of the custom user to be created during the 1st initialization of MongoDB&reg;. This user only has permissions on the MongoDB&reg; custom database",
"form": true
},
"password": {
"type": "string",
"title": "Password for MongoDB&reg; custom user",
"form": true,
"description": "Defaults to a random 10-character alphanumeric string if not set",
"hidden": {
"value": false,
"path": "auth/enabled"
}
},
"replicaSetKey": {
"type": "string",
"title": "Key used for replica set authentication",
"form": true,
"description": "Defaults to a random 10-character alphanumeric string if not set",
"hidden": {
"value": "standalone",
"path": "architecture"
}
}
}
},
"replicaCount": {
"type": "integer",
"form": true,
"title": "Number of MongoDB&reg; replicas",
"hidden": {
"value": "standalone",
"path": "architecture"
}
},
"configuration": {
"type": "string",
"title": "MongoDB&reg; Custom Configuration",
"form": true,
"render": "textArea"
},
"arbiter": {
"type": "object",
"title": "Arbiter configuration",
"form": true,
"properties": {
"configuration": {
"type": "string",
"title": "Arbiter Custom Configuration",
"form": true,
"render": "textArea",
"hidden": {
"value": "standalone",
"path": "architecture"
}
}
}
},
"persistence": {
"type": "object",
"title": "Persistence configuration",
"form": true,
"properties": {
"enabled": {
"type": "boolean",
"form": true,
"title": "Enable persistence",
"description": "Enable persistence using Persistent Volume Claims"
},
"size": {
"type": "string",
"title": "Persistent Volume Size",
"form": true,
"render": "slider",
"sliderMin": 1,
"sliderMax": 100,
"sliderUnit": "Gi",
"hidden": {
"value": false,
"path": "persistence/enabled"
}
}
}
},
"volumePermissions": {
"type": "object",
"hidden": {
"value": false,
"path": "persistence/enabled"
},
"properties": {
"enabled": {
"type": "boolean",
"form": true,
"title": "Enable Init Containers",
"description": "Use an init container to set required folder permissions on the data volume before mounting it in the final destination"
}
}
},
"metrics": {
"type": "object",
"form": true,
"title": "Prometheus metrics details",
"properties": {
"enabled": {
"type": "boolean",
"title": "Create Prometheus metrics exporter",
"description": "Create a side-car container to expose Prometheus metrics",
"form": true
},
"serviceMonitor": {
"type": "object",
"properties": {
"enabled": {
"type": "boolean",
"title": "Create Prometheus Operator ServiceMonitor",
"description": "Create a ServiceMonitor to track metrics using Prometheus Operator",
"form": true,
"hidden": {
"value": false,
"path": "metrics/enabled"
}
}
}
}
}
}
}
}
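
Helm validates user-supplied values against this schema on `helm install`, `helm upgrade` and `helm lint`, so type mismatches fail before any manifest is rendered. For instance, a values file like the following would be rejected because the schema declares `replicaCount` as an integer:

```
architecture: replicaset
replicaCount: "three"          # rejected: values.schema.json requires an integer here
```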

1702
mongodb/values.yaml Normal file

File diff suppressed because it is too large