This commit is contained in:
marsal wang
2023-07-26 10:07:34 +08:00
parent f884cb1020
commit 1e5a703cce
5384 changed files with 618283 additions and 4002 deletions

View File

@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@ -0,0 +1,6 @@
dependencies:
- name: common
repository: https://charts.bitnami.com/bitnami
version: 1.17.1
digest: sha256:dacc73770a5640c011e067ff8840ddf89631fc19016c8d0a9e5ea160e7da8690
generated: "2023-03-24T13:14:04.3252942+08:00"

View File

@ -0,0 +1,29 @@
apiVersion: v2
name: canal-server
description: A Helm chart for Kubernetes
dependencies:
- name: common
repository: https://charts.bitnami.com/bitnami
tags:
- bitnami-common
version: 1.x.x
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "1.16.0"

Binary file not shown.

Binary file not shown.

View File

@ -0,0 +1,22 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
{{- range .paths }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
{{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "canal-server.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "canal-server.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "canal-server.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "canal-server.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
{{- end }}

View File

@ -0,0 +1,62 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "canal-server.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "canal-server.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "canal-server.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "canal-server.labels" -}}
helm.sh/chart: {{ include "canal-server.chart" . }}
{{ include "canal-server.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "canal-server.selectorLabels" -}}
app.kubernetes.io/name: {{ include "canal-server.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "canal-server.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "canal-server.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,72 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "canal-server.fullname" . }}
labels:
{{- include "canal-server.labels" . | nindent 4 }}
spec:
{{- if not .Values.autoscaling.enabled }}
replicas: {{ .Values.replicaCount }}
{{- end }}
selector:
matchLabels:
{{- include "canal-server.selectorLabels" . | nindent 6 }}
template:
metadata:
{{- with .Values.podAnnotations }}
annotations:
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
{{- include "canal-server.selectorLabels" . | nindent 8 }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "canal-server.serviceAccountName" . }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
containers:
- name: {{ .Chart.Name }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- name: admin
containerPort: 11110
protocol: TCP
- name: http
containerPort: 11111
protocol: TCP
- name: metrics-pull
containerPort: 11112
protocol: TCP
- name: http2
containerPort: 9100
protocol: TCP
livenessProbe:
tcpSocket:
port: 11110
readinessProbe:
tcpSocket:
port: 11110
env:
- name: TZ
value: "Asia/Shanghai"
{{- toYaml .Values.extraEnvVars | nindent 12 }}
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}

View File

@ -0,0 +1,28 @@
{{- if .Values.autoscaling.enabled }}
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "canal-server.fullname" . }}
labels:
{{- include "canal-server.labels" . | nindent 4 }}
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ include "canal-server.fullname" . }}
minReplicas: {{ .Values.autoscaling.minReplicas }}
maxReplicas: {{ .Values.autoscaling.maxReplicas }}
metrics:
{{- if .Values.autoscaling.targetCPUUtilizationPercentage }}
- type: Resource
resource:
name: cpu
targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
{{- end }}
{{- if .Values.autoscaling.targetMemoryUtilizationPercentage }}
- type: Resource
resource:
name: memory
targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,61 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "canal-server.fullname" . -}}
{{- $svcPort := .Values.service.port -}}
{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }}
{{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }}
{{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}}
{{- end }}
{{- end }}
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1
{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1beta1
{{- else -}}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
name: {{ $fullName }}
labels:
{{- include "canal-server.labels" . | nindent 4 }}
{{- with .Values.ingress.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }}
ingressClassName: {{ .Values.ingress.className }}
{{- end }}
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}
- host: {{ .host | quote }}
http:
paths:
{{- range .paths }}
- path: {{ .path }}
{{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }}
pathType: {{ .pathType }}
{{- end }}
backend:
{{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }}
service:
name: {{ $fullName }}
port:
number: {{ $svcPort }}
{{- else }}
serviceName: {{ $fullName }}
servicePort: {{ $svcPort }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "canal-server.fullname" . }}
labels:
{{- include "canal-server.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.port }}
targetPort: 11110
protocol: TCP
name: http
selector:
{{- include "canal-server.selectorLabels" . | nindent 4 }}

View File

@ -0,0 +1,12 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "canal-server.serviceAccountName" . }}
labels:
{{- include "canal-server.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,15 @@
apiVersion: v1
kind: Pod
metadata:
name: "{{ include "canal-server.fullname" . }}-test-connection"
labels:
{{- include "canal-server.labels" . | nindent 4 }}
annotations:
"helm.sh/hook": test
spec:
containers:
- name: wget
image: busybox
command: ['wget']
args: ['{{ include "canal-server.fullname" . }}:{{ .Values.service.port }}']
restartPolicy: Never

View File

@ -0,0 +1,116 @@
# Default values for canal-server.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: canal/canal-server
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: "v1.1.5"
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
serviceAccount:
# Specifies whether a service account should be created
create: false
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
podAnnotations: {}
podSecurityContext: {}
# fsGroup: 2000
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
port: 11111
ingress:
enabled: false
className: ""
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: chart-example.local
paths:
- path: /
pathType: ImplementationSpecific
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
extraEnvVars:
- name: "canal.instance.master.address"
value: "mysql:3306"
- name: "canal.instance.dbUsername"
value: "root"
- name: "canal.instance.dbPassword"
value: "gkxl650"
- name: "canal.instance.gtidon"
value: "false"
- name: "canal.instance.connectionCharset"
value: "UTF-8"
- name: "canal.instance.tsdb.enable"
value: "true"
- name: "canal.instance.enableDruid"
value: "false"
- name: "canal.instance.filter.regex"
value: 'zd_rescue\\\\\\\\user_order_his.*,zd_rescue\\\\\\\\.task_order_his.*,zd_rescue\\\\\\\\task_order_cost_his.*,zd_rescue.supplier_account_record,zd_rescue.customer_order_account'
- name: "canal.mq.topic"
value: "canal_example"
- name: "canal.serverMode"
value: "rabbitMQ"
- name: "canal.instance.parser.parallel"
value: "true"
- name: "rabbitmq.host"
value: "rabbitmq:5672"
- name: "rabbitmq.virtual.host"
value: "canal"
- name: "rabbitmq.exchange"
value: "canal_exchange"
- name: "rabbitmq.username"
value: "root"
- name: "rabbitmq.password"
value: "gkxl650"
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 100
targetCPUUtilizationPercentage: 80
# targetMemoryUtilizationPercentage: 80
nodeSelector: {}
tolerations: []
affinity: {}

27
helm/ceph/README.md Normal file
View File

@ -0,0 +1,27 @@
# rdb
部署 ceph-csi-provisioner
kubectl apply -f ceph/ceph-csi/rbd/kubernetes/
部署 ceph-sc
kubectl apply -f ceph/ceph-csi/rbd/deploy/
kubectl apply -k ceph/external-snapshotter/snapshot-controller/
kubectl apply -k ceph/external-snapshotter/crd/
helm upgrade -i ceph-csi-rbd ceph/ceph-csi-rbd --values local-values/ceph/rbd.yaml -n ceph-csi
helm upgrade -i ceph-csi-cephfs ceph/ceph-csi-cephfs --values local-values/ceph/fs.yaml -n ceph-csi
helm upgrade -i ceph-csi-rbd ceph/ceph-csi-fs -n ceph-csi --dry-run
helm upgrade -i --set storageClass.create=true --set storageClass.create=true --set secret.create=true ceph-csi-rbd ceph/ceph-csi-cephfs/ -n ceph-csi --dry-run

View File

@ -0,0 +1,21 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj

View File

@ -0,0 +1,15 @@
---
apiVersion: v1
appVersion: canary
description: "Container Storage Interface (CSI) driver,
provisioner, snapshotter and attacher for Ceph cephfs"
name: ceph-csi-cephfs
version: 3-canary
keywords:
- ceph
- cephfs
- ceph-csi
home: https://github.com/ceph/ceph-csi
sources:
- https://github.com/ceph/ceph-csi/tree/devel/charts/ceph-csi-cephfs
icon: https://raw.githubusercontent.com/ceph/ceph-csi/devel/assets/ceph-logo.png

View File

@ -0,0 +1,168 @@
# ceph-csi-cephfs
The ceph-csi-cephfs chart adds cephFS volume support to your cluster.
## Install from release repo
Add chart repository to install helm charts from it
```console
helm repo add ceph-csi https://ceph.github.io/csi-charts
```
## Install from local Chart
we need to enter into the directory where all charts are present
```console
cd charts
```
**Note:** charts directory is present in root of the ceph-csi project
### Install Chart
To install the Chart into your Kubernetes cluster
- For helm 2.x
```bash
helm install --namespace "ceph-csi-cephfs" --name "ceph-csi-cephfs" ceph-csi/ceph-csi-cephfs
```
- For helm 3.x
Create the namespace where Helm should install the components with
```bash
kubectl create namespace ceph-csi-cephfs
```
Run the installation
```bash
helm install --namespace "ceph-csi-cephfs" "ceph-csi-cephfs" ceph-csi/ceph-csi-cephfs
```
After installation succeeds, you can get a status of Chart
```bash
helm status "ceph-csi-cephfs"
```
### Delete Chart
If you want to delete your Chart, use this command
- For helm 2.x
```bash
helm delete --purge "ceph-csi-cephfs"
```
- For helm 3.x
```bash
helm uninstall "ceph-csi-cephfs" --namespace "ceph-csi-cephfs"
```
If you want to delete the namespace, use this command
```bash
kubectl delete namespace ceph-csi-cephfs
```
### Configuration
The following table lists the configurable parameters of the ceph-csi-cephfs
charts and their default values.
| Parameter | Description | Default |
| ---------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------- |
| `rbac.create` | Specifies whether RBAC resources should be created | `true` |
| `serviceAccounts.nodeplugin.create` | Specifies whether a nodeplugin ServiceAccount should be created | `true` |
| `serviceAccounts.nodeplugin.name` | The name of the nodeplugin ServiceAccount to use. If not set and create is true, a name is generated using the fullname | "" |
| `serviceAccounts.provisioner.create` | Specifies whether a provisioner ServiceAccount should be created | `true` |
| `serviceAccounts.provisioner.name` | The name of the provisioner ServiceAccount of provisioner to use. If not set and create is true, a name is generated using the fullname | "" |
| `csiConfig` | Configuration for the CSI to connect to the cluster | [] |
| `logLevel` | Set logging level for csi containers. Supported values from 0 to 5. 0 for general useful logs, 5 for trace level verbosity. | `5` |
| `nodeplugin.name` | Specifies the nodeplugin name | `nodeplugin` |
| `nodeplugin.updateStrategy` | Specifies the update Strategy. If you are using ceph-fuse client set this value to OnDelete | `RollingUpdate` |
| `nodeplugin.priorityClassName` | Set user created priorityclassName for csi plugin pods. default is system-node-critical which is highest priority | `system-node-critical` |
| `nodeplugin.profiling.enabled` | Specifies whether profiling should be enabled | `false` |
| `nodeplugin.registrar.image.repository` | Node-Registrar image repository URL | `k8s.gcr.io/sig-storage/csi-node-driver-registrar` |
| `nodeplugin.registrar.image.tag` | Image tag | `v2.2.0` |
| `nodeplugin.registrar.image.pullPolicy` | Image pull policy | `IfNotPresent` |
| `nodeplugin.plugin.image.repository` | Nodeplugin image repository URL | `quay.io/cephcsi/cephcsi` |
| `nodeplugin.plugin.image.tag` | Image tag | `canary` |
| `nodeplugin.plugin.image.pullPolicy` | Image pull policy | `IfNotPresent` |
| `nodeplugin.nodeSelector` | Kubernetes `nodeSelector` to add to the Daemonset | `{}` |
| `nodeplugin.tolerations` | List of Kubernetes `tolerations` to add to the Daemonset | `{}` |
| `nodeplugin.forcecephkernelclient` | Set to true to enable Ceph Kernel clients on kernel < 4.17 which support quotas | `true` |
| `nodeplugin.podSecurityPolicy.enabled` | If true, create & use [Pod Security Policy resources](https://kubernetes.io/docs/concepts/policy/pod-security-policy/). | `false` |
| `provisioner.name` | Specifies the name of provisioner | `provisioner` |
| `provisioner.replicaCount` | Specifies the replicaCount | `3` |
| `provisioner.timeout` | GRPC timeout for waiting for creation or deletion of a volume | `60s` |
| `provisioner.priorityClassName` | Set user created priorityclassName for csi provisioner pods. Default is `system-cluster-critical` which is less priority than `system-node-critical` | `system-cluster-critical` |
| `provisioner.profiling.enabled` | Specifies whether profiling should be enabled | `false` |
| `provisioner.provisioner.image.repository` | Specifies the csi-provisioner image repository URL | `k8s.gcr.io/sig-storage/csi-provisioner` |
| `provisioner.provisioner.image.tag` | Specifies image tag | `v2.2.2` |
| `provisioner.provisioner.image.pullPolicy` | Specifies pull policy | `IfNotPresent` |
| `provisioner.attacher.image.repository` | Specifies the csi-attacher image repository URL | `k8s.gcr.io/sig-storage/csi-attacher` |
| `provisioner.attacher.image.tag` | Specifies image tag | `v3.2.1` |
| `provisioner.attacher.image.pullPolicy` | Specifies pull policy | `IfNotPresent` |
| `provisioner.attacher.name` | Specifies the name of csi-attacher sidecar | `attacher` |
| `provisioner.attacher.enabled` | Specifies whether attacher sidecar is enabled | `true` |
| `provisioner.resizer.image.repository` | Specifies the csi-resizer image repository URL | `k8s.gcr.io/sig-storage/csi-resizer` |
| `provisioner.resizer.image.tag` | Specifies image tag | `v1.2.0` |
| `provisioner.resizer.image.pullPolicy` | Specifies pull policy | `IfNotPresent` |
| `provisioner.resizer.name` | Specifies the name of csi-resizer sidecar | `resizer` |
| `provisioner.resizer.enabled` | Specifies whether resizer sidecar is enabled | `true` |
| `provisioner.snapshotter.image.repository` | Specifies the csi-snapshotter image repository URL | `k8s.gcr.io/sig-storage/csi-snapshotter` |
| `provisioner.snapshotter.image.tag` | Specifies image tag | `v4.1.1` |
| `provisioner.snapshotter.image.pullPolicy` | Specifies pull policy | `IfNotPresent` |
| `provisioner.nodeSelector` | Specifies the node selector for provisioner deployment | `{}` |
| `provisioner.tolerations` | Specifies the tolerations for provisioner deployment | `{}` |
| `provisioner.affinity` | Specifies the affinity for provisioner deployment | `{}` |
| `provisioner.podSecurityPolicy.enabled` | Specifies whether podSecurityPolicy is enabled | `false` |
| `topology.enabled` | Specifies whether topology based provisioning support should be exposed by CSI | `false` |
| `topology.domainLabels` | DomainLabels define which node labels to use as domains for CSI nodeplugins to advertise their domains | `{}` |
| `provisionerSocketFile` | The filename of the provisioner socket | `csi-provisioner.sock` |
| `pluginSocketFile` | The filename of the plugin socket | `csi.sock` |
| `kubeletDir` | Kubelet working directory | `/var/lib/kubelet` |
| `driverName` | Name of the csi-driver | `cephfs.csi.ceph.com` |
| `configMapName` | Name of the configmap which contains cluster configuration | `ceph-csi-config` |
| `externallyManagedConfigmap` | Specifies the use of an externally provided configmap | `false` |
| `storageClass.create` | Specifies whether the StorageClass should be created | `false` |
| `storageClass.name` | Specifies the cephFS StorageClass name | `csi-cephfs-sc` |
| `storageClass.annotations` | Specifies the annotations for the cephFS storageClass | `[]` |
| `storageClass.clusterID` | String representing a Ceph cluster to provision storage from | `<cluster-ID>` |
| `storageClass.fsName` | CephFS filesystem name into which the volume shall be created | `myfs` |
| `storageClass.pool` | Ceph pool into which volume data shall be stored | `""` |
| `storageClass.fuseMountOptions` | Comma separated string of Ceph-fuse mount options | `""` |
| `storageclass.kernelMountOptions` | Comma separated string of CephFS kernel mount options | `""` |
| `storageClass.mounter` | The driver can use either ceph-fuse (fuse) or ceph kernelclient (kernel) | `""` |
| `storageClass.volumeNamePrefix` | Prefix to use for naming subvolumes | `""` |
| `storageClass.provisionerSecret` | The secrets have to contain user and/or Ceph admin credentials. | `csi-cephfs-secret` |
| `storageClass.provisionerSecretNamespace` | Specifies the provisioner secret namespace | `""` |
| `storageClass.controllerExpandSecret` | Specifies the controller expand secret name | `csi-cephfs-secret` |
| `storageClass.controllerExpandSecretNamespace` | Specifies the controller expand secret namespace | `""` |
| `storageClass.nodeStageSecret` | Specifies the node stage secret name | `csi-cephfs-secret` |
| `storageClass.nodeStageSecretNamespace` | Specifies the node stage secret namespace | `""` |
| `storageClass.reclaimPolicy` | Specifies the reclaim policy of the StorageClass | `Delete` |
| `storageClass.allowVolumeExpansion` | Specifies whether volume expansion should be allowed | `true` |
| `storageClass.mountOptions` | Specifies the mount options | `[]` |
| `secret.create` | Specifies whether the secret should be created | `false` |
| `secret.name` | Specifies the cephFS secret name | `csi-cephfs-secret` |
| `secret.adminID` | Specifies the admin ID of the cephFS secret | `<plaintext ID>` |
| `secret.adminKey` | Specifies the key that corresponds to the adminID | `<Ceph auth key corresponding to ID above>` |
### Command Line
You can pass the settings with helm command line parameters.
Specify each parameter using the --set key=value argument to helm install.
For Example:
```bash
helm install --set configMapName=ceph-csi-config --set provisioner.podSecurityPolicy.enabled=true
```

View File

@ -0,0 +1,2 @@
Examples on how to configure a storage class and start using the driver are here:
https://github.com/ceph/ceph-csi/tree/devel/examples/cephfs

View File

@ -0,0 +1,90 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "ceph-csi-cephfs.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "ceph-csi-cephfs.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "ceph-csi-cephfs.nodeplugin.fullname" -}}
{{- if .Values.nodeplugin.fullnameOverride -}}
{{- .Values.nodeplugin.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- printf "%s-%s" .Release.Name .Values.nodeplugin.name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s-%s" .Release.Name $name .Values.nodeplugin.name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "ceph-csi-cephfs.provisioner.fullname" -}}
{{- if .Values.provisioner.fullnameOverride -}}
{{- .Values.provisioner.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- printf "%s-%s" .Release.Name .Values.provisioner.name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s-%s" .Release.Name $name .Values.provisioner.name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "ceph-csi-cephfs.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "ceph-csi-cephfs.serviceAccountName.nodeplugin" -}}
{{- if .Values.serviceAccounts.nodeplugin.create -}}
{{ default (include "ceph-csi-cephfs.nodeplugin.fullname" .) .Values.serviceAccounts.nodeplugin.name }}
{{- else -}}
{{ default "default" .Values.serviceAccounts.nodeplugin.name }}
{{- end -}}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "ceph-csi-cephfs.serviceAccountName.provisioner" -}}
{{- if .Values.serviceAccounts.provisioner.create -}}
{{ default (include "ceph-csi-cephfs.provisioner.fullname" .) .Values.serviceAccounts.provisioner.name }}
{{- else -}}
{{ default "default" .Values.serviceAccounts.provisioner.name }}
{{- end -}}
{{- end -}}

View File

@ -0,0 +1,15 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: ceph-config
namespace: {{ .Release.Namespace }}
labels:
app: {{ include "ceph-csi-cephfs.name" . }}
chart: {{ include "ceph-csi-cephfs.chart" . }}
component: {{ .Values.nodeplugin.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
data:
ceph.conf: |
{{ tpl .Values.cephconf . | indent 4 }}
keyring: ""

View File

@ -0,0 +1,11 @@
{{ if semverCompare ">=1.18.0-beta.1" .Capabilities.KubeVersion.Version }}
apiVersion: storage.k8s.io/v1
{{ else }}
apiVersion: storage.k8s.io/v1beta1
{{ end }}
kind: CSIDriver
metadata:
name: {{ .Values.driverName }}
spec:
attachRequired: true
podInfoOnMount: false

View File

@ -0,0 +1,16 @@
{{- if not .Values.externallyManagedConfigmap }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ .Values.configMapName | quote }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ include "ceph-csi-cephfs.name" . }}
chart: {{ include "ceph-csi-cephfs.chart" . }}
component: {{ .Values.provisioner.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
data:
config.json: |-
{{ toJson .Values.csiConfig | indent 4 -}}
{{- end }}

View File

@ -0,0 +1,18 @@
{{- if .Values.rbac.create -}}
{{- if .Values.topology.enabled }}
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ include "ceph-csi-cephfs.nodeplugin.fullname" . }}
labels:
app: {{ include "ceph-csi-cephfs.name" . }}
chart: {{ include "ceph-csi-cephfs.chart" . }}
component: {{ .Values.nodeplugin.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
rules:
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get"]
{{- end }}
{{- end -}}

View File

@ -0,0 +1,22 @@
{{- if .Values.rbac.create -}}
{{- if .Values.topology.enabled }}
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ include "ceph-csi-cephfs.nodeplugin.fullname" . }}
labels:
app: {{ include "ceph-csi-cephfs.name" . }}
chart: {{ include "ceph-csi-cephfs.chart" . }}
component: {{ .Values.nodeplugin.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
subjects:
- kind: ServiceAccount
name: {{ include "ceph-csi-cephfs.serviceAccountName.nodeplugin" . }}
namespace: {{ .Release.Namespace }}
roleRef:
kind: ClusterRole
name: {{ include "ceph-csi-cephfs.nodeplugin.fullname" . }}
apiGroup: rbac.authorization.k8s.io
{{- end }}
{{- end -}}

View File

@ -0,0 +1,213 @@
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: {{ include "ceph-csi-cephfs.nodeplugin.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ include "ceph-csi-cephfs.name" . }}
chart: {{ include "ceph-csi-cephfs.chart" . }}
component: {{ .Values.nodeplugin.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
selector:
matchLabels:
app: {{ include "ceph-csi-cephfs.name" . }}
component: {{ .Values.nodeplugin.name }}
release: {{ .Release.Name }}
updateStrategy:
type: {{ .Values.nodeplugin.updateStrategy }}
template:
metadata:
labels:
app: {{ include "ceph-csi-cephfs.name" . }}
chart: {{ include "ceph-csi-cephfs.chart" . }}
component: {{ .Values.nodeplugin.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
serviceAccountName: {{ include "ceph-csi-cephfs.serviceAccountName.nodeplugin" . }}
{{- if .Values.nodeplugin.priorityClassName }}
priorityClassName: {{ .Values.nodeplugin.priorityClassName }}
{{- end }}
hostNetwork: true
# to use e.g. Rook orchestrated cluster, and mons' FQDN is
# resolved through k8s service, set dns policy to cluster first
dnsPolicy: ClusterFirstWithHostNet
containers:
- name: driver-registrar
# This is necessary only for systems with SELinux, where
# non-privileged sidecar containers cannot access unix domain socket
# created by privileged CSI driver container.
securityContext:
privileged: true
image: "{{ .Values.nodeplugin.registrar.image.repository }}:{{ .Values.nodeplugin.registrar.image.tag }}"
imagePullPolicy: {{ .Values.nodeplugin.registrar.image.pullPolicy }}
args:
- "--v={{ .Values.logLevel }}"
- "--csi-address=/csi/{{ .Values.pluginSocketFile }}"
- "--kubelet-registration-path={{ .Values.kubeletDir }}/plugins/{{ .Values.driverName }}/{{ .Values.pluginSocketFile }}"
env:
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: registration-dir
mountPath: /registration
resources:
{{ toYaml .Values.nodeplugin.registrar.resources | indent 12 }}
- name: csi-cephfsplugin
image: "{{ .Values.nodeplugin.plugin.image.repository }}:{{ .Values.nodeplugin.plugin.image.tag }}"
imagePullPolicy: {{ .Values.nodeplugin.plugin.image.pullPolicy }}
args:
- "--nodeid=$(NODE_ID)"
- "--type=cephfs"
- "--nodeserver=true"
- "--pidlimit=-1"
{{- if .Values.nodeplugin.forcecephkernelclient }}
- "--forcecephkernelclient={{ .Values.nodeplugin.forcecephkernelclient }}"
{{- end }}
- "--endpoint=$(CSI_ENDPOINT)"
- "--v={{ .Values.logLevel }}"
- "--drivername=$(DRIVER_NAME)"
{{- if .Values.topology.enabled }}
- "--domainlabels={{ .Values.topology.domainLabels | join "," }}"
{{- end }}
{{- if .Values.nodeplugin.profiling.enabled }}
- "--enableprofiling={{ .Values.nodeplugin.profiling.enabled }}"
{{- end }}
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: DRIVER_NAME
value: {{ .Values.driverName }}
- name: NODE_ID
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CSI_ENDPOINT
value: "unix:///csi/{{ .Values.pluginSocketFile }}"
securityContext:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: mountpoint-dir
mountPath: {{ .Values.kubeletDir }}/pods
mountPropagation: Bidirectional
- name: plugin-dir
mountPath: {{ .Values.kubeletDir }}/plugins
mountPropagation: "Bidirectional"
- mountPath: /dev
name: host-dev
- mountPath: /run/mount
name: host-mount
- mountPath: /sys
name: host-sys
- mountPath: /etc/selinux
name: etc-selinux
readOnly: true
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- name: ceph-config
mountPath: /etc/ceph/
- name: ceph-csi-config
mountPath: /etc/ceph-csi-config/
- name: keys-tmp-dir
mountPath: /tmp/csi/keys
resources:
{{ toYaml .Values.nodeplugin.plugin.resources | indent 12 }}
{{- if .Values.nodeplugin.httpMetrics.enabled }}
- name: liveness-prometheus
securityContext:
privileged: true
image: "{{ .Values.nodeplugin.plugin.image.repository }}:{{ .Values.nodeplugin.plugin.image.tag }}"
imagePullPolicy: {{ .Values.nodeplugin.plugin.image.pullPolicy }}
args:
- "--type=liveness"
- "--endpoint=$(CSI_ENDPOINT)"
- "--metricsport={{ .Values.nodeplugin.httpMetrics.containerPort }}"
- "--metricspath=/metrics"
- "--polltime=60s"
- "--timeout=3s"
env:
- name: CSI_ENDPOINT
value: "unix:///csi/{{ .Values.pluginSocketFile }}"
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
volumeMounts:
- name: socket-dir
mountPath: /csi
resources:
{{ toYaml .Values.nodeplugin.plugin.resources | indent 12 }}
{{- end }}
volumes:
- name: socket-dir
hostPath:
path: "{{ .Values.kubeletDir }}/plugins/{{ .Values.driverName }}"
type: DirectoryOrCreate
- name: registration-dir
hostPath:
path: {{ .Values.kubeletDir }}/plugins_registry
type: Directory
- name: mountpoint-dir
hostPath:
path: {{ .Values.kubeletDir }}/pods
type: DirectoryOrCreate
- name: plugin-dir
hostPath:
path: {{ .Values.kubeletDir }}/plugins
type: Directory
- name: host-sys
hostPath:
path: /sys
- name: etc-selinux
hostPath:
path: /etc/selinux
- name: host-mount
hostPath:
path: /run/mount
- name: lib-modules
hostPath:
path: /lib/modules
- name: host-dev
hostPath:
path: /dev
- name: ceph-config
configMap:
name: ceph-config
- name: ceph-csi-config
configMap:
name: {{ .Values.configMapName | quote }}
{{- if .Values.configMapKey }}
items:
- key: {{ .Values.configMapKey | quote }}
path: config.json
{{- end }}
- name: keys-tmp-dir
emptyDir: {
medium: "Memory"
}
{{- if .Values.nodeplugin.affinity }}
affinity:
{{ toYaml .Values.nodeplugin.affinity | indent 8 -}}
{{- end -}}
{{- if .Values.nodeplugin.nodeSelector }}
nodeSelector:
{{ toYaml .Values.nodeplugin.nodeSelector | indent 8 -}}
{{- end -}}
{{- if .Values.nodeplugin.tolerations }}
tolerations:
{{ toYaml .Values.nodeplugin.tolerations | indent 8 -}}
{{- end -}}

View File

@ -0,0 +1,41 @@
{{- if .Values.nodeplugin.httpMetrics.service.enabled -}}
apiVersion: v1
kind: Service
metadata:
{{- if .Values.nodeplugin.httpMetrics.service.annotations }}
annotations:
{{ toYaml .Values.nodeplugin.httpMetrics.service.annotations | indent 4 }}
{{- end }}
name: {{ include "ceph-csi-cephfs.nodeplugin.fullname" . }}-http-metrics
namespace: {{ .Release.Namespace }}
labels:
app: {{ include "ceph-csi-cephfs.fullname" . }}
chart: {{ include "ceph-csi-cephfs.chart" . }}
component: {{ .Values.nodeplugin.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
{{- if .Values.nodeplugin.httpMetrics.service.clusterIP }}
clusterIP: "{{ .Values.nodeplugin.httpMetrics.service.clusterIP }}"
{{- end }}
{{- if .Values.nodeplugin.httpMetrics.service.externalIPs }}
externalIPs:
{{ toYaml .Values.nodeplugin.httpMetrics.service.externalIPs | indent 4 }}
{{- end }}
{{- if .Values.nodeplugin.httpMetrics.service.loadBalancerIP }}
loadBalancerIP: "{{ .Values.nodeplugin.httpMetrics.service.loadBalancerIP }}"
{{- end }}
{{- if .Values.nodeplugin.httpMetrics.service.loadBalancerSourceRanges }}
loadBalancerSourceRanges:
{{ toYaml .Values.nodeplugin.httpMetrics.service.loadBalancerSourceRanges | indent 4 }}
{{- end }}
ports:
- name: http-metrics
port: {{ .Values.nodeplugin.httpMetrics.service.servicePort }}
targetPort: {{ .Values.nodeplugin.httpMetrics.containerPort }}
selector:
app: {{ include "ceph-csi-cephfs.name" . }}
component: {{ .Values.nodeplugin.name }}
release: {{ .Release.Name }}
type: "{{ .Values.nodeplugin.httpMetrics.service.type }}"
{{- end -}}

View File

@ -0,0 +1,46 @@
{{- if .Values.nodeplugin.podSecurityPolicy.enabled -}}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ include "ceph-csi-cephfs.nodeplugin.fullname" . }}
labels:
app: {{ include "ceph-csi-cephfs.fullname" . }}
chart: {{ include "ceph-csi-cephfs.chart" . }}
component: {{ .Values.nodeplugin.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
allowPrivilegeEscalation: true
allowedCapabilities:
- 'SYS_ADMIN'
fsGroup:
rule: RunAsAny
privileged: true
hostNetwork: true
hostPID: true
runAsUser:
rule: RunAsAny
seLinux:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
volumes:
- 'configMap'
- 'emptyDir'
- 'projected'
- 'secret'
- 'hostPath'
allowedHostPaths:
- pathPrefix: '/dev'
readOnly: false
- pathPrefix: '/run/mount'
readOnly: false
- pathPrefix: '/sys'
readOnly: false
- pathPrefix: '/etc/selinux'
readOnly: true
- pathPrefix: '/lib/modules'
readOnly: true
- pathPrefix: '{{ .Values.kubeletDir }}'
readOnly: false
{{- end }}

View File

@ -0,0 +1,18 @@
{{- if and .Values.rbac.create .Values.nodeplugin.podSecurityPolicy.enabled -}}
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ include "ceph-csi-cephfs.nodeplugin.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ include "ceph-csi-cephfs.fullname" . }}
chart: {{ include "ceph-csi-cephfs.chart" . }}
component: {{ .Values.nodeplugin.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
rules:
- apiGroups: ['policy']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames: ['{{ include "ceph-csi-cephfs.nodeplugin.fullname" . }}']
{{- end -}}

View File

@ -0,0 +1,21 @@
{{- if and .Values.rbac.create .Values.nodeplugin.podSecurityPolicy.enabled -}}
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ include "ceph-csi-cephfs.nodeplugin.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ include "ceph-csi-cephfs.fullname" . }}
chart: {{ include "ceph-csi-cephfs.chart" . }}
component: {{ .Values.nodeplugin.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
subjects:
- kind: ServiceAccount
name: {{ include "ceph-csi-cephfs.serviceAccountName.nodeplugin" . }}
namespace: {{ .Release.Namespace }}
roleRef:
kind: Role
name: {{ include "ceph-csi-cephfs.nodeplugin.fullname" . }}
apiGroup: rbac.authorization.k8s.io
{{- end -}}

View File

@ -0,0 +1,13 @@
{{- if .Values.serviceAccounts.nodeplugin.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "ceph-csi-cephfs.serviceAccountName.nodeplugin" . }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ include "ceph-csi-cephfs.name" . }}
chart: {{ include "ceph-csi-cephfs.chart" . }}
component: {{ .Values.nodeplugin.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
{{- end -}}

View File

@ -0,0 +1,63 @@
{{- if .Values.rbac.create -}}
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ include "ceph-csi-cephfs.provisioner.fullname" . }}
labels:
app: {{ include "ceph-csi-cephfs.name" . }}
chart: {{ include "ceph-csi-cephfs.chart" . }}
component: {{ .Values.provisioner.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete","patch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots"]
verbs: ["get", "list"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["create", "get", "list", "watch", "update", "delete"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents/status"]
verbs: ["update"]
{{- if .Values.provisioner.attacher.enabled }}
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "update", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments/status"]
verbs: ["patch"]
{{- end -}}
{{- if semverCompare ">=1.15" .Capabilities.KubeVersion.Version -}}
{{- if .Values.provisioner.resizer.enabled }}
- apiGroups: [""]
resources: ["persistentvolumeclaims/status"]
verbs: ["update", "patch"]
{{- end -}}
{{- end -}}
{{- if .Values.topology.enabled }}
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["csinodes"]
verbs: ["get", "list", "watch"]
{{- end }}
{{- end -}}

View File

@ -0,0 +1,20 @@
{{- if .Values.rbac.create -}}
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ include "ceph-csi-cephfs.provisioner.fullname" . }}
labels:
app: {{ include "ceph-csi-cephfs.name" . }}
chart: {{ include "ceph-csi-cephfs.chart" . }}
component: {{ .Values.provisioner.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
subjects:
- kind: ServiceAccount
name: {{ include "ceph-csi-cephfs.serviceAccountName.provisioner" . }}
namespace: {{ .Release.Namespace }}
roleRef:
kind: ClusterRole
name: {{ include "ceph-csi-cephfs.provisioner.fullname" . }}
apiGroup: rbac.authorization.k8s.io
{{- end -}}

View File

@ -0,0 +1,241 @@
kind: Deployment
apiVersion: apps/v1
metadata:
name: {{ include "ceph-csi-cephfs.provisioner.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ include "ceph-csi-cephfs.name" . }}
chart: {{ include "ceph-csi-cephfs.chart" . }}
component: {{ .Values.provisioner.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
replicas: {{ .Values.provisioner.replicaCount }}
strategy:
type: {{ .Values.provisioner.strategy.type }}
{{- if eq .Values.provisioner.strategy.type "RollingUpdate" }}
rollingUpdate:
maxUnavailable: {{ .Values.provisioner.strategy.rollingUpdate.maxUnavailable }}
{{- end }}
selector:
matchLabels:
app: {{ include "ceph-csi-cephfs.name" . }}
component: {{ .Values.provisioner.name }}
release: {{ .Release.Name }}
template:
metadata:
labels:
app: {{ include "ceph-csi-cephfs.name" . }}
chart: {{ include "ceph-csi-cephfs.chart" . }}
component: {{ .Values.provisioner.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
{{- if gt (int .Values.provisioner.replicaCount) 1 }}
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app
operator: In
values:
- {{ include "ceph-csi-cephfs.name" . }}
- key: component
operator: In
values:
- {{ .Values.provisioner.name }}
topologyKey: "kubernetes.io/hostname"
{{- end }}
serviceAccountName: {{ include "ceph-csi-cephfs.serviceAccountName.provisioner" . }}
{{- if .Values.provisioner.priorityClassName }}
priorityClassName: {{ .Values.provisioner.priorityClassName }}
{{- end }}
containers:
- name: csi-provisioner
image: "{{ .Values.provisioner.provisioner.image.repository }}:{{ .Values.provisioner.provisioner.image.tag }}"
imagePullPolicy: {{ .Values.provisioner.provisioner.image.pullPolicy }}
args:
- "--csi-address=$(ADDRESS)"
- "--v={{ .Values.logLevel }}"
- "--timeout={{ .Values.provisioner.timeout }}"
- "--leader-election=true"
- "--retry-interval-start=500ms"
- "--extra-create-metadata=true"
{{- if .Values.topology.enabled }}
- "--feature-gates=Topology=true"
{{- end }}
env:
- name: ADDRESS
value: "unix:///csi/{{ .Values.provisionerSocketFile }}"
volumeMounts:
- name: socket-dir
mountPath: /csi
resources:
{{ toYaml .Values.provisioner.provisioner.resources | indent 12 }}
- name: csi-snapshotter
image: {{ .Values.provisioner.snapshotter.image.repository }}:{{ .Values.provisioner.snapshotter.image.tag }}
imagePullPolicy: {{ .Values.provisioner.snapshotter.image.pullPolicy }}
args:
- "--csi-address=$(ADDRESS)"
- "--v={{ .Values.logLevel }}"
- "--timeout={{ .Values.provisioner.timeout }}"
- "--leader-election=true"
env:
- name: ADDRESS
value: "unix:///csi/{{ .Values.provisionerSocketFile }}"
volumeMounts:
- name: socket-dir
mountPath: /csi
resources:
{{ toYaml .Values.provisioner.snapshotter.resources | indent 12 }}
{{- if .Values.provisioner.attacher.enabled }}
- name: csi-attacher
image: "{{ .Values.provisioner.attacher.image.repository }}:{{ .Values.provisioner.attacher.image.tag }}"
imagePullPolicy: {{ .Values.provisioner.attacher.image.pullPolicy }}
args:
- "--v={{ .Values.logLevel }}"
- "--csi-address=$(ADDRESS)"
- "--leader-election=true"
- "--retry-interval-start=500ms"
env:
- name: ADDRESS
value: "unix:///csi/{{ .Values.provisionerSocketFile }}"
volumeMounts:
- name: socket-dir
mountPath: /csi
resources:
{{ toYaml .Values.nodeplugin.plugin.resources | indent 12 }}
{{- end }}
{{- if semverCompare ">=1.15" .Capabilities.KubeVersion.Version -}}
{{- if .Values.provisioner.resizer.enabled }}
- name: csi-resizer
image: "{{ .Values.provisioner.resizer.image.repository }}:{{ .Values.provisioner.resizer.image.tag }}"
imagePullPolicy: {{ .Values.provisioner.resizer.image.pullPolicy }}
args:
- "--v={{ .Values.logLevel }}"
- "--csi-address=$(ADDRESS)"
- "--timeout={{ .Values.provisioner.timeout }}"
- "--leader-election"
- "--retry-interval-start=500ms"
- "--handle-volume-inuse-error=false"
env:
- name: ADDRESS
value: "unix:///csi/{{ .Values.provisionerSocketFile }}"
volumeMounts:
- name: socket-dir
mountPath: /csi
resources:
{{ toYaml .Values.provisioner.resizer.resources | indent 12 }}
{{- end }}
{{- end }}
- name: csi-cephfsplugin
image: "{{ .Values.nodeplugin.plugin.image.repository }}:{{ .Values.nodeplugin.plugin.image.tag }}"
imagePullPolicy: {{ .Values.nodeplugin.plugin.image.pullPolicy }}
args:
- "--nodeid=$(NODE_ID)"
- "--type=cephfs"
- "--controllerserver=true"
- "--pidlimit=-1"
- "--endpoint=$(CSI_ENDPOINT)"
- "--v={{ .Values.logLevel }}"
- "--drivername=$(DRIVER_NAME)"
{{- if .Values.provisioner.profiling.enabled }}
- "--enableprofiling={{ .Values.provisioner.profiling.enabled }}"
{{- end }}
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: DRIVER_NAME
value: {{ .Values.driverName }}
- name: NODE_ID
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CSI_ENDPOINT
value: "unix:///csi/{{ .Values.provisionerSocketFile }}"
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: host-sys
mountPath: /sys
- name: lib-modules
mountPath: /lib/modules
readOnly: true
- name: host-dev
mountPath: /dev
- name: ceph-config
mountPath: /etc/ceph/
- name: ceph-csi-config
mountPath: /etc/ceph-csi-config/
- name: keys-tmp-dir
mountPath: /tmp/csi/keys
resources:
{{ toYaml .Values.nodeplugin.plugin.resources | indent 12 }}
{{- if .Values.provisioner.httpMetrics.enabled }}
- name: liveness-prometheus
image: "{{ .Values.nodeplugin.plugin.image.repository }}:{{ .Values.nodeplugin.plugin.image.tag }}"
imagePullPolicy: {{ .Values.nodeplugin.plugin.image.pullPolicy }}
args:
- "--type=liveness"
- "--endpoint=$(CSI_ENDPOINT)"
- "--metricsport={{ .Values.provisioner.httpMetrics.containerPort }}"
- "--metricspath=/metrics"
- "--polltime=60s"
- "--timeout=3s"
env:
- name: CSI_ENDPOINT
value: "unix:///csi/{{ .Values.provisionerSocketFile }}"
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
volumeMounts:
- name: socket-dir
mountPath: /csi
resources:
{{ toYaml .Values.nodeplugin.plugin.resources | indent 12 }}
{{- end }}
volumes:
- name: socket-dir
emptyDir: {
medium: "Memory"
}
- name: host-sys
hostPath:
path: /sys
- name: lib-modules
hostPath:
path: /lib/modules
- name: host-dev
hostPath:
path: /dev
- name: ceph-config
configMap:
name: ceph-config
- name: ceph-csi-config
configMap:
name: {{ .Values.configMapName | quote }}
{{- if .Values.configMapKey }}
items:
- key: {{ .Values.configMapKey | quote }}
path: config.json
{{- end }}
- name: keys-tmp-dir
emptyDir: {
medium: "Memory"
}
{{- if .Values.provisioner.affinity }}
affinity:
{{ toYaml .Values.provisioner.affinity | indent 8 -}}
{{- end -}}
{{- if .Values.provisioner.nodeSelector }}
nodeSelector:
{{ toYaml .Values.provisioner.nodeSelector | indent 8 -}}
{{- end -}}
{{- if .Values.provisioner.tolerations }}
tolerations:
{{ toYaml .Values.provisioner.tolerations | indent 8 -}}
{{- end -}}

View File

@ -0,0 +1,41 @@
{{- if .Values.provisioner.httpMetrics.service.enabled -}}
apiVersion: v1
kind: Service
metadata:
{{- if .Values.provisioner.httpMetrics.service.annotations }}
annotations:
{{ toYaml .Values.provisioner.httpMetrics.service.annotations | indent 4 }}
{{- end }}
name: {{ include "ceph-csi-cephfs.provisioner.fullname" . }}-http-metrics
namespace: {{ .Release.Namespace }}
labels:
app: {{ include "ceph-csi-cephfs.fullname" . }}
chart: {{ include "ceph-csi-cephfs.chart" . }}
component: {{ .Values.provisioner.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
{{- if .Values.provisioner.httpMetrics.service.clusterIP }}
clusterIP: "{{ .Values.provisioner.httpMetrics.service.clusterIP }}"
{{- end }}
{{- if .Values.provisioner.httpMetrics.service.externalIPs }}
externalIPs:
{{ toYaml .Values.provisioner.httpMetrics.service.externalIPs | indent 4 }}
{{- end }}
{{- if .Values.provisioner.httpMetrics.service.loadBalancerIP }}
loadBalancerIP: "{{ .Values.provisioner.httpMetrics.service.loadBalancerIP }}"
{{- end }}
{{- if .Values.provisioner.httpMetrics.service.loadBalancerSourceRanges }}
loadBalancerSourceRanges:
{{ toYaml .Values.provisioner.httpMetrics.service.loadBalancerSourceRanges | indent 4 }}
{{- end }}
ports:
- name: http-metrics
port: {{ .Values.provisioner.httpMetrics.service.servicePort }}
targetPort: {{ .Values.provisioner.httpMetrics.containerPort }}
selector:
app: {{ include "ceph-csi-cephfs.name" . }}
component: {{ .Values.provisioner.name }}
release: {{ .Release.Name }}
type: "{{ .Values.provisioner.httpMetrics.service.type }}"
{{- end -}}

View File

@ -0,0 +1,34 @@
{{- if .Values.provisioner.podSecurityPolicy.enabled -}}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ include "ceph-csi-cephfs.provisioner.fullname" . }}
labels:
app: {{ include "ceph-csi-cephfs.name" . }}
chart: {{ include "ceph-csi-cephfs.chart" . }}
component: {{ .Values.provisioner.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
fsGroup:
rule: RunAsAny
runAsUser:
rule: RunAsAny
seLinux:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
volumes:
- 'configMap'
- 'emptyDir'
- 'projected'
- 'secret'
- 'hostPath'
allowedHostPaths:
- pathPrefix: '/dev'
readOnly: false
- pathPrefix: '/sys'
readOnly: false
- pathPrefix: '/lib/modules'
readOnly: true
{{- end }}

View File

@ -0,0 +1,26 @@
{{- if .Values.rbac.create -}}
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ include "ceph-csi-cephfs.provisioner.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ include "ceph-csi-cephfs.name" . }}
chart: {{ include "ceph-csi-cephfs.chart" . }}
component: {{ .Values.provisioner.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
rules:
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
{{- if .Values.provisioner.podSecurityPolicy.enabled }}
- apiGroups: ['policy']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames: ['{{ include "ceph-csi-cephfs.provisioner.fullname" . }}']
{{- end -}}
{{- end -}}

View File

@ -0,0 +1,21 @@
{{- if .Values.rbac.create -}}
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ include "ceph-csi-cephfs.provisioner.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ include "ceph-csi-cephfs.name" . }}
chart: {{ include "ceph-csi-cephfs.chart" . }}
component: {{ .Values.provisioner.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
subjects:
- kind: ServiceAccount
name: {{ include "ceph-csi-cephfs.serviceAccountName.provisioner" . }}
namespace: {{ .Release.Namespace }}
roleRef:
kind: Role
name: {{ include "ceph-csi-cephfs.provisioner.fullname" . }}
apiGroup: rbac.authorization.k8s.io
{{- end -}}

View File

@ -0,0 +1,13 @@
{{- if .Values.serviceAccounts.provisioner.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "ceph-csi-cephfs.serviceAccountName.provisioner" . }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ include "ceph-csi-cephfs.name" . }}
chart: {{ include "ceph-csi-cephfs.chart" . }}
component: {{ .Values.provisioner.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
{{- end -}}

View File

@ -0,0 +1,15 @@
{{- if .Values.secret.create -}}
apiVersion: v1
kind: Secret
metadata:
name: {{ .Values.secret.name }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ include "ceph-csi-cephfs.name" . }}
chart: {{ include "ceph-csi-cephfs.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
stringData:
adminID: {{ .Values.secret.adminID }}
adminKey: {{ .Values.secret.adminKey }}
{{- end -}}

View File

@ -0,0 +1,61 @@
{{- if .Values.storageClass.create -}}
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: {{ .Values.storageClass.name }}
namespace: {{ .Release.Namespace }}
{{- if .Values.storageClass.annotations }}
annotations:
{{ toYaml .Values.storageClass.annotations | indent 4 }}
{{- end }}
labels:
app: {{ include "ceph-csi-cephfs.name" . }}
chart: {{ include "ceph-csi-cephfs.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
provisioner: {{ .Values.driverName }}
parameters:
clusterID: {{ .Values.storageClass.clusterID }}
fsName: {{ .Values.storageClass.fsName }}
{{- if .Values.storageClass.pool }}
pool: {{ .Values.storageClass.pool }}
{{- end }}
{{- if .Values.storageClass.fuseMountOptions }}
fuseMountOptions: "{{ .Values.storageClass.fuseMountOptions }}"
{{- end }}
{{- if .Values.storageClass.kernelMountOptions }}
kernelMountOptions: "{{ .Values.storageClass.kernelMountOptions }}"
{{- end }}
{{- if .Values.storageClass.mounter }}
mounter: "{{ .Values.storageClass.mounter }}"
{{- end }}
{{- if .Values.storageClass.volumeNamePrefix }}
volumeNamePrefix: "{{ .Values.storageClass.volumeNamePrefix }}"
{{- end }}
csi.storage.k8s.io/provisioner-secret-name: {{ .Values.storageClass.provisionerSecret }}
{{- if .Values.storageClass.provisionerSecretNamespace }}
csi.storage.k8s.io/provisioner-secret-namespace: {{ .Values.storageClass.provisionerSecretNamespace }}
{{ else }}
csi.storage.k8s.io/provisioner-secret-namespace: {{ .Release.Namespace }}
{{- end }}
csi.storage.k8s.io/controller-expand-secret-name: {{ .Values.storageClass.controllerExpandSecret }}
{{- if .Values.storageClass.controllerExpandSecretNamespace }}
csi.storage.k8s.io/controller-expand-secret-namespace: {{ .Values.storageClass.controllerExpandSecretNamespace }}
{{ else }}
csi.storage.k8s.io/controller-expand-secret-namespace: {{ .Release.Namespace }}
{{- end }}
csi.storage.k8s.io/node-stage-secret-name: {{ .Values.storageClass.nodeStageSecret }}
{{- if .Values.storageClass.nodeStageSecretNamespace }}
csi.storage.k8s.io/node-stage-secret-namespace: {{ .Values.storageClass.nodeStageSecretNamespace }}
{{ else }}
csi.storage.k8s.io/node-stage-secret-namespace: {{ .Release.Namespace }}
{{- end }}
reclaimPolicy: {{ .Values.storageClass.reclaimPolicy }}
allowVolumeExpansion: {{ .Values.storageClass.allowVolumeExpansion }}
{{- if .Values.storageClass.mountOptions }}
mountOptions:
{{- range .Values.storageClass.mountOptions }}
- {{ . }}
{{- end }}
{{- end }}
{{- end -}}

View File

@ -0,0 +1,321 @@
---
rbac:
# Specifies whether RBAC resources should be created
create: true
serviceAccounts:
nodeplugin:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname
name:
provisioner:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname
name:
# Configuration for the CSI to connect to the cluster
# Ref: https://github.com/ceph/ceph-csi/blob/devel/examples/README.md
# Example:
# csiConfig:
# - clusterID: "<cluster-id>"
# monitors:
# - "<MONValue1>"
# - "<MONValue2>"
# cephFS:
# subvolumeGroup: "csi"
csiConfig: []
# Set logging level for csi containers.
# Supported values from 0 to 5. 0 for general useful logs,
# 5 for trace level verbosity.
logLevel: 5
nodeplugin:
name: nodeplugin
  # If you are using the ceph-fuse client, set this value to OnDelete
updateStrategy: RollingUpdate
  # Set a user-created priorityClassName for the CSI plugin pods. The default is
  # system-node-critical, which is the highest priority
priorityClassName: system-node-critical
httpMetrics:
    # Metrics only available for cephcsi/cephcsi >= 1.2.0
# Specifies whether http metrics should be exposed
enabled: true
# The port of the container to expose the metrics
containerPort: 8081
service:
# Specifies whether a service should be created for the metrics
enabled: true
# The port to use for the service
servicePort: 8080
type: ClusterIP
# Annotations for the service
# Example:
# annotations:
# prometheus.io/scrape: "true"
# prometheus.io/port: "9080"
annotations: {}
clusterIP: ""
## List of IP addresses at which the stats-exporter service is available
## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
##
externalIPs: []
loadBalancerIP: ""
loadBalancerSourceRanges: []
profiling:
enabled: false
registrar:
image:
repository: k8s.gcr.io/sig-storage/csi-node-driver-registrar
tag: v2.3.0
pullPolicy: IfNotPresent
resources: {}
plugin:
image:
repository: quay.io/cephcsi/cephcsi
tag: canary
pullPolicy: IfNotPresent
resources: {}
nodeSelector: {}
tolerations: []
affinity: {}
# Set to true to enable Ceph Kernel clients
# on kernel < 4.17 which support quotas
# forcecephkernelclient: true
# If true, create & use Pod Security Policy resources
# https://kubernetes.io/docs/concepts/policy/pod-security-policy/
podSecurityPolicy:
enabled: false
provisioner:
name: provisioner
replicaCount: 3
strategy:
# RollingUpdate strategy replaces old pods with new ones gradually,
# without incurring downtime.
type: RollingUpdate
rollingUpdate:
# maxUnavailable is the maximum number of pods that can be
# unavailable during the update process.
maxUnavailable: 50%
# Timeout for waiting for creation or deletion of a volume
timeout: 60s
  # Set a user-created priorityClassName for the CSI provisioner pods. The default is
  # system-cluster-critical, which is lower priority than system-node-critical
priorityClassName: system-cluster-critical
httpMetrics:
    # Metrics only available for cephcsi/cephcsi >= 1.2.0
# Specifies whether http metrics should be exposed
enabled: true
# The port of the container to expose the metrics
containerPort: 8081
service:
# Specifies whether a service should be created for the metrics
enabled: true
# The port to use for the service
servicePort: 8080
type: ClusterIP
# Annotations for the service
# Example:
# annotations:
# prometheus.io/scrape: "true"
# prometheus.io/port: "9080"
annotations: {}
clusterIP: ""
## List of IP addresses at which the stats-exporter service is available
## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
##
externalIPs: []
loadBalancerIP: ""
loadBalancerSourceRanges: []
profiling:
enabled: false
provisioner:
image:
repository: k8s.gcr.io/sig-storage/csi-provisioner
tag: v3.0.0
pullPolicy: IfNotPresent
resources: {}
attacher:
name: attacher
enabled: true
image:
repository: k8s.gcr.io/sig-storage/csi-attacher
tag: v3.3.0
pullPolicy: IfNotPresent
resources: {}
resizer:
name: resizer
enabled: true
image:
repository: k8s.gcr.io/sig-storage/csi-resizer
tag: v1.3.0
pullPolicy: IfNotPresent
resources: {}
snapshotter:
image:
repository: k8s.gcr.io/sig-storage/csi-snapshotter
tag: v4.2.0
pullPolicy: IfNotPresent
resources: {}
nodeSelector: {}
tolerations: []
affinity: {}
# If true, create & use Pod Security Policy resources
# https://kubernetes.io/docs/concepts/policy/pod-security-policy/
podSecurityPolicy:
enabled: false
topology:
# Specifies whether topology based provisioning support should
# be exposed by CSI
enabled: false
# domainLabels define which node labels to use as domains
# for CSI nodeplugins to advertise their domains
# NOTE: the value here serves as an example and needs to be
# updated with node labels that define domains of interest
domainLabels:
- failure-domain/region
- failure-domain/zone
storageClass:
# Specifies whether the Storage class should be created
create: false
name: csi-cephfs-sc
# Annotations for the storage class
# Example:
# annotations:
# storageclass.kubernetes.io/is-default-class: "true"
annotations: {}
# String representing a Ceph cluster to provision storage from.
# Should be unique across all Ceph clusters in use for provisioning,
# cannot be greater than 36 bytes in length, and should remain immutable for
# the lifetime of the StorageClass in use.
clusterID: <cluster-ID>
# (required) CephFS filesystem name into which the volume shall be created
# eg: fsName: myfs
fsName: myfs
# (optional) Ceph pool into which volume data shall be stored
# pool: <cephfs-data-pool>
# For eg:
# pool: "replicapool"
pool: ""
# (optional) Comma separated string of Ceph-fuse mount options.
# For eg:
# fuseMountOptions: debug
fuseMountOptions: ""
# (optional) Comma separated string of Cephfs kernel mount options.
# Check man mount.ceph for mount options. For eg:
# kernelMountOptions: readdir_max_bytes=1048576,norbytes
kernelMountOptions: ""
# (optional) The driver can use either ceph-fuse (fuse) or
# ceph kernelclient (kernel).
# If omitted, default volume mounter will be used - this is
# determined by probing for ceph-fuse and mount.ceph
# mounter: kernel
mounter: ""
# (optional) Prefix to use for naming subvolumes.
# If omitted, defaults to "csi-vol-".
# volumeNamePrefix: "foo-bar-"
volumeNamePrefix: ""
# The secrets have to contain user and/or Ceph admin credentials.
provisionerSecret: csi-cephfs-secret
# If the Namespaces are not specified, the secrets are assumed to
# be in the Release namespace.
provisionerSecretNamespace: ""
controllerExpandSecret: csi-cephfs-secret
controllerExpandSecretNamespace: ""
nodeStageSecret: csi-cephfs-secret
nodeStageSecretNamespace: ""
reclaimPolicy: Delete
allowVolumeExpansion: true
mountOptions: []
# Mount Options
# Example:
# mountOptions:
# - discard
secret:
# Specifies whether the secret should be created
create: false
name: csi-cephfs-secret
# Key values correspond to a user name and its key, as defined in the
# ceph cluster. User ID should have required access to the 'pool'
# specified in the storage class
adminID: <plaintext ID>
adminKey: <Ceph auth key corresponding to ID above>
# This is a sample configmap that helps define a Ceph configuration as required
# by the CSI plugins.
# Sample ceph.conf available at
# https://github.com/ceph/ceph/blob/master/src/sample.ceph.conf Detailed
# documentation is available at
# https://docs.ceph.com/en/latest/rados/configuration/ceph-conf/
cephconf: |
[global]
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
# Workaround for http://tracker.ceph.com/issues/23446
fuse_set_user_groups = false
# ceph-fuse which uses libfuse2 by default has write buffer size of 2KiB
# adding 'fuse_big_writes = true' option by default to override this limit
# see https://github.com/ceph/ceph-csi/issues/1928
fuse_big_writes = true
#########################################################
# Variables for 'internal' use please use with caution! #
#########################################################
# The filename of the provisioner socket
provisionerSocketFile: csi-provisioner.sock
# The filename of the plugin socket
pluginSocketFile: csi.sock
# kubelet working directory, can be set using `--root-dir` when starting kubelet.
kubeletDir: /var/lib/kubelet
# Name of the csi-driver
driverName: cephfs.csi.ceph.com
# Name of the configmap used for state
configMapName: ceph-csi-config
# Key to use in the Configmap if not config.json
# configMapKey:
# Use an externally provided configmap
externallyManagedConfigmap: false

View File

@ -0,0 +1,21 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj

View File

@ -0,0 +1,15 @@
---
apiVersion: v1
appVersion: canary
description: "Container Storage Interface (CSI) driver,
provisioner, snapshotter, and attacher for Ceph RBD"
name: ceph-csi-rbd
version: 3-canary
keywords:
- ceph
- rbd
- ceph-csi
home: https://github.com/ceph/ceph-csi
sources:
- https://github.com/ceph/ceph-csi/tree/devel/charts/ceph-csi-rbd
icon: https://raw.githubusercontent.com/ceph/ceph-csi/devel/assets/ceph-logo.png

View File

@ -0,0 +1,188 @@
# ceph-csi-rbd
The ceph-csi-rbd chart adds rbd volume support to your cluster.
## Install from release repo
Add chart repository to install helm charts from it
```console
helm repo add ceph-csi https://ceph.github.io/csi-charts
```
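Optionally, refresh the local chart index so the latest chart versions are picked up:
```console
helm repo update
```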
## Install from local Chart
We need to enter the directory where all the charts are present
```console
cd charts
```
**Note:** the charts directory is present in the root of the ceph-csi project
### Install chart
To install the Chart into your Kubernetes cluster
- For helm 2.x
```bash
helm install --namespace "ceph-csi-rbd" --name "ceph-csi-rbd" ceph-csi/ceph-csi-rbd
```
- For helm 3.x
Create the namespace where Helm should install the components with
```bash
kubectl create namespace "ceph-csi-rbd"
```
Run the installation
```bash
helm install --namespace "ceph-csi-rbd" "ceph-csi-rbd" ceph-csi/ceph-csi-rbd
```
After installation succeeds, you can get the status of the Chart
```bash
helm status "ceph-csi-rbd"
```
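To verify that the provisioner and nodeplugin pods are running (assuming the namespace used above):
```bash
kubectl get pods --namespace "ceph-csi-rbd"
```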
### Delete Chart
If you want to delete your Chart, use this command
- For helm 2.x
```bash
helm delete --purge "ceph-csi-rbd"
```
- For helm 3.x
```bash
helm uninstall "ceph-csi-rbd" --namespace "ceph-csi-rbd"
```
If you want to delete the namespace, use this command
```bash
kubectl delete namespace ceph-csi-rbd
```
### Configuration
The following table lists the configurable parameters of the ceph-csi-rbd
chart and their default values. A minimal values override illustrating several of these parameters is shown after the table.
| Parameter | Description | Default |
| ---------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------- |
| `rbac.create` | Specifies whether RBAC resources should be created | `true` |
| `serviceAccounts.nodeplugin.create` | Specifies whether a nodeplugin ServiceAccount should be created | `true` |
| `serviceAccounts.nodeplugin.name` | The name of the nodeplugin ServiceAccount to use. If not set and create is true, a name is generated using the fullname | "" |
| `serviceAccounts.provisioner.create` | Specifies whether a provisioner ServiceAccount should be created | `true` |
| `serviceAccounts.provisioner.name` | The name of the provisioner ServiceAccount to use. If not set and create is true, a name is generated using the fullname | "" |
| `csiConfig` | Configuration for the CSI to connect to the cluster | [] |
| `csiMapping` | Configuration details of clusterID,PoolID,FscID mapping | [] |
| `encryptionKMSConfig` | Configuration for the encryption KMS | `{}` |
| `logLevel` | Set logging level for csi containers. Supported values from 0 to 5. 0 for general useful logs, 5 for trace level verbosity. | `5` |
| `nodeplugin.name` | Specifies the nodeplugins name | `nodeplugin` |
| `nodeplugin.updateStrategy` | Specifies the update Strategy. If you are using ceph-fuse client set this value to OnDelete | `RollingUpdate` |
| `nodeplugin.priorityClassName` | Set user created priorityclassName for csi plugin pods. default is system-node-critical which is highest priority | `system-node-critical` |
| `nodeplugin.profiling.enabled` | Specifies whether profiling should be enabled | `false` |
| `nodeplugin.registrar.image.repository` | Node Registrar image repository URL | `k8s.gcr.io/sig-storage/csi-node-driver-registrar` |
| `nodeplugin.registrar.image.tag` | Image tag | `v2.2.0` |
| `nodeplugin.registrar.image.pullPolicy` | Image pull policy | `IfNotPresent` |
| `nodeplugin.plugin.image.repository` | Nodeplugin image repository URL | `quay.io/cephcsi/cephcsi` |
| `nodeplugin.plugin.image.tag` | Image tag | `canary` |
| `nodeplugin.plugin.image.pullPolicy` | Image pull policy | `IfNotPresent` |
| `nodeplugin.nodeSelector` | Kubernetes `nodeSelector` to add to the Daemonset | `{}` |
| `nodeplugin.tolerations` | List of Kubernetes `tolerations` to add to the Daemonset | `{}` |
| `nodeplugin.podSecurityPolicy.enabled` | If true, create & use [Pod Security Policy resources](https://kubernetes.io/docs/concepts/policy/pod-security-policy/). | `false` |
| `provisioner.name` | Specifies the name of provisioner | `provisioner` |
| `provisioner.replicaCount` | Specifies the replicaCount | `3` |
| `provisioner.defaultFSType` | Specifies the default Fstype | `ext4` |
| `provisioner.deployController` | It enables or disables the deployment of controller which generates the OMAP data if it is not present | `true` |
| `provisioner.hardMaxCloneDepth` | Hard limit for maximum number of nested volume clones that are taken before a flatten occurs | `8` |
| `provisioner.softMaxCloneDepth` | Soft limit for maximum number of nested volume clones that are taken before a flatten occurs | `4` |
| `provisioner.maxSnapshotsOnImage` | Maximum number of snapshots allowed on rbd image without flattening | `450` |
| `provisioner.minSnapshotsOnImage` | Minimum number of snapshots allowed on rbd image to trigger flattening | `250` |
| `provisioner.skipForceFlatten` | Skip image flattening if kernel support mapping of rbd images which has the deep-flatten feature | `false` |
| `provisioner.timeout` | GRPC timeout for waiting for creation or deletion of a volume | `60s` |
| `provisioner.priorityClassName` | Set user created priorityclassName for csi provisioner pods. Default is `system-cluster-critical` which is less priority than `system-node-critical` | `system-cluster-critical` |
| `provisioner.profiling.enabled` | Specifies whether profiling should be enabled | `false` |
| `provisioner.provisioner.image.repository` | Specifies the csi-provisioner image repository URL | `k8s.gcr.io/sig-storage/csi-provisioner` |
| `provisioner.provisioner.image.tag` | Specifies image tag | `v2.2.2` |
| `provisioner.provisioner.image.pullPolicy` | Specifies pull policy | `IfNotPresent` |
| `provisioner.attacher.image.repository` | Specifies the csi-attacher image repository URL | `k8s.gcr.io/sig-storage/csi-attacher` |
| `provisioner.attacher.image.tag` | Specifies image tag | `v3.2.1` |
| `provisioner.attacher.image.pullPolicy` | Specifies pull policy | `IfNotPresent` |
| `provisioner.attacher.name` | Specifies the name of csi-attacher sidecar | `attacher` |
| `provisioner.attacher.enabled` | Specifies whether attacher sidecar is enabled | `true` |
| `provisioner.resizer.image.repository` | Specifies the csi-resizer image repository URL | `k8s.gcr.io/sig-storage/csi-resizer` |
| `provisioner.resizer.image.tag` | Specifies image tag | `v1.2.0` |
| `provisioner.resizer.image.pullPolicy` | Specifies pull policy | `IfNotPresent` |
| `provisioner.resizer.name` | Specifies the name of csi-resizer sidecar | `resizer` |
| `provisioner.resizer.enabled` | Specifies whether resizer sidecar is enabled | `true` |
| `provisioner.snapshotter.image.repository` | Specifies the csi-snapshotter image repository URL | `k8s.gcr.io/sig-storage/csi-snapshotter` |
| `provisioner.snapshotter.image.tag` | Specifies image tag | `v4.1.1` |
| `provisioner.snapshotter.image.pullPolicy` | Specifies pull policy | `IfNotPresent` |
| `provisioner.nodeSelector` | Specifies the node selector for provisioner deployment | `{}` |
| `provisioner.tolerations` | Specifies the tolerations for provisioner deployment | `{}` |
| `provisioner.affinity` | Specifies the affinity for provisioner deployment | `{}` |
| `provisioner.podSecurityPolicy.enabled` | Specifies whether podSecurityPolicy is enabled | `false` |
| `topology.enabled` | Specifies whether topology based provisioning support should be exposed by CSI | `false` |
| `topology.domainLabels` | DomainLabels define which node labels to use as domains for CSI nodeplugins to advertise their domains | `{}` |
| `provisionerSocketFile` | The filename of the provisioner socket | `csi-provisioner.sock` |
| `pluginSocketFile` | The filename of the plugin socket | `csi.sock` |
| `kubeletDir` | kubelet working directory | `/var/lib/kubelet` |
| `cephLogDirHostPath` | Host path location for ceph client processes logging, ex: rbd-nbd | `/var/log/ceph` |
| `driverName` | Name of the csi-driver | `rbd.csi.ceph.com` |
| `configMapName` | Name of the configmap which contains cluster configuration | `ceph-csi-config` |
| `externallyManagedConfigmap` | Specifies the use of an externally provided configmap | `false` |
| `kmsConfigMapName` | Name of the configmap used for encryption kms configuration | `ceph-csi-encryption-kms-config` |
| `storageClass.create` | Specifies whether the StorageClass should be created | `false` |
| `storageClass.name` | Specifies the rbd StorageClass name | `csi-rbd-sc` |
| `storageClass.annotations` | Specifies the annotations for the rbd StorageClass | `[]` |
| `storageClass.clusterID` | String representing a Ceph cluster to provision storage from | `<cluster-ID>` |
| `storageClass.dataPool` | Specifies the erasure coded pool | `""` |
| `storageClass.pool` | Ceph pool into which the RBD image shall be created | `replicapool` |
| `storageClass.thickProvision` | Specifies whether thick provision should be enabled | `false` |
| `storageClass.imageFeatures`                    | Specifies RBD image features                                                                                                                           | `layering`                                          |
| `storageClass.tryOtherMounters`                 | Specifies whether to try other mounters in case the current mounter fails to mount the rbd image for any reason                                        | `false`                                             |
| `storageClass.mounter` | Specifies RBD mounter | `""` |
| `storageClass.cephLogDir` | ceph client log location, it is the target bindmount path used inside container | `"/var/log/ceph"` |
| `storageClass.cephLogStrategy` | ceph client log strategy, available options `remove` or `compress` or `preserve` | `"remove"` |
| `storageClass.volumeNamePrefix` | Prefix to use for naming RBD images | `""` |
| `storageClass.encrypted` | Specifies whether volume should be encrypted. Set it to true if you want to enable encryption | `""` |
| `storageClass.encryptionKMSID` | Specifies the encryption kms id | `""` |
| `storageClass.topologyConstrainedPools` | Add topology constrained pools configuration, if topology based pools are setup, and topology constrained provisioning is required | `[]` |
| `storageClass.mapOptions` | Specifies comma-separated list of map options | `""` |
| `storageClass.unmapOptions`                     | Specifies comma-separated list of unmap options                                                                                                        | `""`                                                |
| `storageClass.provisionerSecret` | The secrets have to contain user and/or Ceph admin credentials. | `csi-rbd-secret` |
| `storageClass.provisionerSecretNamespace` | Specifies the provisioner secret namespace | `""` |
| `storageClass.controllerExpandSecret` | Specifies the controller expand secret name | `csi-rbd-secret` |
| `storageClass.controllerExpandSecretNamespace` | Specifies the controller expand secret namespace | `""` |
| `storageClass.nodeStageSecret` | Specifies the node stage secret name | `csi-rbd-secret` |
| `storageClass.nodeStageSecretNamespace` | Specifies the node stage secret namespace | `""` |
| `storageClass.fstype` | Specify the filesystem type of the volume | `ext4` |
| `storageClass.reclaimPolicy` | Specifies the reclaim policy of the StorageClass | `Delete` |
| `storageClass.allowVolumeExpansion` | Specifies whether volume expansion should be allowed | `true` |
| `storageClass.mountOptions` | Specifies the mount options for storageClass | `[]` |
| `secret.create` | Specifies whether the secret should be created | `false` |
| `secret.name` | Specifies the rbd secret name | `csi-rbd-secret` |
| `secret.userID` | Specifies the user ID of the rbd secret | `<plaintext ID>` |
| `secret.userKey` | Specifies the key that corresponds to the userID | `<Ceph auth key corresponding to ID above>` |
| `secret.encryptionPassphrase` | Specifies the encryption passphrase of the secret | `test_passphrase` |
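As an illustration, a minimal custom values file combining several of the parameters above might look like the following sketch (the cluster ID, monitor addresses and credentials are placeholders, not values shipped with the chart):
```yaml
csiConfig:
  - clusterID: "<cluster-id>"
    monitors:
      - "<MONValue1>"
      - "<MONValue2>"
storageClass:
  create: true
  clusterID: "<cluster-id>"
  pool: replicapool
secret:
  create: true
  userID: <plaintext ID>
  userKey: <Ceph auth key corresponding to ID above>
```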
### Command Line
You can pass the settings with helm command line parameters.
Specify each parameter using the `--set key=value` argument to `helm install`.
For Example:
```bash
helm install --namespace "ceph-csi-rbd" "ceph-csi-rbd" ceph-csi/ceph-csi-rbd --set configMapName=ceph-csi-config --set provisioner.podSecurityPolicy.enabled=true
```
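Alternatively, the same settings can be kept in a values file and passed with `-f` (the file name here is only an example):
```bash
helm install --namespace "ceph-csi-rbd" "ceph-csi-rbd" ceph-csi/ceph-csi-rbd -f my-values.yaml
```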

View File

@ -0,0 +1,2 @@
Examples on how to configure a storage class and start using the driver are here:
https://github.com/ceph/ceph-csi/tree/devel/examples/rbd
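A minimal PersistentVolumeClaim sketch against the chart's default StorageClass name
(assuming the StorageClass was created with storageClass.create=true; the claim name
and size below are arbitrary examples):

  apiVersion: v1
  kind: PersistentVolumeClaim
  metadata:
    name: rbd-pvc
  spec:
    accessModes:
      - ReadWriteOnce
    resources:
      requests:
        storage: 1Gi
    storageClassName: csi-rbd-sc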

View File

@ -0,0 +1,90 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "ceph-csi-rbd.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "ceph-csi-rbd.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "ceph-csi-rbd.nodeplugin.fullname" -}}
{{- if .Values.nodeplugin.fullnameOverride -}}
{{- .Values.nodeplugin.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- printf "%s-%s" .Release.Name .Values.nodeplugin.name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s-%s" .Release.Name $name .Values.nodeplugin.name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "ceph-csi-rbd.provisioner.fullname" -}}
{{- if .Values.provisioner.fullnameOverride -}}
{{- .Values.provisioner.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- printf "%s-%s" .Release.Name .Values.provisioner.name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s-%s" .Release.Name $name .Values.provisioner.name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "ceph-csi-rbd.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "ceph-csi-rbd.serviceAccountName.nodeplugin" -}}
{{- if .Values.serviceAccounts.nodeplugin.create -}}
{{ default (include "ceph-csi-rbd.nodeplugin.fullname" .) .Values.serviceAccounts.nodeplugin.name }}
{{- else -}}
{{ default "default" .Values.serviceAccounts.nodeplugin.name }}
{{- end -}}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "ceph-csi-rbd.serviceAccountName.provisioner" -}}
{{- if .Values.serviceAccounts.provisioner.create -}}
{{ default (include "ceph-csi-rbd.provisioner.fullname" .) .Values.serviceAccounts.provisioner.name }}
{{- else -}}
{{ default "default" .Values.serviceAccounts.provisioner.name }}
{{- end -}}
{{- end -}}

View File

@ -0,0 +1,15 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: ceph-rbd-config
namespace: {{ .Release.Namespace }}
labels:
app: {{ include "ceph-csi-rbd.name" . }}
chart: {{ include "ceph-csi-rbd.chart" . }}
component: {{ .Values.nodeplugin.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
data:
ceph.conf: |
{{ tpl .Values.cephconf . | indent 4 }}
keyring: ""

View File

@ -0,0 +1,11 @@
{{ if semverCompare ">=1.18.0-beta.1" .Capabilities.KubeVersion.Version }}
apiVersion: storage.k8s.io/v1
{{ else }}
apiVersion: storage.k8s.io/v1beta1
{{ end }}
kind: CSIDriver
metadata:
name: {{ .Values.driverName }}
spec:
attachRequired: true
podInfoOnMount: false

View File

@ -0,0 +1,18 @@
{{- if not .Values.externallyManagedConfigmap }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ .Values.configMapName | quote }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ include "ceph-csi-rbd.name" . }}
chart: {{ include "ceph-csi-rbd.chart" . }}
component: {{ .Values.nodeplugin.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
data:
config.json: |-
{{ toJson .Values.csiConfig | indent 4 }}
cluster-mapping.json: |-
{{ toJson .Values.csiMapping | indent 4 }}
{{- end }}

View File

@ -0,0 +1,14 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ .Values.kmsConfigMapName | quote }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ include "ceph-csi-rbd.name" . }}
chart: {{ include "ceph-csi-rbd.chart" . }}
component: {{ .Values.nodeplugin.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
data:
config.json: |-
{{ toJson .Values.encryptionKMSConfig | indent 4 -}}

View File

@ -0,0 +1,34 @@
{{- if .Values.rbac.create -}}
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ include "ceph-csi-rbd.nodeplugin.fullname" . }}
labels:
app: {{ include "ceph-csi-rbd.name" . }}
chart: {{ include "ceph-csi-rbd.chart" . }}
component: {{ .Values.nodeplugin.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
rules:
{{- if .Values.topology.enabled }}
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get"]
{{- end }}
  # allow reading the Vault token and connection options from the tenant's namespace
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get"]
- apiGroups: [""]
resources: ["serviceaccounts"]
verbs: ["get"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["list", "get"]
{{- end -}}

View File

@ -0,0 +1,20 @@
{{- if .Values.rbac.create -}}
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ include "ceph-csi-rbd.nodeplugin.fullname" . }}
labels:
app: {{ include "ceph-csi-rbd.name" . }}
chart: {{ include "ceph-csi-rbd.chart" . }}
component: {{ .Values.nodeplugin.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
subjects:
- kind: ServiceAccount
name: {{ include "ceph-csi-rbd.serviceAccountName.nodeplugin" . }}
namespace: {{ .Release.Namespace }}
roleRef:
kind: ClusterRole
name: {{ include "ceph-csi-rbd.nodeplugin.fullname" . }}
apiGroup: rbac.authorization.k8s.io
{{- end -}}

View File

@ -0,0 +1,227 @@
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: {{ include "ceph-csi-rbd.nodeplugin.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ include "ceph-csi-rbd.name" . }}
chart: {{ include "ceph-csi-rbd.chart" . }}
component: {{ .Values.nodeplugin.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
selector:
matchLabels:
app: {{ include "ceph-csi-rbd.name" . }}
component: {{ .Values.nodeplugin.name }}
release: {{ .Release.Name }}
updateStrategy:
type: {{ .Values.nodeplugin.updateStrategy }}
template:
metadata:
labels:
app: {{ include "ceph-csi-rbd.name" . }}
chart: {{ include "ceph-csi-rbd.chart" . }}
component: {{ .Values.nodeplugin.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
serviceAccountName: {{ include "ceph-csi-rbd.serviceAccountName.nodeplugin" . }}
hostNetwork: true
hostPID: true
{{- if .Values.nodeplugin.priorityClassName }}
priorityClassName: {{ .Values.nodeplugin.priorityClassName }}
{{- end }}
      # To use e.g. a Rook-orchestrated cluster, where the mons' FQDNs are
      # resolved through a k8s service, set the DNS policy to ClusterFirstWithHostNet
dnsPolicy: ClusterFirstWithHostNet
containers:
- name: driver-registrar
# This is necessary only for systems with SELinux, where
# non-privileged sidecar containers cannot access unix domain socket
# created by privileged CSI driver container.
securityContext:
privileged: true
image: "{{ .Values.nodeplugin.registrar.image.repository }}:{{ .Values.nodeplugin.registrar.image.tag }}"
imagePullPolicy: {{ .Values.nodeplugin.registrar.image.pullPolicy }}
args:
- "--v={{ .Values.logLevel }}"
- "--csi-address=/csi/{{ .Values.pluginSocketFile }}"
- "--kubelet-registration-path={{ .Values.kubeletDir }}/plugins/{{ .Values.driverName }}/{{ .Values.pluginSocketFile }}"
env:
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: registration-dir
mountPath: /registration
resources:
{{ toYaml .Values.nodeplugin.registrar.resources | indent 12 }}
- name: csi-rbdplugin
image: "{{ .Values.nodeplugin.plugin.image.repository }}:{{ .Values.nodeplugin.plugin.image.tag }}"
imagePullPolicy: {{ .Values.nodeplugin.plugin.image.pullPolicy }}
args:
- "--nodeid=$(NODE_ID)"
- "--pluginpath={{ .Values.kubeletDir }}/plugins"
- "--stagingpath={{ .Values.kubeletDir }}/plugins/kubernetes.io/csi/pv/"
- "--type=rbd"
- "--nodeserver=true"
- "--pidlimit=-1"
- "--endpoint=$(CSI_ENDPOINT)"
- "--csi-addons-endpoint=$(CSI_ADDONS_ENDPOINT)"
- "--v={{ .Values.logLevel }}"
- "--drivername=$(DRIVER_NAME)"
{{- if .Values.topology.enabled }}
- "--domainlabels={{ .Values.topology.domainLabels | join "," }}"
{{- end }}
{{- if .Values.nodeplugin.profiling.enabled }}
- "--enableprofiling={{ .Values.nodeplugin.profiling.enabled }}"
{{- end }}
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: DRIVER_NAME
value: {{ .Values.driverName }}
- name: NODE_ID
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CSI_ENDPOINT
value: "unix:///csi/{{ .Values.pluginSocketFile }}"
- name: CSI_ADDONS_ENDPOINT
value: "unix:///csi/csi-addons.sock"
securityContext:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
volumeMounts:
- name: socket-dir
mountPath: /csi
- mountPath: /dev
name: host-dev
- mountPath: /run/mount
name: host-mount
- mountPath: /sys
name: host-sys
- mountPath: /etc/selinux
name: etc-selinux
readOnly: true
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- name: ceph-csi-config
mountPath: /etc/ceph-csi-config/
- name: ceph-config
mountPath: /etc/ceph/
- name: ceph-csi-encryption-kms-config
mountPath: /etc/ceph-csi-encryption-kms-config/
- name: plugin-dir
mountPath: {{ .Values.kubeletDir }}/plugins
mountPropagation: "Bidirectional"
- name: mountpoint-dir
mountPath: {{ .Values.kubeletDir }}/pods
mountPropagation: "Bidirectional"
- name: keys-tmp-dir
mountPath: /tmp/csi/keys
- name: ceph-logdir
mountPath: /var/log/ceph
resources:
{{ toYaml .Values.nodeplugin.plugin.resources | indent 12 }}
{{- if .Values.nodeplugin.httpMetrics.enabled }}
- name: liveness-prometheus
securityContext:
privileged: true
image: "{{ .Values.nodeplugin.plugin.image.repository }}:{{ .Values.nodeplugin.plugin.image.tag }}"
imagePullPolicy: {{ .Values.nodeplugin.plugin.image.pullPolicy }}
args:
- "--type=liveness"
- "--endpoint=$(CSI_ENDPOINT)"
- "--metricsport={{ .Values.nodeplugin.httpMetrics.containerPort }}"
- "--metricspath=/metrics"
- "--polltime=60s"
- "--timeout=3s"
env:
- name: CSI_ENDPOINT
value: "unix:///csi/{{ .Values.pluginSocketFile }}"
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
volumeMounts:
- name: socket-dir
mountPath: /csi
resources:
{{ toYaml .Values.nodeplugin.plugin.resources | indent 12 }}
{{- end }}
volumes:
- name: socket-dir
hostPath:
path: "{{ .Values.kubeletDir }}/plugins/{{ .Values.driverName }}"
type: DirectoryOrCreate
- name: registration-dir
hostPath:
path: {{ .Values.kubeletDir }}/plugins_registry
type: Directory
- name: plugin-dir
hostPath:
path: {{ .Values.kubeletDir }}/plugins
type: Directory
- name: mountpoint-dir
hostPath:
path: {{ .Values.kubeletDir }}/pods
type: DirectoryOrCreate
- name: ceph-logdir
hostPath:
path: {{ .Values.cephLogDirHostPath }}
type: DirectoryOrCreate
- name: host-dev
hostPath:
path: /dev
- name: host-mount
hostPath:
path: /run/mount
- name: host-sys
hostPath:
path: /sys
- name: etc-selinux
hostPath:
path: /etc/selinux
- name: lib-modules
hostPath:
path: /lib/modules
- name: ceph-config
configMap:
name: ceph-rbd-config
- name: ceph-csi-config
configMap:
name: {{ .Values.configMapName | quote }}
{{- if .Values.configMapKey }}
items:
- key: {{ .Values.configMapKey | quote }}
path: config.json
{{- end }}
- name: ceph-csi-encryption-kms-config
configMap:
name: {{ .Values.kmsConfigMapName | quote }}
- name: keys-tmp-dir
emptyDir: {
medium: "Memory"
}
{{- if .Values.nodeplugin.affinity }}
affinity:
{{ toYaml .Values.nodeplugin.affinity | indent 8 -}}
{{- end -}}
{{- if .Values.nodeplugin.nodeSelector }}
nodeSelector:
{{ toYaml .Values.nodeplugin.nodeSelector | indent 8 -}}
{{- end -}}
{{- if .Values.nodeplugin.tolerations }}
tolerations:
{{ toYaml .Values.nodeplugin.tolerations | indent 8 -}}
{{- end -}}

View File

@ -0,0 +1,41 @@
{{- if .Values.nodeplugin.httpMetrics.service.enabled -}}
apiVersion: v1
kind: Service
metadata:
{{- if .Values.nodeplugin.httpMetrics.service.annotations }}
annotations:
{{ toYaml .Values.nodeplugin.httpMetrics.service.annotations | indent 4 }}
{{- end }}
name: {{ include "ceph-csi-rbd.nodeplugin.fullname" . }}-http-metrics
namespace: {{ .Release.Namespace }}
labels:
app: {{ include "ceph-csi-rbd.fullname" . }}
chart: {{ include "ceph-csi-rbd.chart" . }}
component: {{ .Values.nodeplugin.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
{{- if .Values.nodeplugin.httpMetrics.service.clusterIP }}
clusterIP: "{{ .Values.nodeplugin.httpMetrics.service.clusterIP }}"
{{- end }}
{{- if .Values.nodeplugin.httpMetrics.service.externalIPs }}
externalIPs:
{{ toYaml .Values.nodeplugin.httpMetrics.service.externalIPs | indent 4 }}
{{- end }}
{{- if .Values.nodeplugin.httpMetrics.service.loadBalancerIP }}
loadBalancerIP: "{{ .Values.nodeplugin.httpMetrics.service.loadBalancerIP }}"
{{- end }}
{{- if .Values.nodeplugin.httpMetrics.service.loadBalancerSourceRanges }}
loadBalancerSourceRanges:
{{ toYaml .Values.nodeplugin.httpMetrics.service.loadBalancerSourceRanges | indent 4 }}
{{- end }}
ports:
- name: http-metrics
port: {{ .Values.nodeplugin.httpMetrics.service.servicePort }}
targetPort: {{ .Values.nodeplugin.httpMetrics.containerPort }}
selector:
app: {{ include "ceph-csi-rbd.name" . }}
component: {{ .Values.nodeplugin.name }}
release: {{ .Release.Name }}
type: "{{ .Values.nodeplugin.httpMetrics.service.type }}"
{{- end -}}

View File

@ -0,0 +1,48 @@
{{- if .Values.nodeplugin.podSecurityPolicy.enabled -}}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ include "ceph-csi-rbd.nodeplugin.fullname" . }}
labels:
app: {{ include "ceph-csi-rbd.name" . }}
chart: {{ include "ceph-csi-rbd.chart" . }}
component: {{ .Values.nodeplugin.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
allowPrivilegeEscalation: true
allowedCapabilities:
- 'SYS_ADMIN'
fsGroup:
rule: RunAsAny
privileged: true
hostNetwork: true
hostPID: true
runAsUser:
rule: RunAsAny
seLinux:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
volumes:
- 'configMap'
- 'emptyDir'
- 'projected'
- 'secret'
- 'hostPath'
allowedHostPaths:
- pathPrefix: '/dev'
readOnly: false
- pathPrefix: '/run/mount'
readOnly: false
- pathPrefix: '/sys'
readOnly: false
- pathPrefix: '/etc/selinux'
readOnly: true
- pathPrefix: '/lib/modules'
readOnly: true
- pathPrefix: '{{ .Values.cephLogDirHostPath }}'
readOnly: false
- pathPrefix: '{{ .Values.kubeletDir }}'
readOnly: false
{{- end }}

View File

@ -0,0 +1,18 @@
{{- if and .Values.rbac.create .Values.nodeplugin.podSecurityPolicy.enabled -}}
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ include "ceph-csi-rbd.nodeplugin.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ include "ceph-csi-rbd.name" . }}
chart: {{ include "ceph-csi-rbd.chart" . }}
component: {{ .Values.nodeplugin.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
rules:
- apiGroups: ['policy']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames: ['{{ include "ceph-csi-rbd.nodeplugin.fullname" . }}']
{{- end -}}

View File

@ -0,0 +1,21 @@
{{- if and .Values.rbac.create .Values.nodeplugin.podSecurityPolicy.enabled -}}
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ include "ceph-csi-rbd.nodeplugin.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ include "ceph-csi-rbd.name" . }}
chart: {{ include "ceph-csi-rbd.chart" . }}
component: {{ .Values.nodeplugin.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
subjects:
- kind: ServiceAccount
name: {{ include "ceph-csi-rbd.serviceAccountName.nodeplugin" . }}
namespace: {{ .Release.Namespace }}
roleRef:
kind: Role
name: {{ include "ceph-csi-rbd.nodeplugin.fullname" . }}
apiGroup: rbac.authorization.k8s.io
{{- end -}}

View File

@ -0,0 +1,13 @@
{{- if .Values.serviceAccounts.nodeplugin.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "ceph-csi-rbd.serviceAccountName.nodeplugin" . }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ include "ceph-csi-rbd.name" . }}
chart: {{ include "ceph-csi-rbd.chart" . }}
component: {{ .Values.nodeplugin.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
{{- end -}}

View File

@ -0,0 +1,71 @@
{{- if .Values.rbac.create -}}
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ include "ceph-csi-rbd.provisioner.fullname" . }}
labels:
app: {{ include "ceph-csi-rbd.name" . }}
chart: {{ include "ceph-csi-rbd.chart" . }}
component: {{ .Values.provisioner.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "update", "delete", "patch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get", "create", "update"]
{{- if .Values.provisioner.attacher.enabled }}
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "update", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments/status"]
verbs: ["patch"]
{{- end }}
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots"]
verbs: ["get", "list"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["create", "get", "list", "watch", "update", "delete"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents/status"]
verbs: ["update"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get"]
- apiGroups: [""]
resources: ["serviceaccounts"]
verbs: ["get"]
{{- if .Values.provisioner.resizer.enabled }}
- apiGroups: [""]
resources: ["persistentvolumeclaims/status"]
verbs: ["update", "patch"]
{{- end }}
{{- if .Values.topology.enabled }}
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list","watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["csinodes"]
verbs: ["get", "list", "watch"]
{{- end }}
{{- end -}}

View File

@ -0,0 +1,20 @@
{{- if .Values.rbac.create -}}
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ include "ceph-csi-rbd.provisioner.fullname" . }}
labels:
app: {{ include "ceph-csi-rbd.name" . }}
chart: {{ include "ceph-csi-rbd.chart" . }}
component: {{ .Values.provisioner.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
subjects:
- kind: ServiceAccount
name: {{ include "ceph-csi-rbd.serviceAccountName.provisioner" . }}
namespace: {{ .Release.Namespace }}
roleRef:
kind: ClusterRole
name: {{ include "ceph-csi-rbd.provisioner.fullname" . }}
apiGroup: rbac.authorization.k8s.io
{{- end -}}

View File

@ -0,0 +1,281 @@
kind: Deployment
apiVersion: apps/v1
metadata:
name: {{ include "ceph-csi-rbd.provisioner.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ include "ceph-csi-rbd.name" . }}
chart: {{ include "ceph-csi-rbd.chart" . }}
component: {{ .Values.provisioner.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
replicas: {{ .Values.provisioner.replicaCount }}
strategy:
type: {{ .Values.provisioner.strategy.type }}
{{- if eq .Values.provisioner.strategy.type "RollingUpdate" }}
rollingUpdate:
maxUnavailable: {{ .Values.provisioner.strategy.rollingUpdate.maxUnavailable }}
{{- end }}
selector:
matchLabels:
app: {{ include "ceph-csi-rbd.name" . }}
component: {{ .Values.provisioner.name }}
release: {{ .Release.Name }}
template:
metadata:
labels:
app: {{ include "ceph-csi-rbd.name" . }}
chart: {{ include "ceph-csi-rbd.chart" . }}
component: {{ .Values.provisioner.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
{{- if gt (int .Values.provisioner.replicaCount) 1 }}
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app
operator: In
values:
- {{ include "ceph-csi-rbd.name" . }}
- key: component
operator: In
values:
- {{ .Values.provisioner.name }}
topologyKey: "kubernetes.io/hostname"
{{- end }}
serviceAccountName: {{ include "ceph-csi-rbd.serviceAccountName.provisioner" . }}
{{- if .Values.provisioner.priorityClassName }}
priorityClassName: {{ .Values.provisioner.priorityClassName }}
{{- end }}
containers:
- name: csi-provisioner
image: "{{ .Values.provisioner.provisioner.image.repository }}:{{ .Values.provisioner.provisioner.image.tag }}"
imagePullPolicy: {{ .Values.provisioner.provisioner.image.pullPolicy }}
args:
- "--csi-address=$(ADDRESS)"
- "--v={{ .Values.logLevel }}"
- "--timeout={{ .Values.provisioner.timeout }}"
- "--leader-election=true"
- "--retry-interval-start=500ms"
- "--default-fstype={{ .Values.provisioner.defaultFSType }}"
- "--extra-create-metadata=true"
{{- if .Values.topology.enabled }}
- "--feature-gates=Topology=true"
{{- end }}
env:
- name: ADDRESS
value: "unix:///csi/{{ .Values.provisionerSocketFile }}"
volumeMounts:
- name: socket-dir
mountPath: /csi
resources:
{{ toYaml .Values.provisioner.provisioner.resources | indent 12 }}
{{- if .Values.provisioner.resizer.enabled }}
- name: csi-resizer
image: "{{ .Values.provisioner.resizer.image.repository }}:{{ .Values.provisioner.resizer.image.tag }}"
imagePullPolicy: {{ .Values.provisioner.resizer.image.pullPolicy }}
args:
- "--v={{ .Values.logLevel }}"
- "--csi-address=$(ADDRESS)"
- "--timeout={{ .Values.provisioner.timeout }}"
- "--leader-election"
- "--retry-interval-start=500ms"
- "--handle-volume-inuse-error=false"
env:
- name: ADDRESS
value: "unix:///csi/{{ .Values.provisionerSocketFile }}"
volumeMounts:
- name: socket-dir
mountPath: /csi
resources:
{{ toYaml .Values.provisioner.resizer.resources | indent 12 }}
{{- end }}
- name: csi-snapshotter
image: {{ .Values.provisioner.snapshotter.image.repository }}:{{ .Values.provisioner.snapshotter.image.tag }}
imagePullPolicy: {{ .Values.provisioner.snapshotter.image.pullPolicy }}
args:
- "--csi-address=$(ADDRESS)"
- "--v={{ .Values.logLevel }}"
- "--timeout={{ .Values.provisioner.timeout }}"
- "--leader-election=true"
env:
- name: ADDRESS
value: "unix:///csi/{{ .Values.provisionerSocketFile }}"
volumeMounts:
- name: socket-dir
mountPath: /csi
resources:
{{ toYaml .Values.provisioner.snapshotter.resources | indent 12 }}
{{- if .Values.provisioner.attacher.enabled }}
- name: csi-attacher
image: "{{ .Values.provisioner.attacher.image.repository }}:{{ .Values.provisioner.attacher.image.tag }}"
imagePullPolicy: {{ .Values.provisioner.attacher.image.pullPolicy }}
args:
- "--v={{ .Values.logLevel }}"
- "--csi-address=$(ADDRESS)"
- "--leader-election=true"
- "--retry-interval-start=500ms"
env:
- name: ADDRESS
value: "unix:///csi/{{ .Values.provisionerSocketFile }}"
volumeMounts:
- name: socket-dir
mountPath: /csi
resources:
{{ toYaml .Values.provisioner.attacher.resources | indent 12 }}
{{- end }}
- name: csi-rbdplugin
image: "{{ .Values.nodeplugin.plugin.image.repository }}:{{ .Values.nodeplugin.plugin.image.tag }}"
imagePullPolicy: {{ .Values.nodeplugin.plugin.image.pullPolicy }}
args:
- "--nodeid=$(NODE_ID)"
- "--type=rbd"
- "--controllerserver=true"
- "--pidlimit=-1"
- "--endpoint=$(CSI_ENDPOINT)"
- "--csi-addons-endpoint=$(CSI_ADDONS_ENDPOINT)"
- "--v={{ .Values.logLevel }}"
- "--drivername=$(DRIVER_NAME)"
- "--rbdhardmaxclonedepth={{ .Values.provisioner.hardMaxCloneDepth }}"
- "--rbdsoftmaxclonedepth={{ .Values.provisioner.softMaxCloneDepth }}"
- "--maxsnapshotsonimage={{ .Values.provisioner.maxSnapshotsOnImage }}"
- "--minsnapshotsonimage={{ .Values.provisioner.minSnapshotsOnImage }}"
{{- if .Values.provisioner.skipForceFlatten }}
- "--skipforceflatten={{ .Values.provisioner.skipForceFlatten }}"
{{- end }}
{{- if .Values.provisioner.profiling.enabled }}
- "--enableprofiling={{ .Values.provisioner.profiling.enabled }}"
{{- end }}
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: DRIVER_NAME
value: {{ .Values.driverName }}
- name: NODE_ID
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CSI_ENDPOINT
value: "unix:///csi/{{ .Values.provisionerSocketFile }}"
- name: CSI_ADDONS_ENDPOINT
value: "unix:///csi/csi-addons.sock"
volumeMounts:
- name: socket-dir
mountPath: /csi
- mountPath: /dev
name: host-dev
- mountPath: /sys
name: host-sys
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- name: ceph-csi-config
mountPath: /etc/ceph-csi-config/
- name: ceph-config
mountPath: /etc/ceph/
- name: ceph-csi-encryption-kms-config
mountPath: /etc/ceph-csi-encryption-kms-config/
- name: keys-tmp-dir
mountPath: /tmp/csi/keys
resources:
{{ toYaml .Values.nodeplugin.plugin.resources | indent 12 }}
{{- if .Values.provisioner.deployController }}
- name: csi-rbdplugin-controller
image: "{{ .Values.nodeplugin.plugin.image.repository }}:{{ .Values.nodeplugin.plugin.image.tag }}"
imagePullPolicy: {{ .Values.nodeplugin.plugin.image.pullPolicy }}
args:
- "--type=controller"
- "--v={{ .Values.logLevel }}"
- "--drivername=$(DRIVER_NAME)"
- "--drivernamespace=$(DRIVER_NAMESPACE)"
env:
- name: DRIVER_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: DRIVER_NAME
value: {{ .Values.driverName }}
volumeMounts:
- name: ceph-csi-config
mountPath: /etc/ceph-csi-config/
- name: keys-tmp-dir
mountPath: /tmp/csi/keys
- name: ceph-config
mountPath: /etc/ceph/
resources:
{{ toYaml .Values.nodeplugin.plugin.resources | indent 12 }}
{{- end }}
{{- if .Values.provisioner.httpMetrics.enabled }}
- name: liveness-prometheus
image: "{{ .Values.nodeplugin.plugin.image.repository }}:{{ .Values.nodeplugin.plugin.image.tag }}"
imagePullPolicy: {{ .Values.nodeplugin.plugin.image.pullPolicy }}
args:
- "--type=liveness"
- "--endpoint=$(CSI_ENDPOINT)"
- "--metricsport={{ .Values.provisioner.httpMetrics.containerPort }}"
- "--metricspath=/metrics"
- "--polltime=60s"
- "--timeout=3s"
env:
- name: CSI_ENDPOINT
value: "unix:///csi/{{ .Values.provisionerSocketFile }}"
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
volumeMounts:
- name: socket-dir
mountPath: /csi
resources:
{{ toYaml .Values.nodeplugin.plugin.resources | indent 12 }}
{{- end }}
volumes:
- name: socket-dir
emptyDir: {
medium: "Memory"
}
- name: host-dev
hostPath:
path: /dev
- name: host-sys
hostPath:
path: /sys
- name: lib-modules
hostPath:
path: /lib/modules
- name: ceph-config
configMap:
name: ceph-rbd-config
- name: ceph-csi-config
configMap:
name: {{ .Values.configMapName | quote }}
{{- if .Values.configMapKey }}
items:
- key: {{ .Values.configMapKey | quote }}
path: config.json
{{- end }}
- name: ceph-csi-encryption-kms-config
configMap:
name: {{ .Values.kmsConfigMapName | quote }}
- name: keys-tmp-dir
emptyDir: {
medium: "Memory"
}
{{- if .Values.provisioner.affinity }}
affinity:
{{ toYaml .Values.provisioner.affinity | indent 8 -}}
{{- end -}}
{{- if .Values.provisioner.nodeSelector }}
nodeSelector:
{{ toYaml .Values.provisioner.nodeSelector | indent 8 -}}
{{- end -}}
{{- if .Values.provisioner.tolerations }}
tolerations:
{{ toYaml .Values.provisioner.tolerations | indent 8 -}}
{{- end -}}

View File

@ -0,0 +1,41 @@
{{- if .Values.provisioner.httpMetrics.service.enabled -}}
apiVersion: v1
kind: Service
metadata:
{{- if .Values.provisioner.httpMetrics.service.annotations }}
annotations:
{{ toYaml .Values.provisioner.httpMetrics.service.annotations | indent 4 }}
{{- end }}
name: {{ include "ceph-csi-rbd.provisioner.fullname" . }}-http-metrics
namespace: {{ .Release.Namespace }}
labels:
app: {{ include "ceph-csi-rbd.fullname" . }}
chart: {{ include "ceph-csi-rbd.chart" . }}
component: {{ .Values.provisioner.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
{{- if .Values.provisioner.httpMetrics.service.clusterIP }}
clusterIP: "{{ .Values.provisioner.httpMetrics.service.clusterIP }}"
{{- end }}
{{- if .Values.provisioner.httpMetrics.service.externalIPs }}
externalIPs:
{{ toYaml .Values.provisioner.httpMetrics.service.externalIPs | indent 4 }}
{{- end }}
{{- if .Values.provisioner.httpMetrics.service.loadBalancerIP }}
loadBalancerIP: "{{ .Values.provisioner.httpMetrics.service.loadBalancerIP }}"
{{- end }}
{{- if .Values.provisioner.httpMetrics.service.loadBalancerSourceRanges }}
loadBalancerSourceRanges:
{{ toYaml .Values.provisioner.httpMetrics.service.loadBalancerSourceRanges | indent 4 }}
{{- end }}
ports:
- name: http-metrics
port: {{ .Values.provisioner.httpMetrics.service.servicePort }}
targetPort: {{ .Values.provisioner.httpMetrics.containerPort }}
selector:
app: {{ include "ceph-csi-rbd.name" . }}
component: {{ .Values.provisioner.name }}
release: {{ .Release.Name }}
type: "{{ .Values.provisioner.httpMetrics.service.type }}"
{{- end -}}

View File

@ -0,0 +1,34 @@
{{- if .Values.provisioner.podSecurityPolicy.enabled -}}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ include "ceph-csi-rbd.provisioner.fullname" . }}
labels:
app: {{ include "ceph-csi-rbd.name" . }}
chart: {{ include "ceph-csi-rbd.chart" . }}
component: {{ .Values.provisioner.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
fsGroup:
rule: RunAsAny
runAsUser:
rule: RunAsAny
seLinux:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
volumes:
- 'configMap'
- 'emptyDir'
- 'projected'
- 'secret'
- 'hostPath'
allowedHostPaths:
- pathPrefix: '/dev'
readOnly: false
- pathPrefix: '/sys'
readOnly: false
- pathPrefix: '/lib/modules'
readOnly: true
{{- end }}

View File

@ -0,0 +1,26 @@
{{- if .Values.rbac.create -}}
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ include "ceph-csi-rbd.provisioner.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ include "ceph-csi-rbd.name" . }}
chart: {{ include "ceph-csi-rbd.chart" . }}
component: {{ .Values.provisioner.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
rules:
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list", "watch", "create","update", "delete"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
{{- if .Values.provisioner.podSecurityPolicy.enabled }}
- apiGroups: ['policy']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames: ['{{ include "ceph-csi-rbd.provisioner.fullname" . }}']
{{- end -}}
{{- end -}}

View File

@ -0,0 +1,21 @@
{{- if .Values.rbac.create -}}
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ include "ceph-csi-rbd.provisioner.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ include "ceph-csi-rbd.name" . }}
chart: {{ include "ceph-csi-rbd.chart" . }}
component: {{ .Values.provisioner.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
subjects:
- kind: ServiceAccount
name: {{ include "ceph-csi-rbd.serviceAccountName.provisioner" . }}
namespace: {{ .Release.Namespace }}
roleRef:
kind: Role
name: {{ include "ceph-csi-rbd.provisioner.fullname" . }}
apiGroup: rbac.authorization.k8s.io
{{- end -}}

View File

@ -0,0 +1,13 @@
{{- if .Values.serviceAccounts.provisioner.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "ceph-csi-rbd.serviceAccountName.provisioner" . }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ include "ceph-csi-rbd.name" . }}
chart: {{ include "ceph-csi-rbd.chart" . }}
component: {{ .Values.provisioner.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
{{- end -}}

View File

@ -0,0 +1,17 @@
{{- if .Values.secret.create -}}
apiVersion: v1
kind: Secret
metadata:
name: {{ .Values.secret.name }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ include "ceph-csi-rbd.name" . }}
chart: {{ include "ceph-csi-rbd.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
stringData:
userID: {{ .Values.secret.userID }}
userKey: {{ .Values.secret.userKey }}
encryptionPassphrase: {{ .Values.secret.encryptionPassphrase }}
{{- end -}}

View File

@ -0,0 +1,83 @@
{{- if .Values.storageClass.create -}}
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: {{ .Values.storageClass.name }}
namespace: {{ .Release.Namespace }}
{{- if .Values.storageClass.annotations }}
annotations:
{{ toYaml .Values.storageClass.annotations | indent 4 }}
{{- end }}
labels:
app: {{ include "ceph-csi-rbd.name" . }}
chart: {{ include "ceph-csi-rbd.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
provisioner: {{ .Values.driverName }}
parameters:
clusterID: {{ .Values.storageClass.clusterID }}
pool: {{ .Values.storageClass.pool }}
imageFeatures: {{ .Values.storageClass.imageFeatures }}
  thickProvision: {{ .Values.storageClass.thickProvision | quote }}
{{- if .Values.storageClass.tryOtherMounters }}
  tryOtherMounters: {{ .Values.storageClass.tryOtherMounters | quote }}
{{- end }}
{{- if .Values.storageClass.mounter }}
mounter: {{ .Values.storageClass.mounter }}
{{- end }}
{{- if .Values.storageClass.cephLogDir }}
cephLogDir: {{ .Values.storageClass.cephLogDir }}
{{- end }}
{{- if .Values.storageClass.cephLogStrategy }}
cephLogStrategy: {{ .Values.storageClass.cephLogStrategy }}
{{- end }}
{{- if .Values.storageClass.dataPool }}
dataPool: {{ .Values.storageClass.dataPool }}
{{- end }}
{{- if .Values.storageClass.volumeNamePrefix }}
volumeNamePrefix: "{{ .Values.storageClass.volumeNamePrefix }}"
{{- end }}
{{- if .Values.storageClass.encrypted }}
encrypted: "{{ .Values.storageClass.encrypted }}"
{{- end }}
{{- if .Values.storageClass.encryptionKMSID }}
encryptionKMSID: {{ .Values.storageClass.encryptionKMSID }}
{{- end }}
{{- if .Values.storageClass.topologyConstrainedPools }}
topologyConstrainedPools:
{{ toYaml .Values.storageClass.topologyConstrainedPools | indent 4 -}}
{{- end }}
{{- if .Values.storageClass.mapOptions }}
mapOptions: {{ .Values.storageClass.mapOptions }}
{{- end }}
{{- if .Values.storageClass.unmapOptions }}
unmapOptions: {{ .Values.storageClass.unmapOptions }}
{{- end }}
csi.storage.k8s.io/provisioner-secret-name: {{ .Values.storageClass.provisionerSecret }}
{{- if .Values.storageClass.provisionerSecretNamespace }}
csi.storage.k8s.io/provisioner-secret-namespace: {{ .Values.storageClass.provisionerSecretNamespace }}
{{ else }}
csi.storage.k8s.io/provisioner-secret-namespace: {{ .Release.Namespace }}
{{- end }}
csi.storage.k8s.io/controller-expand-secret-name: {{ .Values.storageClass.controllerExpandSecret }}
{{- if .Values.storageClass.controllerExpandSecretNamespace }}
csi.storage.k8s.io/controller-expand-secret-namespace: {{ .Values.storageClass.controllerExpandSecretNamespace }}
{{ else }}
csi.storage.k8s.io/controller-expand-secret-namespace: {{ .Release.Namespace }}
{{- end }}
csi.storage.k8s.io/node-stage-secret-name: {{ .Values.storageClass.nodeStageSecret }}
{{- if .Values.storageClass.nodeStageSecretNamespace }}
csi.storage.k8s.io/node-stage-secret-namespace: {{ .Values.storageClass.nodeStageSecretNamespace }}
{{ else }}
csi.storage.k8s.io/node-stage-secret-namespace: {{ .Release.Namespace }}
{{- end }}
csi.storage.k8s.io/fstype: {{ .Values.storageClass.fstype }}
reclaimPolicy: {{ .Values.storageClass.reclaimPolicy }}
allowVolumeExpansion: {{ .Values.storageClass.allowVolumeExpansion }}
{{- if .Values.storageClass.mountOptions }}
mountOptions:
{{- range .Values.storageClass.mountOptions }}
- {{ . }}
{{- end }}
{{- end }}
{{- end -}}

View File

@ -0,0 +1,459 @@
---
rbac:
# Specifies whether RBAC resources should be created
create: true
serviceAccounts:
nodeplugin:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname
name:
provisioner:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname
name:
# Configuration for the CSI to connect to the cluster
# Ref: https://github.com/ceph/ceph-csi/blob/devel/examples/README.md
# Example:
# csiConfig:
# - clusterID: "<cluster-id>"
# monitors:
# - "<MONValue1>"
# - "<MONValue2>"
csiConfig: []
# Configuration details of clusterID,PoolID and FscID mapping
# csiMapping:
# - clusterIDMapping:
# clusterID on site1: clusterID on site2
# RBDPoolIDMapping:
# - poolID on site1: poolID on site2
# CephFSFscIDMapping:
# - CephFS FscID on site1: CephFS FscID on site2
csiMapping: []
# Configuration for the encryption KMS
# Ref: https://github.com/ceph/ceph-csi/blob/devel/docs/deploy-rbd.md
# Example:
# encryptionKMSConfig:
# vault-unique-id-1:
# encryptionKMSType: vault
# vaultAddress: https://vault.example.com
# vaultAuthPath: /v1/auth/kubernetes/login
# vaultRole: csi-kubernetes
# vaultPassphraseRoot: /v1/secret
# vaultPassphrasePath: ceph-csi/
# vaultCAVerify: "false"
encryptionKMSConfig: {}
# Set logging level for csi containers.
# Supported values from 0 to 5. 0 for general useful logs,
# 5 for trace level verbosity.
logLevel: 5
nodeplugin:
name: nodeplugin
# set user created priorityclassName for csi plugin pods. default is
# system-node-critical which is high priority
priorityClassName: system-node-critical
# if you are using rbd-nbd client set this value to OnDelete
updateStrategy: RollingUpdate
httpMetrics:
# Metrics only available for cephcsi/cephcsi => 1.2.0
# Specifies whether http metrics should be exposed
enabled: true
# The port of the container to expose the metrics
containerPort: 8080
service:
# Specifies whether a service should be created for the metrics
enabled: true
# The port to use for the service
servicePort: 8080
type: ClusterIP
# Annotations for the service
# Example:
# annotations:
# prometheus.io/scrape: "true"
# prometheus.io/port: "8080"
annotations: {}
clusterIP: ""
## List of IP addresses at which the stats-exporter service is available
## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
##
externalIPs: []
loadBalancerIP: ""
loadBalancerSourceRanges: []
profiling:
# enable profiling to check for memory leaks
enabled: false
registrar:
image:
repository: k8s.gcr.io/sig-storage/csi-node-driver-registrar
tag: v2.3.0
pullPolicy: IfNotPresent
resources: {}
plugin:
image:
repository: quay.io/cephcsi/cephcsi
tag: canary
pullPolicy: IfNotPresent
resources: {}
nodeSelector: {}
tolerations: []
affinity: {}
# If true, create & use Pod Security Policy resources
# https://kubernetes.io/docs/concepts/policy/pod-security-policy/
podSecurityPolicy:
enabled: false
provisioner:
name: provisioner
replicaCount: 3
strategy:
# RollingUpdate strategy replaces old pods with new ones gradually,
# without incurring downtime.
type: RollingUpdate
rollingUpdate:
# maxUnavailable is the maximum number of pods that can be
# unavailable during the update process.
maxUnavailable: 50%
# if fstype is not specified in storageclass, ext4 is default
defaultFSType: ext4
# deployController to enable or disable the deployment of controller which
# generates the OMAP data if its not Present.
deployController: true
# Timeout for waiting for creation or deletion of a volume
timeout: 60s
# Hard limit for maximum number of nested volume clones that are taken before
# a flatten occurs
hardMaxCloneDepth: 8
# Soft limit for maximum number of nested volume clones that are taken before
# a flatten occurs
softMaxCloneDepth: 4
# Maximum number of snapshots allowed on rbd image without flattening
maxSnapshotsOnImage: 450
# Minimum number of snapshots allowed on rbd image to trigger flattening
minSnapshotsOnImage: 250
# skip image flattening if kernel support mapping of rbd images
# which has the deep-flatten feature
# skipForceFlatten: false
# set user created priorityclassName for csi provisioner pods. default is
# system-cluster-critical which is less priority than system-node-critical
priorityClassName: system-cluster-critical
httpMetrics:
# Metrics only available for cephcsi/cephcsi => 1.2.0
# Specifies whether http metrics should be exposed
enabled: true
# The port of the container to expose the metrics
containerPort: 8080
service:
# Specifies whether a service should be created for the metrics
enabled: true
# The port to use for the service
servicePort: 8080
type: ClusterIP
# Annotations for the service
# Example:
# annotations:
# prometheus.io/scrape: "true"
# prometheus.io/port: "8080"
annotations: {}
clusterIP: ""
## List of IP addresses at which the stats-exporter service is available
## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
##
externalIPs: []
loadBalancerIP: ""
loadBalancerSourceRanges: []
profiling:
# enable profiling to check for memory leaks
enabled: false
provisioner:
image:
repository: k8s.gcr.io/sig-storage/csi-provisioner
tag: v3.0.0
pullPolicy: IfNotPresent
resources: {}
attacher:
name: attacher
enabled: true
image:
repository: k8s.gcr.io/sig-storage/csi-attacher
tag: v3.3.0
pullPolicy: IfNotPresent
resources: {}
resizer:
name: resizer
enabled: true
image:
repository: k8s.gcr.io/sig-storage/csi-resizer
tag: v1.3.0
pullPolicy: IfNotPresent
resources: {}
snapshotter:
image:
repository: k8s.gcr.io/sig-storage/csi-snapshotter
tag: v4.2.0
pullPolicy: IfNotPresent
resources: {}
nodeSelector: {}
tolerations: []
affinity: {}
# If true, create & use Pod Security Policy resources
# https://kubernetes.io/docs/concepts/policy/pod-security-policy/
podSecurityPolicy:
enabled: false
topology:
# Specifies whether topology based provisioning support should
# be exposed by CSI
enabled: false
# domainLabels define which node labels to use as domains
# for CSI nodeplugins to advertise their domains
# NOTE: the value here serves as an example and needs to be
# updated with node labels that define domains of interest
domainLabels:
- failure-domain/region
- failure-domain/zone
storageClass:
# Specifies whether the storageclass should be created
create: false
name: csi-rbd-sc
# Annotations for the storage class
# Example:
# annotations:
# storageclass.kubernetes.io/is-default-class: "true"
annotations: {}
# (required) String representing a Ceph cluster to provision storage from.
# Should be unique across all Ceph clusters in use for provisioning,
# cannot be greater than 36 bytes in length, and should remain immutable for
# the lifetime of the StorageClass in use.
clusterID: <cluster-ID>
# (optional) If you want to use erasure coded pool with RBD, you need to
# create two pools. one erasure coded and one replicated.
# You need to specify the replicated pool here in the `pool` parameter, it is
# used for the metadata of the images.
# The erasure coded pool must be set as the `dataPool` parameter below.
# dataPool: <ec-data-pool>
dataPool: ""
# (required) Ceph pool into which the RBD image shall be created
# eg: pool: replicapool
pool: replicapool
# Set thickProvision to true if you want RBD images to be fully allocated on
# creation (thin provisioning is the default).
thickProvision: false
# (required) RBD image features, CSI creates image with image-format 2
# CSI RBD currently supports `layering`, `journaling`, `exclusive-lock`,
# `object-map`, `fast-diff` features. If `journaling` is enabled, must
# enable `exclusive-lock` too.
# imageFeatures: layering,journaling,exclusive-lock,object-map,fast-diff
imageFeatures: "layering"
# (optional) Specifies whether to try other mounters in case if the current
# mounter fails to mount the rbd image for any reason. True means fallback
# to next mounter, default is set to false.
# Note: tryOtherMounters is currently useful to fallback from krbd to rbd-nbd
# in case if any of the specified imageFeatures is not supported by krbd
# driver on node scheduled for application pod launch, but in the future this
# should work with any mounter type.
# tryOtherMounters: false
# (optional) uncomment the following to use rbd-nbd as mounter
# on supported nodes
# mounter: rbd-nbd
mounter: ""
# (optional) ceph client log location, eg: rbd-nbd
# By default host-path /var/log/ceph of node is bind-mounted into
# csi-rbdplugin pod at /var/log/ceph mount path. This is to configure
# target bindmount path used inside container for ceph clients logging.
# See docs/rbd-nbd.md for available configuration options.
# cephLogDir: /var/log/ceph
cephLogDir: ""
# (optional) ceph client log strategy
# By default, log file belonging to a particular volume will be deleted
# on unmap, but you can choose to just compress instead of deleting it
# or even preserve the log file in text format as it is.
# Available options `remove` or `compress` or `preserve`
# cephLogStrategy: remove
cephLogStrategy: ""
# (optional) Prefix to use for naming RBD images.
# If omitted, defaults to "csi-vol-".
# volumeNamePrefix: "foo-bar-"
volumeNamePrefix: ""
# (optional) Instruct the plugin it has to encrypt the volume
# By default it is disabled. Valid values are "true" or "false".
# A string is expected here, i.e. "true", not true.
# encrypted: "true"
encrypted: ""
# (optional) Use external key management system for encryption passphrases by
# specifying a unique ID matching KMS ConfigMap. The ID is only used for
# correlation to configmap entry.
encryptionKMSID: ""
# Add topology constrained pools configuration, if topology based pools
# are setup, and topology constrained provisioning is required.
# For further information read TODO<doc>
# topologyConstrainedPools: |
# [{"poolName":"pool0",
# "dataPool":"ec-pool0" # optional, erasure-coded pool for data
# "domainSegments":[
# {"domainLabel":"region","value":"east"},
# {"domainLabel":"zone","value":"zone1"}]},
# {"poolName":"pool1",
# "dataPool":"ec-pool1" # optional, erasure-coded pool for data
# "domainSegments":[
# {"domainLabel":"region","value":"east"},
# {"domainLabel":"zone","value":"zone2"}]},
# {"poolName":"pool2",
# "dataPool":"ec-pool2" # optional, erasure-coded pool for data
# "domainSegments":[
# {"domainLabel":"region","value":"west"},
# {"domainLabel":"zone","value":"zone1"}]}
# ]
topologyConstrainedPools: []
# (optional) mapOptions is a comma-separated list of map options.
# For krbd options refer
# https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options
# For nbd options refer
# https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options
# Format:
# mapOptions: "<mounter>:op1,op2;<mounter>:op1,op2"
# An empty mounter field is treated as krbd type for compatibility.
# eg:
# mapOptions: "krbd:lock_on_read,queue_depth=1024;nbd:try-netlink"
mapOptions: ""
# (optional) unmapOptions is a comma-separated list of unmap options.
# For krbd options refer
# https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options
# For nbd options refer
# https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options
# Format:
# unmapOptions: "<mounter>:op1,op2;<mounter>:op1,op2"
# An empty mounter field is treated as krbd type for compatibility.
# eg:
# unmapOptions: "krbd:force;nbd:force"
unmapOptions: ""
# The secrets have to contain Ceph credentials with required access
# to the 'pool'.
provisionerSecret: csi-rbd-secret
# If Namespaces are left empty, the secrets are assumed to be in the
# Release namespace.
provisionerSecretNamespace: ""
controllerExpandSecret: csi-rbd-secret
controllerExpandSecretNamespace: ""
nodeStageSecret: csi-rbd-secret
nodeStageSecretNamespace: ""
# Specify the filesystem type of the volume. If not specified,
# csi-provisioner will set default as `ext4`.
fstype: ext4
reclaimPolicy: Delete
allowVolumeExpansion: true
mountOptions: []
# Mount Options
# Example:
# mountOptions:
# - discard
secret:
# Specifies whether the secret should be created
create: false
name: csi-rbd-secret
# Key values correspond to a user name and its key, as defined in the
# ceph cluster. User ID should have required access to the 'pool'
# specified in the storage class
userID: <plaintext ID>
userKey: <Ceph auth key corresponding to userID above>
# Encryption passphrase
encryptionPassphrase: test_passphrase
# This is a sample configmap that helps define a Ceph configuration as required
# by the CSI plugins.
# Sample ceph.conf available at
# https://github.com/ceph/ceph/blob/master/src/sample.ceph.conf Detailed
# documentation is available at
# https://docs.ceph.com/en/latest/rados/configuration/ceph-conf/
cephconf: |
[global]
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
# Workaround for http://tracker.ceph.com/issues/23446
fuse_set_user_groups = false
# ceph-fuse which uses libfuse2 by default has write buffer size of 2KiB
# adding 'fuse_big_writes = true' option by default to override this limit
# see https://github.com/ceph/ceph-csi/issues/1928
fuse_big_writes = true
#########################################################
# Variables for 'internal' use please use with caution! #
#########################################################
# The filename of the provisioner socket
provisionerSocketFile: csi-provisioner.sock
# The filename of the plugin socket
pluginSocketFile: csi.sock
# kubelet working directory,can be set using `--root-dir` when starting kubelet.
kubeletDir: /var/lib/kubelet
# Host path location for ceph client processes logging, ex: rbd-nbd
cephLogDirHostPath: /var/log/ceph
# Name of the csi-driver
driverName: rbd.csi.ceph.com
# Name of the configmap used for state
configMapName: ceph-csi-config
# Key to use in the Configmap if not config.json
# configMapKey:
# Use an externally provided configmap
externallyManagedConfigmap: false
# Name of the configmap used for encryption kms configuration
kmsConfigMapName: ceph-csi-encryption-kms-config

View File

@ -0,0 +1,21 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj

View File

@ -0,0 +1,9 @@
dependencies:
- name: kibana
repository: https://charts.bitnami.com/bitnami
version: 10.2.16
- name: common
repository: https://charts.bitnami.com/bitnami
version: 2.2.4
digest: sha256:4d5edc18f6f4c69d66d3e2c1723fcd7f722141237052c3852562232c141bfcf3
generated: "2023-03-18T21:45:26.026217282Z"

View File

@ -0,0 +1,28 @@
annotations:
category: Analytics
licenses: Apache-2.0
apiVersion: v2
appVersion: 8.6.2
dependencies:
- condition: global.kibanaEnabled
name: kibana
repository: https://charts.bitnami.com/bitnami
version: 10.x.x
- name: common
repository: https://charts.bitnami.com/bitnami
tags:
- bitnami-common
version: 2.x.x
description: Elasticsearch is a distributed search and analytics engine. It is used for web search, log monitoring, and real-time analytics. Ideal for Big Data applications.
home: https://github.com/bitnami/charts/tree/main/bitnami/elasticsearch
icon: https://bitnami.com/assets/stacks/elasticsearch/img/elasticsearch-stack-220x234.png
keywords:
- elasticsearch
maintainers:
- name: Bitnami
url: https://github.com/bitnami/charts
name: elasticsearch
sources:
- https://github.com/bitnami/containers/tree/main/bitnami/elasticsearch
- https://www.elastic.co/products/elasticsearch
version: 19.6.0

View File

@ -0,0 +1,969 @@
<!--- app-name: Elasticsearch -->
# Bitnami Elasticsearch Stack
Elasticsearch is a distributed search and analytics engine. It is used for web search, log monitoring, and real-time analytics. Ideal for Big Data applications.
[Overview of Elasticsearch](https://www.elastic.co/products/elasticsearch)
Trademarks: This software listing is packaged by Bitnami. The respective trademarks mentioned in the offering are owned by the respective companies, and use of them does not imply any affiliation or endorsement.
## TL;DR
```console
helm repo add my-repo https://charts.bitnami.com/bitnami
helm install my-release my-repo/elasticsearch
```
## Introduction
This chart bootstraps a [Elasticsearch](https://github.com/bitnami/containers/tree/main/bitnami/elasticsearch) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters.
## Prerequisites
- Kubernetes 1.19+
- Helm 3.2.0+
- PV provisioner support in the underlying infrastructure
## Installing the Chart
To install the chart with the release name `my-release`:
```console
helm repo add my-repo https://charts.bitnami.com/bitnami
helm install my-release my-repo/elasticsearch
```
These commands deploy Elasticsearch on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation.
> **Tip**: List all releases using `helm list`
## Uninstalling the Chart
To uninstall/delete the `my-release` release:
```console
helm delete my-release
```
The command removes all the Kubernetes components associated with the chart and deletes the release. Remove also the chart using `--purge` option:
```console
helm delete --purge my-release
```
## Parameters
### Global parameters
| Name | Description | Value |
| -------------------------------------------- | ----------------------------------------------------------------------------------------------------- | --------------- |
| `global.imageRegistry` | Global Docker image registry | `""` |
| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` |
| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` |
| `global.elasticsearch.service.name` | Elasticsearch service name to be used in the Kibana subchart (ignored if kibanaEnabled=false) | `elasticsearch` |
| `global.elasticsearch.service.ports.restAPI` | Elasticsearch service restAPI port to be used in the Kibana subchart (ignored if kibanaEnabled=false) | `9200` |
| `global.kibanaEnabled` | Whether or not to enable Kibana | `false` |
### Common parameters
| Name | Description | Value |
| ------------------------ | --------------------------------------------------------------------------------------- | --------------- |
| `kubeVersion` | Override Kubernetes version | `""` |
| `nameOverride` | String to partially override common.names.fullname | `""` |
| `fullnameOverride` | String to fully override common.names.fullname | `""` |
| `commonLabels` | Labels to add to all deployed objects | `{}` |
| `commonAnnotations` | Annotations to add to all deployed objects | `{}` |
| `clusterDomain` | Kubernetes cluster domain name | `cluster.local` |
| `extraDeploy` | Array of extra objects to deploy with the release | `[]` |
| `namespaceOverride` | String to fully override common.names.namespace | `""` |
| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` |
| `diagnosticMode.command` | Command to override all containers in the deployment | `["sleep"]` |
| `diagnosticMode.args` | Args to override all containers in the deployment | `["infinity"]` |
### Elasticsearch cluster Parameters
| Name | Description | Value |
| ------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------ |
| `clusterName` | Elasticsearch cluster name | `elastic` |
| `containerPorts.restAPI` | Elasticsearch REST API port | `9200` |
| `containerPorts.transport` | Elasticsearch Transport port | `9300` |
| `plugins` | Comma, semi-colon or space separated list of plugins to install at initialization | `""` |
| `snapshotRepoPath` | File System snapshot repository path | `""` |
| `config` | Override elasticsearch configuration | `{}` |
| `extraConfig` | Append extra configuration to the elasticsearch node configuration | `{}` |
| `extraHosts` | A list of external hosts which are part of this cluster | `[]` |
| `extraVolumes` | A list of volumes to be added to the pod | `[]` |
| `extraVolumeMounts` | A list of volume mounts to be added to the pod | `[]` |
| `initScripts` | Dictionary of init scripts. Evaluated as a template. | `{}` |
| `initScriptsCM` | ConfigMap with the init scripts. Evaluated as a template. | `""` |
| `initScriptsSecret` | Secret containing `/docker-entrypoint-initdb.d` scripts to be executed at initialization time that contain sensitive data. Evaluated as a template. | `""` |
| `extraEnvVars` | Array containing extra env vars to be added to all pods (evaluated as a template) | `[]` |
| `extraEnvVarsCM` | ConfigMap containing extra env vars to be added to all pods (evaluated as a template) | `""` |
| `extraEnvVarsSecret` | Secret containing extra env vars to be added to all pods (evaluated as a template) | `""` |
| `sidecars` | Add additional sidecar containers to the all elasticsearch node pod(s) | `[]` |
| `initContainers` | Add additional init containers to the all elasticsearch node pod(s) | `[]` |
| `useIstioLabels` | Use this variable to add Istio labels to all pods | `true` |
| `image.registry` | Elasticsearch image registry | `docker.io` |
| `image.repository` | Elasticsearch image repository | `bitnami/elasticsearch` |
| `image.tag` | Elasticsearch image tag (immutable tags are recommended) | `8.6.2-debian-11-r10` |
| `image.digest` | Elasticsearch image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
| `image.pullPolicy` | Elasticsearch image pull policy | `IfNotPresent` |
| `image.pullSecrets` | Elasticsearch image pull secrets | `[]` |
| `image.debug` | Enable Elasticsearch image debug mode | `false` |
| `security.enabled` | Enable X-Pack Security settings | `false` |
| `security.elasticPassword` | Password for 'elastic' user | `""` |
| `security.existingSecret` | Name of the existing secret containing the Elasticsearch password and | `""` |
| `security.fipsMode` | Configure elasticsearch with FIPS 140 compliant mode | `false` |
| `security.tls.restEncryption` | Enable SSL/TLS encryption for Elasticsearch REST API. | `true` |
| `security.tls.autoGenerated` | Create self-signed TLS certificates. | `false` |
| `security.tls.verificationMode` | Verification mode for SSL communications. | `full` |
| `security.tls.master.existingSecret` | Existing secret containing the certificates for the master nodes | `""` |
| `security.tls.data.existingSecret` | Existing secret containing the certificates for the data nodes | `""` |
| `security.tls.ingest.existingSecret` | Existing secret containing the certificates for the ingest nodes | `""` |
| `security.tls.coordinating.existingSecret` | Existing secret containing the certificates for the coordinating nodes | `""` |
| `security.tls.keystoreFilename` | Name of the keystore file | `elasticsearch.keystore.jks` |
| `security.tls.truststoreFilename` | Name of the truststore | `elasticsearch.truststore.jks` |
| `security.tls.usePemCerts` | Use this variable if your secrets contain PEM certificates instead of JKS/PKCS12 | `false` |
| `security.tls.passwordsSecret` | Existing secret containing the Keystore and Truststore passwords, or key password if PEM certs are used | `""` |
| `security.tls.keystorePassword` | Password to access the JKS/PKCS12 keystore or PEM key when they are password-protected. | `""` |
| `security.tls.truststorePassword` | Password to access the JKS/PKCS12 truststore when they are password-protected. | `""` |
| `security.tls.keyPassword` | Password to access the PEM key when they are password-protected. | `""` |
| `security.tls.secretKeystoreKey` | Name of the secret key containing the Keystore password | `""` |
| `security.tls.secretTruststoreKey` | Name of the secret key containing the Truststore password | `""` |
| `security.tls.secretKey` | Name of the secret key containing the PEM key password | `""` |
### Traffic Exposure Parameters
| Name | Description | Value |
| ---------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- | ------------------------ |
| `service.type` | Elasticsearch service type | `ClusterIP` |
| `service.ports.restAPI` | Elasticsearch service REST API port | `9200` |
| `service.ports.transport` | Elasticsearch service transport port | `9300` |
| `service.nodePorts.restAPI` | Node port for REST API | `""` |
| `service.nodePorts.transport` | Node port for REST API | `""` |
| `service.clusterIP` | Elasticsearch service Cluster IP | `""` |
| `service.loadBalancerIP` | Elasticsearch service Load Balancer IP | `""` |
| `service.loadBalancerSourceRanges` | Elasticsearch service Load Balancer sources | `[]` |
| `service.externalTrafficPolicy` | Elasticsearch service external traffic policy | `Cluster` |
| `service.annotations` | Additional custom annotations for Elasticsearch service | `{}` |
| `service.extraPorts` | Extra ports to expose in Elasticsearch service (normally used with the `sidecars` value) | `[]` |
| `service.sessionAffinity` | Session Affinity for Kubernetes service, can be "None" or "ClientIP" | `None` |
| `service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` |
| `ingress.enabled` | Enable ingress record generation for Elasticsearch | `false` |
| `ingress.pathType` | Ingress path type | `ImplementationSpecific` |
| `ingress.apiVersion` | Force Ingress API version (automatically detected if not set) | `""` |
| `ingress.hostname` | Default host for the ingress record | `elasticsearch.local` |
| `ingress.path` | Default path for the ingress record | `/` |
| `ingress.annotations` | Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. | `{}` |
| `ingress.tls` | Enable TLS configuration for the host defined at `ingress.hostname` parameter | `false` |
| `ingress.selfSigned` | Create a TLS secret for this ingress record using self-signed certificates generated by Helm | `false` |
| `ingress.ingressClassName` | IngressClass that will be be used to implement the Ingress (Kubernetes 1.18+) | `""` |
| `ingress.extraHosts` | An array with additional hostname(s) to be covered with the ingress record | `[]` |
| `ingress.extraPaths` | An array with additional arbitrary paths that may need to be added to the ingress under the main host | `[]` |
| `ingress.extraTls` | TLS configuration for additional hostname(s) to be covered with this ingress record | `[]` |
| `ingress.secrets` | Custom TLS certificates as secrets | `[]` |
| `ingress.extraRules` | Additional rules to be covered with this ingress record | `[]` |
### Master-elegible nodes parameters
| Name | Description | Value |
| ---------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------- |
| `master.masterOnly` | Deploy the Elasticsearch master-elegible nodes as master-only nodes. Recommended for high-demand deployments. | `true` |
| `master.replicaCount` | Number of master-elegible replicas to deploy | `2` |
| `master.nameOverride` | String to partially override elasticsearch.master.fullname | `""` |
| `master.fullnameOverride` | String to fully override elasticsearch.master.fullname | `""` |
| `master.servicenameOverride` | String to fully override elasticsearch.master.servicename | `""` |
| `master.annotations` | Annotations for the master statefulset | `{}` |
| `master.updateStrategy.type` | Master-elegible nodes statefulset stategy type | `RollingUpdate` |
| `master.resources.limits` | The resources limits for elasticsearch containers | `{}` |
| `master.resources.requests` | The requested resources for elasticsearch containers | `{}` |
| `master.heapSize` | Elasticsearch master-eligible node heap size. | `128m` |
| `master.podSecurityContext.enabled` | Enabled master-elegible pods' Security Context | `true` |
| `master.podSecurityContext.fsGroup` | Set master-elegible pod's Security Context fsGroup | `1001` |
| `master.containerSecurityContext.enabled` | Enabled master-elegible containers' Security Context | `true` |
| `master.containerSecurityContext.runAsUser` | Set master-elegible containers' Security Context runAsUser | `1001` |
| `master.containerSecurityContext.runAsNonRoot` | Set master-elegible containers' Security Context runAsNonRoot | `true` |
| `master.hostAliases` | master-elegible pods host aliases | `[]` |
| `master.podLabels` | Extra labels for master-elegible pods | `{}` |
| `master.podAnnotations` | Annotations for master-elegible pods | `{}` |
| `master.podAffinityPreset` | Pod affinity preset. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `master.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `master.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `master.nodeAffinityPreset.key` | Node label key to match. Ignored if `master.affinity` is set | `""` |
| `master.nodeAffinityPreset.values` | Node label values to match. Ignored if `master.affinity` is set | `[]` |
| `master.affinity` | Affinity for master-elegible pods assignment | `{}` |
| `master.nodeSelector` | Node labels for master-elegible pods assignment | `{}` |
| `master.tolerations` | Tolerations for master-elegible pods assignment | `[]` |
| `master.priorityClassName` | master-elegible pods' priorityClassName | `""` |
| `master.schedulerName` | Name of the k8s scheduler (other than default) for master-elegible pods | `""` |
| `master.terminationGracePeriodSeconds` | In seconds, time the given to the Elasticsearch Master pod needs to terminate gracefully | `""` |
| `master.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` |
| `master.podManagementPolicy` | podManagementPolicy to manage scaling operation of Elasticsearch master pods | `Parallel` |
| `master.startupProbe.enabled` | Enable/disable the startup probe (master nodes pod) | `false` |
| `master.startupProbe.initialDelaySeconds` | Delay before startup probe is initiated (master nodes pod) | `90` |
| `master.startupProbe.periodSeconds` | How often to perform the probe (master nodes pod) | `10` |
| `master.startupProbe.timeoutSeconds` | When the probe times out (master nodes pod) | `5` |
| `master.startupProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (master nodes pod) | `1` |
| `master.startupProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` |
| `master.livenessProbe.enabled` | Enable/disable the liveness probe (master-eligible nodes pod) | `true` |
| `master.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (master-eligible nodes pod) | `90` |
| `master.livenessProbe.periodSeconds` | How often to perform the probe (master-eligible nodes pod) | `10` |
| `master.livenessProbe.timeoutSeconds` | When the probe times out (master-eligible nodes pod) | `5` |
| `master.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (master-eligible nodes pod) | `1` |
| `master.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` |
| `master.readinessProbe.enabled` | Enable/disable the readiness probe (master-eligible nodes pod) | `true` |
| `master.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (master-eligible nodes pod) | `90` |
| `master.readinessProbe.periodSeconds` | How often to perform the probe (master-eligible nodes pod) | `10` |
| `master.readinessProbe.timeoutSeconds` | When the probe times out (master-eligible nodes pod) | `5` |
| `master.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (master-eligible nodes pod) | `1` |
| `master.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` |
| `master.customStartupProbe` | Override default startup probe | `{}` |
| `master.customLivenessProbe` | Override default liveness probe | `{}` |
| `master.customReadinessProbe` | Override default readiness probe | `{}` |
| `master.command` | Override default container command (useful when using custom images) | `[]` |
| `master.args` | Override default container args (useful when using custom images) | `[]` |
| `master.lifecycleHooks` | for the master-elegible container(s) to automate configuration before or after startup | `{}` |
| `master.extraEnvVars` | Array with extra environment variables to add to master-elegible nodes | `[]` |
| `master.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for master-elegible nodes | `""` |
| `master.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for master-elegible nodes | `""` |
| `master.extraVolumes` | Optionally specify extra list of additional volumes for the master-elegible pod(s) | `[]` |
| `master.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the master-elegible container(s) | `[]` |
| `master.sidecars` | Add additional sidecar containers to the master-elegible pod(s) | `[]` |
| `master.initContainers` | Add additional init containers to the master-elegible pod(s) | `[]` |
| `master.persistence.enabled` | Enable persistence using a `PersistentVolumeClaim` | `true` |
| `master.persistence.storageClass` | Persistent Volume Storage Class | `""` |
| `master.persistence.existingClaim` | Existing Persistent Volume Claim | `""` |
| `master.persistence.existingVolume` | Existing Persistent Volume for use as volume match label selector to the `volumeClaimTemplate`. Ignored when `master.persistence.selector` is set. | `""` |
| `master.persistence.selector` | Configure custom selector for existing Persistent Volume. Overwrites `master.persistence.existingVolume` | `{}` |
| `master.persistence.annotations` | Persistent Volume Claim annotations | `{}` |
| `master.persistence.accessModes` | Persistent Volume Access Modes | `["ReadWriteOnce"]` |
| `master.persistence.size` | Persistent Volume Size | `8Gi` |
| `master.serviceAccount.create` | Specifies whether a ServiceAccount should be created | `false` |
| `master.serviceAccount.name` | Name of the service account to use. If not set and create is true, a name is generated using the fullname template. | `""` |
| `master.serviceAccount.automountServiceAccountToken` | Automount service account token for the server service account | `true` |
| `master.serviceAccount.annotations` | Annotations for service account. Evaluated as a template. Only used if `create` is `true`. | `{}` |
| `master.autoscaling.enabled` | Whether enable horizontal pod autoscale | `false` |
| `master.autoscaling.minReplicas` | Configure a minimum amount of pods | `3` |
| `master.autoscaling.maxReplicas` | Configure a maximum amount of pods | `11` |
| `master.autoscaling.targetCPU` | Define the CPU target to trigger the scaling actions (utilization percentage) | `""` |
| `master.autoscaling.targetMemory` | Define the memory target to trigger the scaling actions (utilization percentage) | `""` |
### Data-only nodes parameters
| Name | Description | Value |
| -------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------- |
| `data.replicaCount` | Number of data-only replicas to deploy | `2` |
| `data.nameOverride` | String to partially override elasticsearch.data.fullname | `""` |
| `data.fullnameOverride` | String to fully override elasticsearch.data.fullname | `""` |
| `data.servicenameOverride` | String to fully override elasticsearch.data.servicename | `""` |
| `data.annotations` | Annotations for the data statefulset | `{}` |
| `data.updateStrategy.type` | Data-only nodes statefulset stategy type | `RollingUpdate` |
| `data.resources.limits` | The resources limits for the data containers | `{}` |
| `data.resources.requests` | The requested resources for the data containers | `{}` |
| `data.heapSize` | Elasticsearch data node heap size. | `1024m` |
| `data.podSecurityContext.enabled` | Enabled data pods' Security Context | `true` |
| `data.podSecurityContext.fsGroup` | Set data pod's Security Context fsGroup | `1001` |
| `data.containerSecurityContext.enabled` | Enabled data containers' Security Context | `true` |
| `data.containerSecurityContext.runAsUser` | Set data containers' Security Context runAsUser | `1001` |
| `data.containerSecurityContext.runAsNonRoot` | Set data containers' Security Context runAsNonRoot | `true` |
| `data.hostAliases` | data pods host aliases | `[]` |
| `data.podLabels` | Extra labels for data pods | `{}` |
| `data.podAnnotations` | Annotations for data pods | `{}` |
| `data.podAffinityPreset` | Pod affinity preset. Ignored if `data.affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `data.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `data.affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `data.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `data.affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `data.nodeAffinityPreset.key` | Node label key to match. Ignored if `data.affinity` is set | `""` |
| `data.nodeAffinityPreset.values` | Node label values to match. Ignored if `data.affinity` is set | `[]` |
| `data.affinity` | Affinity for data pods assignment | `{}` |
| `data.nodeSelector` | Node labels for data pods assignment | `{}` |
| `data.tolerations` | Tolerations for data pods assignment | `[]` |
| `data.priorityClassName` | data pods' priorityClassName | `""` |
| `data.schedulerName` | Name of the k8s scheduler (other than default) for data pods | `""` |
| `data.terminationGracePeriodSeconds` | In seconds, time the given to the Elasticsearch data pod needs to terminate gracefully | `""` |
| `data.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` |
| `data.podManagementPolicy` | podManagementPolicy to manage scaling operation of Elasticsearch data pods | `Parallel` |
| `data.startupProbe.enabled` | Enable/disable the startup probe (data nodes pod) | `false` |
| `data.startupProbe.initialDelaySeconds` | Delay before startup probe is initiated (data nodes pod) | `90` |
| `data.startupProbe.periodSeconds` | How often to perform the probe (data nodes pod) | `10` |
| `data.startupProbe.timeoutSeconds` | When the probe times out (data nodes pod) | `5` |
| `data.startupProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (data nodes pod) | `1` |
| `data.startupProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` |
| `data.livenessProbe.enabled` | Enable/disable the liveness probe (data nodes pod) | `true` |
| `data.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (data nodes pod) | `90` |
| `data.livenessProbe.periodSeconds` | How often to perform the probe (data nodes pod) | `10` |
| `data.livenessProbe.timeoutSeconds` | When the probe times out (data nodes pod) | `5` |
| `data.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (data nodes pod) | `1` |
| `data.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` |
| `data.readinessProbe.enabled` | Enable/disable the readiness probe (data nodes pod) | `true` |
| `data.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (data nodes pod) | `90` |
| `data.readinessProbe.periodSeconds` | How often to perform the probe (data nodes pod) | `10` |
| `data.readinessProbe.timeoutSeconds` | When the probe times out (data nodes pod) | `5` |
| `data.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (data nodes pod) | `1` |
| `data.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` |
| `data.customStartupProbe` | Override default startup probe | `{}` |
| `data.customLivenessProbe` | Override default liveness probe | `{}` |
| `data.customReadinessProbe` | Override default readiness probe | `{}` |
| `data.command` | Override default container command (useful when using custom images) | `[]` |
| `data.args` | Override default container args (useful when using custom images) | `[]` |
| `data.lifecycleHooks` | for the data container(s) to automate configuration before or after startup | `{}` |
| `data.extraEnvVars` | Array with extra environment variables to add to data nodes | `[]` |
| `data.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for data nodes | `""` |
| `data.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for data nodes | `""` |
| `data.extraVolumes` | Optionally specify extra list of additional volumes for the data pod(s) | `[]` |
| `data.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the data container(s) | `[]` |
| `data.sidecars` | Add additional sidecar containers to the data pod(s) | `[]` |
| `data.initContainers` | Add additional init containers to the data pod(s) | `[]` |
| `data.persistence.enabled` | Enable persistence using a `PersistentVolumeClaim` | `true` |
| `data.persistence.storageClass` | Persistent Volume Storage Class | `""` |
| `data.persistence.existingClaim` | Existing Persistent Volume Claim | `""` |
| `data.persistence.existingVolume` | Existing Persistent Volume for use as volume match label selector to the `volumeClaimTemplate`. Ignored when `data.persistence.selector` is set. | `""` |
| `data.persistence.selector` | Configure custom selector for existing Persistent Volume. Overwrites `data.persistence.existingVolume` | `{}` |
| `data.persistence.annotations` | Persistent Volume Claim annotations | `{}` |
| `data.persistence.accessModes` | Persistent Volume Access Modes | `["ReadWriteOnce"]` |
| `data.persistence.size` | Persistent Volume Size | `8Gi` |
| `data.serviceAccount.create` | Specifies whether a ServiceAccount should be created | `false` |
| `data.serviceAccount.name` | Name of the service account to use. If not set and create is true, a name is generated using the fullname template. | `""` |
| `data.serviceAccount.automountServiceAccountToken` | Automount service account token for the server service account | `true` |
| `data.serviceAccount.annotations` | Annotations for service account. Evaluated as a template. Only used if `create` is `true`. | `{}` |
| `data.autoscaling.enabled` | Whether enable horizontal pod autoscale | `false` |
| `data.autoscaling.minReplicas` | Configure a minimum amount of pods | `3` |
| `data.autoscaling.maxReplicas` | Configure a maximum amount of pods | `11` |
| `data.autoscaling.targetCPU` | Define the CPU target to trigger the scaling actions (utilization percentage) | `""` |
| `data.autoscaling.targetMemory` | Define the memory target to trigger the scaling actions (utilization percentage) | `""` |
### Coordinating-only nodes parameters
| Name | Description | Value |
| ---------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------- | --------------- |
| `coordinating.replicaCount` | Number of coordinating-only replicas to deploy | `2` |
| `coordinating.nameOverride` | String to partially override elasticsearch.coordinating.fullname | `""` |
| `coordinating.fullnameOverride` | String to fully override elasticsearch.coordinating.fullname | `""` |
| `coordinating.servicenameOverride` | String to fully override elasticsearch.coordinating.servicename | `""` |
| `coordinating.annotations` | Annotations for the coordinating-only statefulset | `{}` |
| `coordinating.updateStrategy.type` | Coordinating-only nodes statefulset stategy type | `RollingUpdate` |
| `coordinating.resources.limits` | The resources limits for the coordinating-only containers | `{}` |
| `coordinating.resources.requests` | The requested resources for the coordinating-only containers | `{}` |
| `coordinating.heapSize` | Elasticsearch coordinating node heap size. | `128m` |
| `coordinating.podSecurityContext.enabled` | Enabled coordinating-only pods' Security Context | `true` |
| `coordinating.podSecurityContext.fsGroup` | Set coordinating-only pod's Security Context fsGroup | `1001` |
| `coordinating.containerSecurityContext.enabled` | Enabled coordinating-only containers' Security Context | `true` |
| `coordinating.containerSecurityContext.runAsUser` | Set coordinating-only containers' Security Context runAsUser | `1001` |
| `coordinating.containerSecurityContext.runAsNonRoot` | Set coordinating-only containers' Security Context runAsNonRoot | `true` |
| `coordinating.hostAliases` | coordinating-only pods host aliases | `[]` |
| `coordinating.podLabels` | Extra labels for coordinating-only pods | `{}` |
| `coordinating.podAnnotations` | Annotations for coordinating-only pods | `{}` |
| `coordinating.podAffinityPreset` | Pod affinity preset. Ignored if `coordinating.affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `coordinating.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `coordinating.affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `coordinating.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `coordinating.affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `coordinating.nodeAffinityPreset.key` | Node label key to match. Ignored if `coordinating.affinity` is set | `""` |
| `coordinating.nodeAffinityPreset.values` | Node label values to match. Ignored if `coordinating.affinity` is set | `[]` |
| `coordinating.affinity` | Affinity for coordinating-only pods assignment | `{}` |
| `coordinating.nodeSelector` | Node labels for coordinating-only pods assignment | `{}` |
| `coordinating.tolerations` | Tolerations for coordinating-only pods assignment | `[]` |
| `coordinating.priorityClassName` | coordinating-only pods' priorityClassName | `""` |
| `coordinating.schedulerName` | Name of the k8s scheduler (other than default) for coordinating-only pods | `""` |
| `coordinating.terminationGracePeriodSeconds` | In seconds, time the given to the Elasticsearch coordinating pod needs to terminate gracefully | `""` |
| `coordinating.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` |
| `coordinating.podManagementPolicy` | podManagementPolicy to manage scaling operation of Elasticsearch coordinating pods | `Parallel` |
| `coordinating.startupProbe.enabled` | Enable/disable the startup probe (coordinating-only nodes pod) | `false` |
| `coordinating.startupProbe.initialDelaySeconds` | Delay before startup probe is initiated (coordinating-only nodes pod) | `90` |
| `coordinating.startupProbe.periodSeconds` | How often to perform the probe (coordinating-only nodes pod) | `10` |
| `coordinating.startupProbe.timeoutSeconds` | When the probe times out (coordinating-only nodes pod) | `5` |
| `coordinating.startupProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (coordinating-only nodes pod) | `1` |
| `coordinating.startupProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` |
| `coordinating.livenessProbe.enabled` | Enable/disable the liveness probe (coordinating-only nodes pod) | `true` |
| `coordinating.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (coordinating-only nodes pod) | `90` |
| `coordinating.livenessProbe.periodSeconds` | How often to perform the probe (coordinating-only nodes pod) | `10` |
| `coordinating.livenessProbe.timeoutSeconds` | When the probe times out (coordinating-only nodes pod) | `5` |
| `coordinating.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (coordinating-only nodes pod) | `1` |
| `coordinating.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` |
| `coordinating.readinessProbe.enabled` | Enable/disable the readiness probe (coordinating-only nodes pod) | `true` |
| `coordinating.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (coordinating-only nodes pod) | `90` |
| `coordinating.readinessProbe.periodSeconds` | How often to perform the probe (coordinating-only nodes pod) | `10` |
| `coordinating.readinessProbe.timeoutSeconds` | When the probe times out (coordinating-only nodes pod) | `5` |
| `coordinating.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (coordinating-only nodes pod) | `1` |
| `coordinating.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` |
| `coordinating.customStartupProbe` | Override default startup probe | `{}` |
| `coordinating.customLivenessProbe` | Override default liveness probe | `{}` |
| `coordinating.customReadinessProbe` | Override default readiness probe | `{}` |
| `coordinating.command` | Override default container command (useful when using custom images) | `[]` |
| `coordinating.args` | Override default container args (useful when using custom images) | `[]` |
| `coordinating.lifecycleHooks` | for the coordinating-only container(s) to automate configuration before or after startup | `{}` |
| `coordinating.extraEnvVars` | Array with extra environment variables to add to coordinating-only nodes | `[]` |
| `coordinating.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for coordinating-only nodes | `""` |
| `coordinating.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for coordinating-only nodes | `""` |
| `coordinating.extraVolumes` | Optionally specify extra list of additional volumes for the coordinating-only pod(s) | `[]` |
| `coordinating.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the coordinating-only container(s) | `[]` |
| `coordinating.sidecars` | Add additional sidecar containers to the coordinating-only pod(s) | `[]` |
| `coordinating.initContainers` | Add additional init containers to the coordinating-only pod(s) | `[]` |
| `coordinating.serviceAccount.create` | Specifies whether a ServiceAccount should be created | `false` |
| `coordinating.serviceAccount.name` | Name of the service account to use. If not set and create is true, a name is generated using the fullname template. | `""` |
| `coordinating.serviceAccount.automountServiceAccountToken` | Automount service account token for the server service account | `true` |
| `coordinating.serviceAccount.annotations` | Annotations for service account. Evaluated as a template. Only used if `create` is `true`. | `{}` |
| `coordinating.autoscaling.enabled` | Whether enable horizontal pod autoscale | `false` |
| `coordinating.autoscaling.minReplicas` | Configure a minimum amount of pods | `3` |
| `coordinating.autoscaling.maxReplicas` | Configure a maximum amount of pods | `11` |
| `coordinating.autoscaling.targetCPU` | Define the CPU target to trigger the scaling actions (utilization percentage) | `""` |
| `coordinating.autoscaling.targetMemory` | Define the memory target to trigger the scaling actions (utilization percentage) | `""` |
### Ingest-only nodes parameters
| Name | Description | Value |
| ---------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- | ---------------------------- |
| `ingest.enabled` | Enable ingest nodes | `true` |
| `ingest.replicaCount` | Number of ingest-only replicas to deploy | `2` |
| `ingest.nameOverride` | String to partially override elasticsearch.ingest.fullname | `""` |
| `ingest.fullnameOverride` | String to fully override elasticsearch.ingest.fullname | `""` |
| `ingest.servicenameOverride` | String to fully override ingest.master.servicename | `""` |
| `ingest.annotations` | Annotations for the ingest statefulset | `{}` |
| `ingest.containerPorts.restAPI` | Elasticsearch REST API port | `9200` |
| `ingest.containerPorts.transport` | Elasticsearch Transport port | `9300` |
| `ingest.updateStrategy.type` | Ingest-only nodes statefulset stategy type | `RollingUpdate` |
| `ingest.resources.limits` | The resources limits for the ingest-only containers | `{}` |
| `ingest.resources.requests` | The requested resources for the ingest-only containers | `{}` |
| `ingest.heapSize` | Elasticsearch ingest-only node heap size. | `128m` |
| `ingest.podSecurityContext.enabled` | Enabled ingest-only pods' Security Context | `true` |
| `ingest.podSecurityContext.fsGroup` | Set ingest-only pod's Security Context fsGroup | `1001` |
| `ingest.containerSecurityContext.enabled` | Enabled ingest-only containers' Security Context | `true` |
| `ingest.containerSecurityContext.runAsUser` | Set ingest-only containers' Security Context runAsUser | `1001` |
| `ingest.containerSecurityContext.runAsNonRoot` | Set ingest-only containers' Security Context runAsNonRoot | `true` |
| `ingest.hostAliases` | ingest-only pods host aliases | `[]` |
| `ingest.podLabels` | Extra labels for ingest-only pods | `{}` |
| `ingest.podAnnotations` | Annotations for ingest-only pods | `{}` |
| `ingest.podAffinityPreset` | Pod affinity preset. Ignored if `ingest.affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `ingest.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `ingest.affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `ingest.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `ingest.affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `ingest.nodeAffinityPreset.key` | Node label key to match. Ignored if `ingest.affinity` is set | `""` |
| `ingest.nodeAffinityPreset.values` | Node label values to match. Ignored if `ingest.affinity` is set | `[]` |
| `ingest.affinity` | Affinity for ingest-only pods assignment | `{}` |
| `ingest.nodeSelector` | Node labels for ingest-only pods assignment | `{}` |
| `ingest.tolerations` | Tolerations for ingest-only pods assignment | `[]` |
| `ingest.priorityClassName` | ingest-only pods' priorityClassName | `""` |
| `ingest.schedulerName` | Name of the k8s scheduler (other than default) for ingest-only pods | `""` |
| `ingest.terminationGracePeriodSeconds` | In seconds, time given to the Elasticsearch ingest pod to terminate gracefully | `""` |
| `ingest.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` |
| `ingest.podManagementPolicy` | podManagementPolicy to manage scaling operation of Elasticsearch ingest pods | `Parallel` |
| `ingest.startupProbe.enabled` | Enable/disable the startup probe (ingest-only nodes pod) | `false` |
| `ingest.startupProbe.initialDelaySeconds` | Delay before startup probe is initiated (ingest-only nodes pod) | `90` |
| `ingest.startupProbe.periodSeconds` | How often to perform the probe (ingest-only nodes pod) | `10` |
| `ingest.startupProbe.timeoutSeconds` | When the probe times out (ingest-only nodes pod) | `5` |
| `ingest.startupProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (ingest-only nodes pod) | `1` |
| `ingest.startupProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` |
| `ingest.livenessProbe.enabled` | Enable/disable the liveness probe (ingest-only nodes pod) | `true` |
| `ingest.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (ingest-only nodes pod) | `90` |
| `ingest.livenessProbe.periodSeconds` | How often to perform the probe (ingest-only nodes pod) | `10` |
| `ingest.livenessProbe.timeoutSeconds` | When the probe times out (ingest-only nodes pod) | `5` |
| `ingest.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (ingest-only nodes pod) | `1` |
| `ingest.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` |
| `ingest.readinessProbe.enabled` | Enable/disable the readiness probe (ingest-only nodes pod) | `true` |
| `ingest.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (ingest-only nodes pod) | `90` |
| `ingest.readinessProbe.periodSeconds` | How often to perform the probe (ingest-only nodes pod) | `10` |
| `ingest.readinessProbe.timeoutSeconds` | When the probe times out (ingest-only nodes pod) | `5` |
| `ingest.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (ingest-only nodes pod) | `1` |
| `ingest.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` |
| `ingest.customStartupProbe` | Override default startup probe | `{}` |
| `ingest.customLivenessProbe` | Override default liveness probe | `{}` |
| `ingest.customReadinessProbe` | Override default readiness probe | `{}` |
| `ingest.command` | Override default container command (useful when using custom images) | `[]` |
| `ingest.args` | Override default container args (useful when using custom images) | `[]` |
| `ingest.lifecycleHooks` | for the ingest-only container(s) to automate configuration before or after startup | `{}` |
| `ingest.extraEnvVars` | Array with extra environment variables to add to ingest-only nodes | `[]` |
| `ingest.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for ingest-only nodes | `""` |
| `ingest.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for ingest-only nodes | `""` |
| `ingest.extraVolumes` | Optionally specify extra list of additional volumes for the ingest-only pod(s) | `[]` |
| `ingest.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the ingest-only container(s) | `[]` |
| `ingest.sidecars` | Add additional sidecar containers to the ingest-only pod(s) | `[]` |
| `ingest.initContainers` | Add additional init containers to the ingest-only pod(s) | `[]` |
| `ingest.serviceAccount.create` | Specifies whether a ServiceAccount should be created | `false` |
| `ingest.serviceAccount.name` | Name of the service account to use. If not set and create is true, a name is generated using the fullname template. | `""` |
| `ingest.serviceAccount.automountServiceAccountToken` | Automount service account token for the server service account | `true` |
| `ingest.serviceAccount.annotations` | Annotations for service account. Evaluated as a template. Only used if `create` is `true`. | `{}` |
| `ingest.autoscaling.enabled` | Whether to enable horizontal pod autoscaling | `false` |
| `ingest.autoscaling.minReplicas` | Configure a minimum amount of pods | `3` |
| `ingest.autoscaling.maxReplicas` | Configure a maximum amount of pods | `11` |
| `ingest.autoscaling.targetCPU` | Define the CPU target to trigger the scaling actions (utilization percentage) | `""` |
| `ingest.autoscaling.targetMemory` | Define the memory target to trigger the scaling actions (utilization percentage) | `""` |
| `ingest.service.enabled` | Enable Ingest-only service | `false` |
| `ingest.service.type` | Elasticsearch ingest-only service type | `ClusterIP` |
| `ingest.service.ports.restAPI` | Elasticsearch service REST API port | `9200` |
| `ingest.service.ports.transport` | Elasticsearch service transport port | `9300` |
| `ingest.service.nodePorts.restAPI` | Node port for REST API | `""` |
| `ingest.service.nodePorts.transport` | Node port for transport | `""` |
| `ingest.service.clusterIP` | Elasticsearch ingest-only service Cluster IP | `""` |
| `ingest.service.loadBalancerIP` | Elasticsearch ingest-only service Load Balancer IP | `""` |
| `ingest.service.loadBalancerSourceRanges` | Elasticsearch ingest-only service Load Balancer sources | `[]` |
| `ingest.service.externalTrafficPolicy` | Elasticsearch ingest-only service external traffic policy | `Cluster` |
| `ingest.service.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` |
| `ingest.service.annotations` | Additional custom annotations for Elasticsearch ingest-only service | `{}` |
| `ingest.service.sessionAffinity` | Session Affinity for Kubernetes service, can be "None" or "ClientIP" | `None` |
| `ingest.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` |
| `ingest.ingress.enabled` | Enable ingress record generation for Elasticsearch | `false` |
| `ingest.ingress.pathType` | Ingress path type | `ImplementationSpecific` |
| `ingest.ingress.apiVersion` | Force Ingress API version (automatically detected if not set) | `""` |
| `ingest.ingress.hostname` | Default host for the ingress record | `elasticsearch-ingest.local` |
| `ingest.ingress.path` | Default path for the ingress record | `/` |
| `ingest.ingress.annotations` | Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. | `{}` |
| `ingest.ingress.tls` | Enable TLS configuration for the host defined at `ingress.hostname` parameter | `false` |
| `ingest.ingress.selfSigned` | Create a TLS secret for this ingress record using self-signed certificates generated by Helm | `false` |
| `ingest.ingress.ingressClassName` | IngressClass that will be used to implement the Ingress (Kubernetes 1.18+) | `""` |
| `ingest.ingress.extraHosts` | An array with additional hostname(s) to be covered with the ingress record | `[]` |
| `ingest.ingress.extraPaths` | An array with additional arbitrary paths that may need to be added to the ingress under the main host | `[]` |
| `ingest.ingress.extraTls` | TLS configuration for additional hostname(s) to be covered with this ingress record | `[]` |
| `ingest.ingress.secrets` | Custom TLS certificates as secrets | `[]` |
| `ingest.ingress.extraRules` | Additional rules to be covered with this ingress record | `[]` |
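As an illustration, the following minimal sketch enables ingest-only nodes with a dedicated service and ingress, using only the parameters listed above (the hostname is just an example):
```yaml
ingest:
  enabled: true
  replicaCount: 2
  service:
    enabled: true
    type: ClusterIP
  ingress:
    enabled: true
    hostname: elasticsearch-ingest.local   # example hostname
```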
### Metrics parameters
| Name | Description | Value |
| ----------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------ | -------------------------------- |
| `metrics.enabled` | Enable prometheus exporter | `false` |
| `metrics.nameOverride` | Metrics pod name | `""` |
| `metrics.fullnameOverride` | String to fully override common.names.fullname | `""` |
| `metrics.image.registry` | Metrics exporter image registry | `docker.io` |
| `metrics.image.repository` | Metrics exporter image repository | `bitnami/elasticsearch-exporter` |
| `metrics.image.tag` | Metrics exporter image tag | `1.5.0-debian-11-r80` |
| `metrics.image.digest` | Metrics exporter image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
| `metrics.image.pullPolicy` | Metrics exporter image pull policy | `IfNotPresent` |
| `metrics.image.pullSecrets` | Metrics exporter image pull secrets | `[]` |
| `metrics.annotations` | Annotations for metrics | `{}` |
| `metrics.extraArgs` | Extra arguments to add to the default exporter command | `[]` |
| `metrics.hostAliases` | Add deployment host aliases | `[]` |
| `metrics.schedulerName` | Name of the k8s scheduler (other than default) | `""` |
| `metrics.priorityClassName` | Elasticsearch metrics exporter pods' priorityClassName | `""` |
| `metrics.service.type` | Metrics exporter endpoint service type | `ClusterIP` |
| `metrics.service.port` | Metrics exporter endpoint service port | `9114` |
| `metrics.service.annotations` | Provide any additional annotations which may be required. | `{}` |
| `metrics.podAffinityPreset` | Metrics Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `metrics.podAntiAffinityPreset` | Metrics Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `metrics.nodeAffinityPreset.type` | Metrics Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `metrics.nodeAffinityPreset.key` | Metrics Node label key to match. Ignored if `affinity` is set. | `""` |
| `metrics.nodeAffinityPreset.values` | Metrics Node label values to match. Ignored if `affinity` is set. | `[]` |
| `metrics.affinity` | Metrics Affinity for pod assignment | `{}` |
| `metrics.nodeSelector` | Metrics Node labels for pod assignment | `{}` |
| `metrics.tolerations` | Metrics Tolerations for pod assignment | `[]` |
| `metrics.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` |
| `metrics.resources.limits` | The resources limits for the container | `{}` |
| `metrics.resources.requests` | The requested resources for the container | `{}` |
| `metrics.livenessProbe.enabled` | Enable/disable the liveness probe (metrics pod) | `true` |
| `metrics.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (metrics pod) | `60` |
| `metrics.livenessProbe.periodSeconds` | How often to perform the probe (metrics pod) | `10` |
| `metrics.livenessProbe.timeoutSeconds` | When the probe times out (metrics pod) | `5` |
| `metrics.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` |
| `metrics.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (metrics pod) | `1` |
| `metrics.readinessProbe.enabled` | Enable/disable the readiness probe (metrics pod) | `true` |
| `metrics.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (metrics pod) | `5` |
| `metrics.readinessProbe.periodSeconds` | How often to perform the probe (metrics pod) | `10` |
| `metrics.readinessProbe.timeoutSeconds` | When the probe times out (metrics pod) | `1` |
| `metrics.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` |
| `metrics.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (metrics pod) | `1` |
| `metrics.startupProbe.enabled` | Enable/disable the startup probe (metrics pod) | `false` |
| `metrics.startupProbe.initialDelaySeconds` | Delay before startup probe is initiated (metrics pod) | `5` |
| `metrics.startupProbe.periodSeconds` | How often to perform the probe (metrics pod) | `10` |
| `metrics.startupProbe.timeoutSeconds` | When the probe times out (metrics pod) | `1` |
| `metrics.startupProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` |
| `metrics.startupProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (metrics pod) | `1` |
| `metrics.customStartupProbe` | Custom startup probe for the metrics exporter | `{}` |
| `metrics.customLivenessProbe` | Custom liveness probe for the metrics exporter | `{}` |
| `metrics.customReadinessProbe` | Custom readiness probe for the metrics exporter | `{}` |
| `metrics.podAnnotations` | Annotations for metrics exporter pods | `{}` |
| `metrics.podLabels` | Extra labels to add to Pod | `{}` |
| `metrics.podSecurityContext.enabled` | Enabled Elasticsearch metrics exporter pods' Security Context | `true` |
| `metrics.podSecurityContext.fsGroup` | Set Elasticsearch metrics exporter pod's Security Context fsGroup | `1001` |
| `metrics.containerSecurityContext.enabled` | Enabled Elasticsearch metrics exporter containers' Security Context | `true` |
| `metrics.containerSecurityContext.runAsUser` | Set Elasticsearch metrics exporter containers' Security Context runAsUser | `1001` |
| `metrics.containerSecurityContext.runAsNonRoot` | Set Elasticsearch metrics exporter container's Security Context runAsNonRoot | `true` |
| `metrics.command` | Override default container command (useful when using custom images) | `[]` |
| `metrics.args` | Override default container args (useful when using custom images) | `[]` |
| `metrics.extraEnvVars` | Array with extra environment variables to add to Elasticsearch metrics exporter nodes | `[]` |
| `metrics.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for Elasticsearch metrics exporter nodes | `""` |
| `metrics.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for Elasticsearch metrics exporter nodes | `""` |
| `metrics.extraVolumes` | Optionally specify extra list of additional volumes for the Elasticsearch metrics exporter pod(s) | `[]` |
| `metrics.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Elasticsearch metrics exporter container(s) | `[]` |
| `metrics.sidecars` | Add additional sidecar containers to the Elasticsearch metrics exporter pod(s) | `[]` |
| `metrics.initContainers` | Add additional init containers to the Elasticsearch metrics exporter pod(s) | `[]` |
| `metrics.serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using PrometheusOperator | `false` |
| `metrics.serviceMonitor.namespace` | Namespace which Prometheus is running in | `""` |
| `metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in prometheus. | `""` |
| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped | `""` |
| `metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `""` |
| `metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` |
| `metrics.serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples before ingestion | `[]` |
| `metrics.serviceMonitor.selector` | ServiceMonitor selector labels | `{}` |
| `metrics.serviceMonitor.labels` | Extra labels for the ServiceMonitor | `{}` |
| `metrics.serviceMonitor.honorLabels` | honorLabels chooses the metric's labels on collisions with target labels | `false` |
| `metrics.prometheusRule.enabled` | Creates a Prometheus Operator PrometheusRule (also requires `metrics.enabled` to be `true` and `metrics.prometheusRule.rules`) | `false` |
| `metrics.prometheusRule.namespace` | Namespace for the PrometheusRule Resource (defaults to the Release Namespace) | `""` |
| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so PrometheusRule will be discovered by Prometheus | `{}` |
| `metrics.prometheusRule.rules` | Prometheus Rule definitions | `[]` |
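For example, a minimal sketch enabling the exporter together with a Prometheus Operator ServiceMonitor could look like this (the namespace and interval are only illustrative values):
```yaml
metrics:
  enabled: true
  serviceMonitor:
    enabled: true
    namespace: monitoring   # example: namespace where Prometheus Operator runs
    interval: 30s           # example scrape interval
```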
### Init Container Parameters
| Name | Description | Value |
| -------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------- |
| `volumePermissions.enabled` | Enable init container that changes volume permissions in the data directory (for cases where the default k8s `runAsUser` and `fsGroup` values do not work) | `false` |
| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` |
| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/bitnami-shell` |
| `volumePermissions.image.tag` | Init container volume-permissions image tag | `11-debian-11-r97` |
| `volumePermissions.image.digest` | Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` |
| `volumePermissions.image.pullSecrets` | Init container volume-permissions image pull secrets | `[]` |
| `volumePermissions.resources.limits` | The resources limits for the container | `{}` |
| `volumePermissions.resources.requests` | The requested resources for the container | `{}` |
| `sysctlImage.enabled` | Enable kernel settings modifier image | `true` |
| `sysctlImage.registry` | Kernel settings modifier image registry | `docker.io` |
| `sysctlImage.repository` | Kernel settings modifier image repository | `bitnami/bitnami-shell` |
| `sysctlImage.tag` | Kernel settings modifier image tag | `11-debian-11-r97` |
| `sysctlImage.digest` | Kernel settings modifier image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
| `sysctlImage.pullPolicy` | Kernel settings modifier image pull policy | `IfNotPresent` |
| `sysctlImage.pullSecrets` | Kernel settings modifier image pull secrets | `[]` |
| `sysctlImage.resources.limits` | The resources limits for the container | `{}` |
| `sysctlImage.resources.requests` | The requested resources for the container | `{}` |
### Kibana Parameters
| Name | Description | Value |
| ---------------------------- | ------------------------------------------------------------------------- | ------------------------------------------------------- |
| `kibana.elasticsearch.hosts` | Array containing hostnames for the ES instances. Used to generate the URL | `[]` |
| `kibana.elasticsearch.port` | Port to connect Kibana and ES instance. Used to generate the URL | `{{ include "elasticsearch.service.ports.restAPI" . }}` |
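When overriding these values manually (instead of relying on the default helpers), a hedged sketch could look like the following, where the hostname is purely illustrative and should point at your coordinating/master service:
```yaml
kibana:
  elasticsearch:
    hosts:
      - my-release-elasticsearch   # illustrative service name, adjust to your release
    port: 9200
```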
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
```console
helm install my-release \
--set name=my-elastic,client.service.port=8080 \
my-repo/elasticsearch
```
The above command sets the Elasticsearch cluster name to `my-elastic` and REST port number to `8080`.
Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
```console
helm install my-release -f values.yaml my-repo/elasticsearch
```
> **Tip**: You can use the default [values.yaml](values.yaml).
## Configuration and installation details
### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/)
It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image.
Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist.
### Change Elasticsearch version
To modify the Elasticsearch version used in this chart, specify a [valid image tag](https://hub.docker.com/r/bitnami/elasticsearch/tags/) using the `image.tag` parameter. For example, `image.tag=X.Y.Z`. This approach is also applicable to other images, like exporters.
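As a command-line sketch, keeping the `X.Y.Z` placeholder for a real tag taken from the link above:
```console
helm install my-release --set image.tag=X.Y.Z my-repo/elasticsearch
```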
### Default kernel settings
Currently, Elasticsearch requires some changes in the kernel of the host machine to work as expected. If those values are not set in the underlying operating system, the ES containers fail to boot with ERROR messages. More information about these requirements can be found in the links below:
- [File Descriptor requirements](https://www.elastic.co/guide/en/elasticsearch/reference/current/file-descriptors.html)
- [Virtual memory requirements](https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html)
This chart uses a **privileged** initContainer to change those settings in the Kernel by running: `sysctl -w vm.max_map_count=262144 && sysctl -w fs.file-max=65536`.
You can disable the initContainer using the `sysctlImage.enabled=false` parameter.
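For instance, a hedged install command disabling the privileged initContainer could look like:
```console
helm install my-release --set sysctlImage.enabled=false my-repo/elasticsearch
```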
### Enable bundled Kibana
This Elasticsearch chart contains Kibana as a subchart. You can enable it by setting the `global.kibanaEnabled=true` parameter.
To see the notes with operational instructions from the Kibana chart, add the `--render-subchart-notes` flag to your `helm install` command; this way, both the Kibana and Elasticsearch notes are shown in your terminal.
When enabling the bundled Kibana subchart, there are a few gotchas you should be aware of, listed below.
#### Elasticsearch REST Encryption
When enabling Elasticsearch's REST endpoint encryption, you will also need to set `kibana.elasticsearch.security.tls.enabled` to the SAME value, along with some additional values shown below for an "out of the box" experience:
```yaml
security:
enabled: true
# PASSWORD must be the same value passed to elasticsearch to get an "out of the box" experience
elasticPassword: "<PASSWORD>"
tls:
# AutoGenerate TLS certs for elastic
autoGenerated: true
kibana:
elasticsearch:
security:
auth:
enabled: true
# default in the elasticsearch chart is elastic
kibanaUsername: "<USERNAME>"
kibanaPassword: "<PASSWORD>"
tls:
# Instruct kibana to connect to elastic over https
enabled: true
# Bit of a catch 22, as you will need to know the name upfront of your release
existingSecret: RELEASENAME-elasticsearch-coordinating-crt # or just 'elasticsearch-coordinating-crt' if the release name happens to be 'elasticsearch'
# As the certs are auto-generated, they are pemCerts so set to true
usePemCerts: true
```
At a bare minimum, when working with Kibana and Elasticsearch together, the following values MUST be the same; otherwise things will fail:
```yaml
security:
tls:
restEncryption: true
# assumes global.kibanaEnabled=true
kibana:
elasticsearch:
security:
tls:
enabled: true
```
### Adding extra environment variables
In case you want to add extra environment variables (useful for advanced operations like custom init scripts), you can use the `extraEnvVars` property.
```yaml
extraEnvVars:
- name: ELASTICSEARCH_VERSION
    value: "7.0"
```
Alternatively, you can use a ConfigMap or a Secret with the environment variables. To do so, use the `extraEnvVarsCM` or the `extraEnvVarsSecret` values.
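As a sketch, assuming you have created a ConfigMap named `elasticsearch-extra-env` (the name is hypothetical) holding the variables:
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: elasticsearch-extra-env   # hypothetical name
data:
  ELASTICSEARCH_VERSION: "7.0"
```
You would then reference it in your values with `extraEnvVarsCM: elasticsearch-extra-env`.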
### Using custom init scripts
For advanced operations, the Bitnami Elasticsearch chart allows using custom init scripts that will be mounted inside `/docker-entrypoint.init-db`. You can include the scripts directly in your `values.yaml` with `initScripts`, or use a ConfigMap or a Secret (in case of sensitive data) to mount these extra scripts. In that case, use the `initScriptsCM` and `initScriptsSecret` values.
```console
initScriptsCM=special-scripts
initScriptsSecret=special-scripts-sensitive
```
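A minimal sketch of the inline alternative, assuming `initScripts` accepts a map of script file names to their contents (the script name and body are hypothetical):
```yaml
initScripts:
  my-init-script.sh: |
    #!/bin/bash
    # Hypothetical example: print a message during initialization
    echo "Running custom init logic"
```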
### Snapshot and restore operations
As it's described in the [official documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/snapshots-register-repository.html#snapshots-filesystem-repository), it's necessary to register a snapshot repository before you can perform snapshot and restore operations.
This chart allows you to configure Elasticsearch to use a shared file system to store snapshots. To do so, you need to mount a RWX volume on every Elasticsearch node and set the parameter `snapshotRepoPath` with the path where the volume is mounted. In the example below, you can find the values to set when using an NFS Persistent Volume:
```yaml
extraVolumes:
- name: snapshot-repository
nfs:
server: nfs.example.com # Please change this to your NFS server
path: /share1
extraVolumeMounts:
- name: snapshot-repository
mountPath: /snapshots
snapshotRepoPath: "/snapshots"
```
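Once the chart is deployed with those values, the shared file system repository still has to be registered through the Elasticsearch API, roughly as follows (the repository name is hypothetical; this assumes the service has been port-forwarded locally, and credentials must be added if security is enabled):
```console
curl -X PUT "http://127.0.0.1:9200/_snapshot/my-repository" \
  -H 'Content-Type: application/json' \
  -d '{"type": "fs", "settings": {"location": "/snapshots"}}'
```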
### Sidecars and Init Containers
If you need additional containers to run within the same pod as the Elasticsearch components (e.g. an additional metrics or logging exporter), you can do so via the `XXX.sidecars` parameter(s), where `XXX` is a placeholder you need to replace with the actual component(s). Simply define your container according to the Kubernetes container spec.
```yaml
sidecars:
- name: your-image-name
image: your-image
imagePullPolicy: Always
ports:
- name: portname
containerPort: 1234
```
Similarly, you can add extra init containers using the `initContainers` parameter.
```yaml
initContainers:
- name: your-image-name
image: your-image
imagePullPolicy: Always
ports:
      - name: portname
        containerPort: 1234
```
### Setting Pod's affinity
This chart allows you to set your custom affinity using the `XXX.affinity` parameter(s). Find more information about Pod's affinity in the [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity).
As an alternative, you can use one of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/main/bitnami/common#affinities) chart. To do so, set the `XXX.podAffinityPreset`, `XXX.podAntiAffinityPreset`, or `XXX.nodeAffinityPreset` parameters.
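For example, a hedged values sketch combining a soft anti-affinity preset for master nodes with a node affinity preset for data nodes, assuming the same preset parameters exist for those roles as listed in the tables above (the node label is only illustrative):
```yaml
master:
  podAntiAffinityPreset: soft
data:
  nodeAffinityPreset:
    type: hard
    key: kubernetes.io/arch    # illustrative node label
    values:
      - amd64
```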
## Persistence
The [Bitnami Elasticsearch](https://github.com/bitnami/containers/tree/main/bitnami/elasticsearch) image stores the Elasticsearch data at the `/bitnami/elasticsearch/data` path of the container.
By default, the chart mounts a [Persistent Volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) at this location. The volume is created using dynamic volume provisioning. See the [Parameters](#parameters) section to configure the PVC.
### Adjust permissions of persistent volume mountpoint
As the image runs as non-root by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data into it.
By default, the chart is configured to use Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions.
As an alternative, this chart supports using an initContainer to change the ownership of the volume before mounting it in the final destination.
You can enable this initContainer by setting `volumePermissions.enabled` to `true`.
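For example, at install time:
```console
helm install my-release --set volumePermissions.enabled=true my-repo/elasticsearch
```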
## Troubleshooting
Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues).
## Upgrading
### To 19.0.0
The new version of this chart no longer supports elasticsearch-curator, as that repository has been deprecated.
### To 18.0.0
This major release refactors the bitnami/elasticsearch chart, adding some organization and functional changes.
- Each role now has the same structure: its own StatefulSet, a headless service (which gives each node an individual advertised FQDN, required for TLS verification), its own ServiceAccount (BWC), and HorizontalPodAutoscaling.
- Previously, the chart would alternate between a Coordinating service and an All-nodes service for traffic exposure. This logic has been replaced with a single Traffic exposure service, that will have coordinating-only nodes as backend pods, or master pods if no coordinating nodes are enabled.
- Master-eligible nodes can now be deployed as multi-role nodes using the `masterOnly` setting. This allows the creation of different topologies: smaller clusters with HA (3 multi-role master-eligible nodes) and single-node deployments.
- Renamed several values to be in line with the rest of the catalog.
This major release also upgrades Elasticsearch to version 8.x.x and updates the Kibana subchart.
- Upgrade to Elasticsearch 8
- Upgrade Kibana subchart.
In addition, several modifications have been performed adding missing features and renaming values, in order to get aligned with the rest of the assets in the Bitnami charts repository.
The following values have been modified:
- `coordinating.service.*` has been renamed as `service.*`. This service will be backed by coordinating nodes if enabled, or master nodes if not.
- `master.service.*` has been removed.
- `data.service.*` has been removed.
- `master.ingress.*` has been renamed as `ingress.*`. This ingress will be backed by the coordinating/master service previously mentioned.
- In addition, an Ingest-only service and ingress have been added, for use cases where separate ingestion and search channels are needed.
- `global.coordinating.name` has been renamed as `global.elasticsearch.service.name`.
- `name` has been renamed as `clusterName`.
- `extraEnvVarsConfigMap` has been renamed as `extraEnvVarsCM`.
- `{master/data/ingest/coordinating}.replicas` has been renamed as `{master/data/ingest/coordinating}.replicaCount`.
- `{master/data/ingest/coordinating}.securityContext` has been separated in two different values: `podSecurityContext` and `containerSecurityContext`.
- `{master/data/ingest/coordinating}.updateStrategy` is now interpreted as an object. `rollingUpdatePartition` has been removed and has to be configured inside the updateStrategy object when needed.
- Default values for `kibana.elasticsearch.hosts` and `kibana.elasticsearch.port` have been modified to use the new helpers.
- `{master/data/ingest/coordinating/curator/metrics}.name` has been renamed as `{master/data/ingest/coordinating/curator/metrics}.nameOverride`.
### To 17.0.0
This version bumps the major version of the Kibana Helm Chart bundled as a dependency; [here](https://github.com/bitnami/charts/tree/main/bitnami/kibana#to-900) you can see the changes implemented in this Kibana major version.
### To 16.0.0
This version replaces the Ingest and Coordinating Deployments with StatefulSets. This change is required so that Coordinating and Ingest nodes have their own associated services, which are required for TLS hostname verification.
We haven't encountered any issues during our upgrade test, but we recommend creating volumes backups before upgrading this major version, especially for users with additional volumes and custom configurations.
Additionally, this version adds support for X-Pack Security features such as TLS/SSL encryption and basic authentication.
### To 15.0.0
From this version onwards, Elasticsearch container components are now licensed under the [Elastic License](https://www.elastic.co/licensing/elastic-license) that is not currently accepted as an Open Source license by the Open Source Initiative (OSI).
Also, from now on, the Helm Chart will include the X-Pack plugin installed by default.
Regular upgrade is compatible from previous versions.
### To 14.0.0
This version standardizes the way of defining Ingress rules in the Kibana subchart. When configuring a single hostname for the Ingress rule, set the `kibana.ingress.hostname` value. When defining more than one, set the `kibana.ingress.extraHosts` array. Apart from this case, no issues are expected to appear when upgrading.
### To 13.0.0
[On November 13, 2020, Helm v2 support formally ended](https://github.com/helm/charts#status-of-the-project). This major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.
#### What changes were introduced in this major version?
- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field.
- Move dependency information from the *requirements.yaml* to the *Chart.yaml*
- After running `helm dependency update`, a *Chart.lock* file is generated containing the same structure used in the previous *requirements.lock*
- The different fields present in the *Chart.yaml* file have been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts
#### Considerations when upgrading to this version
- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues
- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore
- If you installed the previous version with Helm v2 and want to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3
#### Useful links
- <https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/>
- <https://helm.sh/docs/topics/v2_v3_migration/>
- <https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/>
### To 12.0.0
Several changes were introduced that break backwards compatibility:
- Port names were prefixed with the protocol to comply with Istio (see <https://istio.io/docs/ops/deployment/requirements/>).
- Labels are adapted to follow the Helm charts best practices.
- Elasticsearch data pods are now deployed in parallel in order to bootstrap the cluster and be discovered.
### To 11.0.0
Elasticsearch master pods are now deployed in parallel in order to bootstrap the cluster and be discovered.
The field `podManagementPolicy` can't be updated in a StatefulSet, so you need to destroy it before you upgrade the chart to this version.
```console
kubectl delete statefulset elasticsearch-master
helm upgrade <DEPLOYMENT_NAME> my-repo/elasticsearch
```
### To 10.0.0
In this version, Kibana was added as a dependent chart. More info about how to enable and work with this bundled Kibana can be found in the ["Enable bundled Kibana"](#enable-bundled-kibana) section.
### To 9.0.0
Elasticsearch master nodes store the cluster status at `/bitnami/elasticsearch/data`. Among other things, this includes the UUID of the Elasticsearch cluster. Without a persistent data store for this data, the UUID of a cluster could change if the Kubernetes node(s) hosting the ES master pods go down and the pods are rescheduled elsewhere. If this happens, the data nodes will no longer be able to join the cluster, as the UUID changed, resulting in a broken cluster.
To resolve such issues, PVCs are now attached for master node data persistence.
---
Helm performs a lookup for the object based on its group (apps), version (v1), and kind (Deployment). Also known as its GroupVersionKind, or GVK. Changing the GVK is considered a compatibility breaker from Kubernetes' point of view, so you cannot "upgrade" those objects to the new GVK in-place. Earlier versions of Helm 3 did not perform the lookup correctly which has since been fixed to match the spec.
In [4dfac075aacf74405e31ae5b27df4369e84eb0b0](https://github.com/bitnami/charts/commit/4dfac075aacf74405e31ae5b27df4369e84eb0b0) the `apiVersion` of the deployment resources was updated to `apps/v1` in line with the API deprecations, resulting in compatibility breakage.
### To 7.4.0
This version also introduces `bitnami/common`, a [library chart](https://helm.sh/docs/topics/library_charts/#helm), as a dependency. More documentation about this new utility can be found [here](https://github.com/bitnami/charts/tree/main/bitnami/common#bitnami-common-library-chart). Please make sure that you have updated the chart dependencies before executing any upgrade.
### To 7.0.0
This version enables, by default, the initContainer that modifies some kernel settings to meet the Elasticsearch requirements. More info in the ["Default kernel settings"](#default-kernel-settings) section.
You can disable the initContainer using the `sysctlImage.enabled=false` parameter.
### To 3.0.0
Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments.
Use the workaround below to upgrade from versions previous to 3.0.0. The following example assumes that the release name is elasticsearch:
```console
kubectl patch deployment elasticsearch-coordinating --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]'
kubectl patch deployment elasticsearch-ingest --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]'
kubectl patch deployment elasticsearch-master --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]'
kubectl patch deployment elasticsearch-metrics --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]'
kubectl delete statefulset elasticsearch-data --cascade=false
```
## License
Copyright &copy; 2023 Bitnami
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
<http://www.apache.org/licenses/LICENSE-2.0>
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
View File
@ -0,0 +1,121 @@
CHART NAME: {{ .Chart.Name }}
CHART VERSION: {{ .Chart.Version }}
APP VERSION: {{ .Chart.AppVersion }}
{{- if contains .Values.service.type "LoadBalancer" }}
-------------------------------------------------------------------------------
WARNING
By specifying "service.type=LoadBalancer" you have most likely
exposed the Elasticsearch service externally.
Please note that Elasticsearch does not implement an authentication
mechanism to secure your cluster. For security reasons, we strongly
suggest that you switch to "ClusterIP" or "NodePort".
-------------------------------------------------------------------------------
{{- end }}
{{- if not .Values.sysctlImage.enabled }}
-------------------------------------------------------------------------------
WARNING
Elasticsearch requires some changes in the kernel of the host machine to
work as expected. If those values are not set in the underlying operating
system, the ES containers fail to boot with ERROR messages.
To check whether the host machine meets the requirements, run the command
below:
kubectl logs --namespace {{ include "common.names.namespace" . }} $(kubectl get --namespace {{ include "common.names.namespace" . }} \
pods -l app={{ template "common.names.name" . }},role=master -o jsonpath='{.items[0].metadata.name}') \
elasticsearch
You can adapt the Kernel parameters on your cluster as described in the
official documentation:
https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster
As an alternative, you can specify "sysctlImage.enabled=true" to use a
privileged initContainer to change those settings in the Kernel:
helm upgrade --namespace {{ include "common.names.namespace" . }} {{ .Release.Name }} my-repo/elasticsearch --set sysctlImage.enabled=true
Note that this requires the ability to run privileged containers, which is likely not
the case on many secure clusters. To cover this use case, you can also set some parameters
in the config file to customize the default settings:
https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-store.html
https://www.elastic.co/guide/en/cloud-on-k8s/master/k8s-virtual-memory.html
For that, you can place the desired parameters by using the "config" block present in the values.yaml
{{- else if .Values.sysctlImage.enabled }}
-------------------------------------------------------------------------------
WARNING
Elasticsearch requires some changes in the kernel of the host machine to
work as expected. If those values are not set in the underlying operating
system, the ES containers fail to boot with ERROR messages.
More information about these requirements can be found in the links below:
https://www.elastic.co/guide/en/elasticsearch/reference/current/file-descriptors.html
https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html
This chart uses a privileged initContainer to change those settings in the Kernel
by running: sysctl -w vm.max_map_count=262144 && sysctl -w fs.file-max=65536
{{- end }}
** Please be patient while the chart is being deployed **
{{- if .Values.diagnosticMode.enabled }}
The chart has been deployed in diagnostic mode. All probes have been disabled and the command has been overwritten with:
command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 4 }}
args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 4 }}
Get the list of pods by executing:
kubectl get pods --namespace {{ include "common.names.namespace" . }} -l app.kubernetes.io/instance={{ .Release.Name }}
Access the pod you want to debug by executing
kubectl exec --namespace {{ include "common.names.namespace" . }} -ti <NAME OF THE POD> -- bash
In order to replicate the container startup scripts execute this command:
/opt/bitnami/scripts/elasticsearch/entrypoint.sh /opt/bitnami/scripts/elasticsearch/run.sh
{{- else }}
Elasticsearch can be accessed within the cluster on port {{ include "elasticsearch.service.ports.restAPI" . }} at {{ template "elasticsearch.service.name" . }}.{{ include "common.names.namespace" . }}.svc.{{ .Values.clusterDomain }}
To access from outside the cluster execute the following commands:
{{- if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ include "common.names.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "elasticsearch.service.name" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ include "common.names.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}")
curl http://$NODE_IP:$NODE_PORT/
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
Watch the status with: 'kubectl get svc --namespace {{ include "common.names.namespace" . }} -w {{ template "elasticsearch.service.name" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ include "common.names.namespace" . }} {{ template "elasticsearch.service.name" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}")
curl http://$SERVICE_IP:{{ include "elasticsearch.service.ports.restAPI" . }}/
{{- else if contains "ClusterIP" .Values.service.type }}
kubectl port-forward --namespace {{ include "common.names.namespace" . }} svc/{{ template "elasticsearch.service.name" . }} {{ include "elasticsearch.service.ports.restAPI" . }}:9200 &
curl http://127.0.0.1:9200/
{{- end }}
{{- include "common.warnings.rollingTag" .Values.image }}
{{- include "common.warnings.rollingTag" .Values.volumePermissions.image }}
{{- include "common.warnings.rollingTag" .Values.sysctlImage }}
{{- end }}
{{ include "elasticsearch.validateValues" . }}
View File
@ -0,0 +1,585 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Return the proper ES image name
*/}}
{{- define "elasticsearch.image" -}}
{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }}
{{- end -}}
{{/*
Return the proper Docker Image Registry Secret Names
*/}}
{{- define "elasticsearch.imagePullSecrets" -}}
{{ include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.metrics.image .Values.sysctlImage .Values.volumePermissions.image) "global" .Values.global) }}
{{- end -}}
{{/*
Return the proper ES exporter image name
*/}}
{{- define "elasticsearch.metrics.image" -}}
{{ include "common.images.image" (dict "imageRoot" .Values.metrics.image "global" .Values.global) }}
{{- end -}}
{{/*
Return the proper sysctl image name
*/}}
{{- define "elasticsearch.sysctl.image" -}}
{{ include "common.images.image" (dict "imageRoot" .Values.sysctlImage "global" .Values.global) }}
{{- end -}}
{{/*
Return the proper image name (for the init container volume-permissions image)
*/}}
{{- define "elasticsearch.volumePermissions.image" -}}
{{ include "common.images.image" (dict "imageRoot" .Values.volumePermissions.image "global" .Values.global) }}
{{- end -}}
{{/*
Name for the Elasticsearch service
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
Required for the Kibana subchart to find Elasticsearch service.
*/}}
{{- define "elasticsearch.service.name" -}}
{{- if .Values.global.kibanaEnabled -}}
{{- $name := .Values.global.elasticsearch.service.name -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- else -}}
{{- printf "%s" ( include "common.names.fullname" . ) | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{/*
Port number for the Elasticsearch service REST API port
Required for the Kibana subchart to find Elasticsearch service.
*/}}
{{- define "elasticsearch.service.ports.restAPI" -}}
{{- if .Values.global.kibanaEnabled -}}
{{- printf "%d" (int .Values.global.elasticsearch.service.ports.restAPI) -}}
{{- else -}}
{{- printf "%d" (int .Values.service.ports.restAPI) -}}
{{- end -}}
{{- end -}}
{{/*
Create a default fully qualified master name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "elasticsearch.master.fullname" -}}
{{- $name := default "master" .Values.master.nameOverride -}}
{{- if .Values.master.fullnameOverride -}}
{{- .Values.master.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" (include "common.names.fullname" .) $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{/*
Create a default master service name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "elasticsearch.master.servicename" -}}
{{- if .Values.master.servicenameOverride -}}
{{- .Values.master.servicenameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-hl" (include "elasticsearch.master.fullname" .) | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{/*
Create a default fully qualified coordinating name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "elasticsearch.coordinating.fullname" -}}
{{- $name := default "coordinating" .Values.coordinating.nameOverride -}}
{{- if .Values.coordinating.fullnameOverride -}}
{{- .Values.coordinating.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" (include "common.names.fullname" .) $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{/*
Create a default coordinating service name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "elasticsearch.coordinating.servicename" -}}
{{- if .Values.coordinating.servicenameOverride -}}
{{- .Values.coordinating.servicenameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-hl" (include "elasticsearch.coordinating.fullname" .) | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{/*
Create a default fully qualified data name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "elasticsearch.data.fullname" -}}
{{- $name := default "data" .Values.data.nameOverride -}}
{{- if .Values.data.fullnameOverride -}}
{{- .Values.data.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" (include "common.names.fullname" .) $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{/*
Create a default data service name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "elasticsearch.data.servicename" -}}
{{- if .Values.data.servicenameOverride -}}
{{- .Values.data.servicenameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-hl" (include "elasticsearch.data.fullname" .) | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{/*
Create a default fully qualified ingest name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "elasticsearch.ingest.fullname" -}}
{{- $name := default "ingest" .Values.ingest.nameOverride -}}
{{- if .Values.ingest.fullnameOverride -}}
{{- .Values.ingest.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" (include "common.names.fullname" .) $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{/*
Create a default ingest service name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "elasticsearch.ingest.servicename" -}}
{{- if .Values.ingest.servicenameOverride -}}
{{- .Values.ingest.servicenameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-hl" (include "elasticsearch.ingest.fullname" .) | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{/*
Create a default fully qualified metrics name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "elasticsearch.metrics.fullname" -}}
{{- $name := default "metrics" .Values.metrics.nameOverride -}}
{{- if .Values.metrics.fullnameOverride -}}
{{- .Values.metrics.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" (include "common.names.fullname" .) $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{/*
Returns true if at least one master-eligible node replica has been configured.
*/}}
{{- define "elasticsearch.master.enabled" -}}
{{- if or .Values.master.autoscaling.enabled (gt (int .Values.master.replicaCount) 0) -}}
{{- true -}}
{{- end -}}
{{- end -}}
{{/*
Returns true if at least one coordinating-only node replica has been configured.
*/}}
{{- define "elasticsearch.coordinating.enabled" -}}
{{- if or .Values.coordinating.autoscaling.enabled (gt (int .Values.coordinating.replicaCount) 0) -}}
{{- true -}}
{{- end -}}
{{- end -}}
{{/*
Returns true if at least one data-only node replica has been configured.
*/}}
{{- define "elasticsearch.data.enabled" -}}
{{- if or .Values.data.autoscaling.enabled (gt (int .Values.data.replicaCount) 0) -}}
{{- true -}}
{{- end -}}
{{- end -}}
{{/*
Returns true if at least one ingest-only node replica has been configured.
*/}}
{{- define "elasticsearch.ingest.enabled" -}}
{{- if and .Values.ingest.enabled (or .Values.ingest.autoscaling.enabled (gt (int .Values.ingest.replicaCount) 0)) -}}
{{- true -}}
{{- end -}}
{{- end -}}
{{/*
Return the hostname of every ElasticSearch seed node
*/}}
{{- define "elasticsearch.hosts" -}}
{{- $clusterDomain := .Values.clusterDomain }}
{{- $releaseNamespace := include "common.names.namespace" . }}
{{- if (include "elasticsearch.master.enabled" .) -}}
{{- $masterFullname := include "elasticsearch.master.servicename" .}}
{{- $masterFullname }}.{{ $releaseNamespace }}.svc.{{ $clusterDomain }},
{{- end -}}
{{- if (include "elasticsearch.coordinating.enabled" .) -}}
{{- $coordinatingFullname := include "elasticsearch.coordinating.servicename" .}}
{{- $coordinatingFullname }}.{{ $releaseNamespace }}.svc.{{ $clusterDomain }},
{{- end -}}
{{- if (include "elasticsearch.data.enabled" .) -}}
{{- $dataFullname := include "elasticsearch.data.servicename" .}}
{{- $dataFullname }}.{{ $releaseNamespace }}.svc.{{ $clusterDomain }},
{{- end -}}
{{- if (include "elasticsearch.ingest.enabled" .) -}}
{{- $ingestFullname := include "elasticsearch.ingest.servicename" .}}
{{- $ingestFullname }}.{{ $releaseNamespace }}.svc.{{ $clusterDomain }},
{{- end -}}
{{- range .Values.extraHosts }}
{{- . }},
{{- end }}
{{- end -}}
{{/*
Get the initialization scripts volume name.
*/}}
{{- define "elasticsearch.initScripts" -}}
{{- printf "%s-init-scripts" (include "common.names.fullname" .) -}}
{{- end -}}
{{/*
Get the initialization scripts ConfigMap name.
*/}}
{{- define "elasticsearch.initScriptsCM" -}}
{{- printf "%s" .Values.initScriptsCM -}}
{{- end -}}
{{/*
Get the initialization scripts Secret name.
*/}}
{{- define "elasticsearch.initScriptsSecret" -}}
{{- printf "%s" .Values.initScriptsSecret -}}
{{- end -}}
{{/*
Create the name of the master service account to use
*/}}
{{- define "elasticsearch.master.serviceAccountName" -}}
{{- if .Values.master.serviceAccount.create -}}
{{ default (include "elasticsearch.master.fullname" .) .Values.master.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.master.serviceAccount.name }}
{{- end -}}
{{- end -}}
{{/*
Create the name of the coordinating-only service account to use
*/}}
{{- define "elasticsearch.coordinating.serviceAccountName" -}}
{{- if .Values.coordinating.serviceAccount.create -}}
{{ default (include "elasticsearch.coordinating.fullname" .) .Values.coordinating.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.coordinating.serviceAccount.name }}
{{- end -}}
{{- end -}}
{{/*
Create the name of the data service account to use
*/}}
{{- define "elasticsearch.data.serviceAccountName" -}}
{{- if .Values.data.serviceAccount.create -}}
{{ default (include "elasticsearch.data.fullname" .) .Values.data.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.data.serviceAccount.name }}
{{- end -}}
{{- end -}}
{{/*
Create the name of the ingest service account to use
*/}}
{{- define "elasticsearch.ingest.serviceAccountName" -}}
{{- if .Values.ingest.serviceAccount.create -}}
{{ default (include "elasticsearch.ingest.fullname" .) .Values.ingest.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.ingest.serviceAccount.name }}
{{- end -}}
{{- end -}}
{{/*
Return the elasticsearch TLS credentials secret for master nodes.
*/}}
{{- define "elasticsearch.master.tlsSecretName" -}}
{{- $secretName := .Values.security.tls.master.existingSecret -}}
{{- if $secretName -}}
{{- printf "%s" (tpl $secretName $) -}}
{{- else -}}
{{- printf "%s-crt" (include "elasticsearch.master.fullname" .) -}}
{{- end -}}
{{- end -}}
{{/*
Return the elasticsearch TLS credentials secret for data nodes.
*/}}
{{- define "elasticsearch.data.tlsSecretName" -}}
{{- $secretName := .Values.security.tls.data.existingSecret -}}
{{- if $secretName -}}
{{- printf "%s" (tpl $secretName $) -}}
{{- else -}}
{{- printf "%s-crt" (include "elasticsearch.data.fullname" .) -}}
{{- end -}}
{{- end -}}
{{/*
Return the elasticsearch TLS credentials secret for ingest nodes.
*/}}
{{- define "elasticsearch.ingest.tlsSecretName" -}}
{{- $secretName := .Values.security.tls.ingest.existingSecret -}}
{{- if $secretName -}}
{{- printf "%s" (tpl $secretName $) -}}
{{- else -}}
{{- printf "%s-crt" (include "elasticsearch.ingest.fullname" .) -}}
{{- end -}}
{{- end -}}
{{/*
Return the elasticsearch TLS credentials secret for coordinating-only nodes.
*/}}
{{- define "elasticsearch.coordinating.tlsSecretName" -}}
{{- $secretName := .Values.security.tls.coordinating.existingSecret -}}
{{- if $secretName -}}
{{- printf "%s" (tpl $secretName $) -}}
{{- else -}}
{{- printf "%s-crt" (include "elasticsearch.coordinating.fullname" .) -}}
{{- end -}}
{{- end -}}
{{/*
Return true if a TLS credentials secret object should be created
*/}}
{{- define "elasticsearch.createTlsSecret" -}}
{{- if and .Values.security.enabled .Values.security.tls.autoGenerated (not (include "elasticsearch.security.tlsSecretsProvided" .)) }}
{{- true -}}
{{- end -}}
{{- end -}}
{{/*
Return true if an authentication credentials secret object should be created
*/}}
{{- define "elasticsearch.createSecret" -}}
{{- if and .Values.security.enabled (not .Values.security.existingSecret) }}
{{- true -}}
{{- end -}}
{{- end -}}
{{/*
Return the Elasticsearch authentication credentials secret name
*/}}
{{- define "elasticsearch.secretName" -}}
{{- default (include "common.names.fullname" .) .Values.security.existingSecret -}}
{{- end -}}
{{/*
Return true if a TLS password secret object should be created
*/}}
{{- define "elasticsearch.createTlsPasswordsSecret" -}}
{{- if and .Values.security.enabled (not .Values.security.tls.passwordsSecret) (or .Values.security.tls.keystorePassword .Values.security.tls.truststorePassword .Values.security.tls.keyPassword ) }}
{{- true -}}
{{- end -}}
{{- end -}}
{{/*
Return the Elasticsearch TLS password secret name
*/}}
{{- define "elasticsearch.tlsPasswordsSecret" -}}
{{- default (printf "%s-tls-pass" (include "common.names.fullname" .)) .Values.security.tls.passwordsSecret -}}
{{- end -}}
{{/*
Returns the name of the secret key containing the Keystore password
*/}}
{{- define "elasticsearch.keystorePasswordKey" -}}
{{- if .Values.security.tls.secretKeystoreKey -}}
{{- printf "%s" .Values.security.tls.secretKeystoreKey -}}
{{- else -}}
{{- print "keystore-password"}}
{{- end -}}
{{- end -}}
{{/*
Returns the name of the secret key containing the Truststore password
*/}}
{{- define "elasticsearch.truststorePasswordKey" -}}
{{- if .Values.security.tls.secretTruststoreKey -}}
{{- printf "%s" .Values.security.tls.secretTruststoreKey -}}
{{- else -}}
{{- print "truststore-password"}}
{{- end -}}
{{- end -}}
{{/*
Returns the name of the secret key containing the PEM key password
*/}}
{{- define "elasticsearch.keyPasswordKey" -}}
{{- if .Values.security.tls.secretKey -}}
{{- printf "%s" .Values.security.tls.secretKey -}}
{{- else -}}
{{- print "key-password"}}
{{- end -}}
{{- end -}}
{{/*
Add environment variables to configure Elasticsearch security (authentication and TLS)
*/}}
{{- define "elasticsearch.configure.security" -}}
- name: ELASTICSEARCH_ENABLE_SECURITY
value: "true"
- name: ELASTICSEARCH_PASSWORD
valueFrom:
secretKeyRef:
name: {{ include "elasticsearch.secretName" . }}
key: elasticsearch-password
- name: ELASTICSEARCH_ENABLE_FIPS_MODE
value: {{ .Values.security.fipsMode | quote }}
- name: ELASTICSEARCH_TLS_VERIFICATION_MODE
value: {{ .Values.security.tls.verificationMode | quote }}
- name: ELASTICSEARCH_ENABLE_REST_TLS
value: {{ ternary "true" "false" .Values.security.tls.restEncryption | quote }}
{{- if or (include "elasticsearch.createTlsSecret" .) .Values.security.tls.usePemCerts }}
- name: ELASTICSEARCH_TLS_USE_PEM
value: "true"
{{- else }}
- name: ELASTICSEARCH_KEYSTORE_LOCATION
value: "/opt/bitnami/elasticsearch/config/certs/{{ .Values.security.tls.keystoreFilename }}"
- name: ELASTICSEARCH_TRUSTSTORE_LOCATION
value: "/opt/bitnami/elasticsearch/config/certs/{{ .Values.security.tls.truststoreFilename }}"
{{- end }}
{{- if and (not .Values.security.tls.usePemCerts) (or .Values.security.tls.keystorePassword .Values.security.tls.passwordsSecret) }}
- name: ELASTICSEARCH_KEYSTORE_PASSWORD
valueFrom:
secretKeyRef:
name: {{ include "elasticsearch.tlsPasswordsSecret" . }}
key: {{ include "elasticsearch.keystorePasswordKey" . | quote }}
{{- end }}
{{- if and (not .Values.security.tls.usePemCerts) (or .Values.security.tls.truststorePassword .Values.security.tls.passwordsSecret) }}
- name: ELASTICSEARCH_TRUSTSTORE_PASSWORD
valueFrom:
secretKeyRef:
name: {{ include "elasticsearch.tlsPasswordsSecret" . }}
key: {{ include "elasticsearch.truststorePasswordKey" . | quote }}
{{- end }}
{{- if and .Values.security.tls.usePemCerts (or .Values.security.tls.keyPassword .Values.security.tls.passwordsSecret) }}
- name: ELASTICSEARCH_KEY_PASSWORD
valueFrom:
secretKeyRef:
name: {{ include "elasticsearch.tlsPasswordsSecret" . }}
key: {{ include "elasticsearch.keyPasswordKey" . | quote }}
{{- end }}
{{- end -}}
{{/*
Returns true if at least one existing TLS secret was provided
*/}}
{{- define "elasticsearch.security.tlsSecretsProvided" -}}
{{- $masterSecret := (and (include "elasticsearch.master.enabled" .) .Values.security.tls.master.existingSecret) -}}
{{- $coordinatingSecret := (and (include "elasticsearch.coordinating.enabled" .) .Values.security.tls.coordinating.existingSecret) -}}
{{- $dataSecret := (and (include "elasticsearch.data.enabled" .) .Values.security.tls.data.existingSecret) -}}
{{- $ingestSecret := (and (include "elasticsearch.ingest.enabled" .) .Values.security.tls.ingest.existingSecret) -}}
{{- if or $masterSecret $coordinatingSecret $dataSecret $ingestSecret }}
{{- true -}}
{{- end -}}
{{- end -}}
{{/* Validate values of Elasticsearch - Existing secret not provided for master nodes */}}
{{- define "elasticsearch.validateValues.security.missingTlsSecrets.master" -}}
{{- $masterSecret := (and (include "elasticsearch.master.enabled" .) (not .Values.security.tls.master.existingSecret)) -}}
{{- if and .Values.security.enabled (include "elasticsearch.security.tlsSecretsProvided" .) $masterSecret -}}
elasticsearch: security.tls.master.existingSecret
Missing secret containing the TLS certificates for the Elasticsearch master nodes.
    Provide the certificates using --set security.tls.master.existingSecret="my-secret".
{{- end -}}
{{- end -}}
{{/* Validate values of Elasticsearch - Existing secret not provided for coordinating-only nodes */}}
{{- define "elasticsearch.validateValues.security.missingTlsSecrets.coordinating" -}}
{{- $coordinatingSecret := (and (include "elasticsearch.coordinating.enabled" .) (not .Values.security.tls.coordinating.existingSecret)) -}}
{{- if and .Values.security.enabled (include "elasticsearch.security.tlsSecretsProvided" .) $coordinatingSecret -}}
elasticsearch: security.tls.coordinating.existingSecret
Missing secret containing the TLS certificates for the Elasticsearch coordinating-only nodes.
    Provide the certificates using --set security.tls.coordinating.existingSecret="my-secret".
{{- end -}}
{{- end -}}
{{/* Validate values of Elasticsearch - Existing secret not provided for data nodes */}}
{{- define "elasticsearch.validateValues.security.missingTlsSecrets.data" -}}
{{- $dataSecret := (and (include "elasticsearch.data.enabled" .) (not .Values.security.tls.data.existingSecret)) -}}
{{- if and .Values.security.enabled (include "elasticsearch.security.tlsSecretsProvided" .) $dataSecret -}}
elasticsearch: security.tls.data.existingSecret
Missing secret containing the TLS certificates for the Elasticsearch data nodes.
    Provide the certificates using --set security.tls.data.existingSecret="my-secret".
{{- end -}}
{{- end -}}
{{/* Validate values of Elasticsearch - Existing secret not provided for ingest nodes */}}
{{- define "elasticsearch.validateValues.security.missingTlsSecrets.ingest" -}}
{{- $ingestSecret := (and (include "elasticsearch.ingest.enabled" .) (not .Values.security.tls.ingest.existingSecret)) -}}
{{- if and .Values.security.enabled (include "elasticsearch.security.tlsSecretsProvided" .) $ingestSecret -}}
elasticsearch: security.tls.ingest.existingSecret
Missing secret containing the TLS certificates for the Elasticsearch ingest nodes.
    Provide the certificates using --set security.tls.ingest.existingSecret="my-secret".
{{- end -}}
{{- end -}}
{{/* Validate values of Elasticsearch - TLS enabled but no certificates provided */}}
{{- define "elasticsearch.validateValues.security.tls" -}}
{{- if and .Values.security.enabled (not .Values.security.tls.autoGenerated) (not (include "elasticsearch.security.tlsSecretsProvided" .)) -}}
elasticsearch: security.tls
In order to enable X-Pack Security, it is necessary to configure TLS.
Three different mechanisms can be used:
        - Provide an existing secret containing the Keystore and Truststore for each role
        - Provide an existing secret containing the PEM certificates for each role and set `security.tls.usePemCerts=true`
        - Use auto-generated certificates by setting `security.tls.autoGenerated=true`
    Existing secrets containing either JKS/PKCS12 or PEM certificates can be provided using --set security.tls.master.existingSecret=master-certs,
    --set security.tls.data.existingSecret=data-certs, --set security.tls.coordinating.existingSecret=coordinating-certs, --set security.tls.ingest.existingSecret=ingest-certs
{{- end -}}
{{- end -}}
{{/* Validate that at least one Elasticsearch master node is configured */}}
{{- define "elasticsearch.validateValues.master.replicas" -}}
{{- if not (include "elasticsearch.master.enabled" .) -}}
elasticsearch: master.replicas
    Elasticsearch needs at least one master-eligible node to form a cluster.
{{- end -}}
{{- end -}}
{{/*
Compile all warnings into a single message, and call fail.
*/}}
{{- define "elasticsearch.validateValues" -}}
{{- $messages := list -}}
{{- $messages := append $messages (include "elasticsearch.validateValues.master.replicas" .) -}}
{{- $messages := append $messages (include "elasticsearch.validateValues.security.tls" .) -}}
{{- $messages := append $messages (include "elasticsearch.validateValues.security.missingTlsSecrets.master" .) -}}
{{- $messages := append $messages (include "elasticsearch.validateValues.security.missingTlsSecrets.data" .) -}}
{{- $messages := append $messages (include "elasticsearch.validateValues.security.missingTlsSecrets.coordinating" .) -}}
{{- $messages := append $messages (include "elasticsearch.validateValues.security.missingTlsSecrets.ingest" .) -}}
{{- $messages := without $messages "" -}}
{{- $message := join "\n" $messages -}}
{{- if $message -}}
{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}}
{{- end -}}
{{- end -}}
{{/*
Set a kernel parameter via sysctl only if the desired value is greater than the current one
*/}}
{{- define "elasticsearch.sysctlIfLess" -}}
CURRENT=`sysctl -n {{ .key }}`;
DESIRED="{{ .value }}";
if [ "$DESIRED" -gt "$CURRENT" ]; then
sysctl -w {{ .key }}={{ .value }};
fi;
{{- end -}}
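{{/*
For reference, a sketch of what the helper above renders when it is included with
(dict "key" "vm.max_map_count" "value" "262144"), as the node init containers below do:

CURRENT=`sysctl -n vm.max_map_count`;
DESIRED="262144";
if [ "$DESIRED" -gt "$CURRENT" ]; then
    sysctl -w vm.max_map_count=262144;
fi;
*/}}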

@@ -0,0 +1,23 @@
{{- if or .Values.config .Values.extraConfig }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "common.names.fullname" . }}
namespace: {{ include "common.names.namespace" . | quote }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
{{- end }}
{{- if .Values.commonAnnotations }}
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
data:
{{- if .Values.config }}
elasticsearch.yml: |-
{{- include "common.tplvalues.render" ( dict "value" .Values.config "context" $ ) | nindent 4 }}
{{- end }}
{{- if .Values.extraConfig }}
my_elasticsearch.yml: |-
{{- include "common.tplvalues.render" ( dict "value" .Values.extraConfig "context" $ ) | nindent 4 }}
{{- end }}
{{- end }}

@@ -0,0 +1,47 @@
{{- if and (include "elasticsearch.coordinating.enabled" .) .Values.coordinating.autoscaling.enabled }}
apiVersion: {{ include "common.capabilities.hpa.apiVersion" ( dict "context" $ ) }}
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "elasticsearch.coordinating.fullname" . }}
namespace: {{ include "common.names.namespace" . | quote }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
app.kubernetes.io/component: coordinating-only
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 4 }}
{{- end }}
{{- if .Values.commonAnnotations }}
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
spec:
scaleTargetRef:
apiVersion: {{ template "common.capabilities.statefulset.apiVersion" . }}
kind: StatefulSet
name: {{ include "elasticsearch.coordinating.fullname" . }}
minReplicas: {{ .Values.coordinating.autoscaling.minReplicas }}
maxReplicas: {{ .Values.coordinating.autoscaling.maxReplicas }}
metrics:
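    ## Kubernetes < 1.23 only supports the flat targetAverageUtilization field (autoscaling/v2beta1); newer clusters use the autoscaling/v2 target block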
{{- if .Values.coordinating.autoscaling.targetCPU }}
- type: Resource
resource:
name: cpu
{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .) }}
targetAverageUtilization: {{ .Values.coordinating.autoscaling.targetCPU }}
{{- else }}
target:
type: Utilization
averageUtilization: {{ .Values.coordinating.autoscaling.targetCPU }}
{{- end }}
{{- end }}
{{- if .Values.coordinating.autoscaling.targetMemory }}
- type: Resource
resource:
name: memory
{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .) }}
targetAverageUtilization: {{ .Values.coordinating.autoscaling.targetMemory }}
{{- else }}
target:
type: Utilization
averageUtilization: {{ .Values.coordinating.autoscaling.targetMemory }}
{{- end }}
{{- end }}
{{- end }}

@@ -0,0 +1,22 @@
{{- if and (include "elasticsearch.coordinating.enabled" .) .Values.coordinating.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "elasticsearch.coordinating.serviceAccountName" . }}
namespace: {{ include "common.names.namespace" . | quote }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
app.kubernetes.io/component: coordinating-only
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 4 }}
{{- end }}
{{- if or .Values.coordinating.serviceAccount.annotations .Values.commonAnnotations }}
annotations:
{{- if .Values.commonAnnotations }}
{{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }}
{{- end }}
{{- if .Values.coordinating.serviceAccount.annotations }}
{{- include "common.tplvalues.render" (dict "value" .Values.coordinating.serviceAccount.annotations "context" $) | nindent 4 }}
{{- end }}
{{- end }}
automountServiceAccountToken: {{ .Values.coordinating.serviceAccount.automountServiceAccountToken }}
{{- end -}}

@@ -0,0 +1,313 @@
{{- if (include "elasticsearch.coordinating.enabled" .) }}
apiVersion: {{ template "common.capabilities.statefulset.apiVersion" . }}
kind: StatefulSet
metadata:
name: {{ include "elasticsearch.coordinating.fullname" . }}
namespace: {{ include "common.names.namespace" . | quote }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
app.kubernetes.io/component: coordinating-only
{{- if .Values.useIstioLabels }}
## Istio Labels: https://istio.io/docs/ops/deployment/requirements/
app: coordinating-only
{{- end }}
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
{{- end }}
{{- if or .Values.coordinating.annotations .Values.commonAnnotations }}
annotations:
{{- if .Values.coordinating.annotations }}
{{- include "common.tplvalues.render" ( dict "value" .Values.coordinating.annotations "context" $ ) | nindent 4 }}
{{- end }}
{{- if .Values.commonAnnotations }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
{{- end }}
spec:
{{- if not .Values.coordinating.autoscaling.enabled }}
replicas: {{ .Values.coordinating.replicaCount }}
{{- end }}
selector:
matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
app.kubernetes.io/component: coordinating-only
{{- if .Values.coordinating.updateStrategy }}
updateStrategy: {{- toYaml .Values.coordinating.updateStrategy | nindent 4 }}
{{- end }}
serviceName: {{ include "elasticsearch.coordinating.servicename" . }}
podManagementPolicy: {{ .Values.coordinating.podManagementPolicy }}
template:
metadata:
labels: {{- include "common.labels.standard" . | nindent 8 }}
app.kubernetes.io/component: coordinating-only
{{- if .Values.useIstioLabels }}
## Istio Labels: https://istio.io/docs/ops/deployment/requirements/
app: coordinating-only
{{- end }}
{{- if .Values.coordinating.podLabels }}
{{- include "common.tplvalues.render" (dict "value" .Values.coordinating.podLabels "context" $) | nindent 8 }}
{{- end }}
annotations:
{{- if and (include "elasticsearch.createTlsSecret" .) (not .Values.security.tls.coordinating.existingSecret) }}
checksum/tls: {{ include (print $.Template.BasePath "/tls-secret.yaml") . | sha256sum }}
{{- end }}
{{- if .Values.coordinating.podAnnotations }}
{{- include "common.tplvalues.render" (dict "value" .Values.coordinating.podAnnotations "context" $) | nindent 8 }}
{{- end }}
spec:
serviceAccountName: {{ template "elasticsearch.coordinating.serviceAccountName" . }}
{{- include "elasticsearch.imagePullSecrets" . | nindent 6 }}
{{- if .Values.coordinating.hostAliases }}
hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.hostAliases "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.coordinating.affinity }}
affinity: {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.affinity "context" $) | nindent 8 }}
{{- else }}
affinity:
podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.coordinating.podAffinityPreset "component" "coordinating-only" "context" $) | nindent 10 }}
podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.coordinating.podAntiAffinityPreset "component" "coordinating-only" "context" $) | nindent 10 }}
nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.coordinating.nodeAffinityPreset.type "key" .Values.coordinating.nodeAffinityPreset.key "values" .Values.coordinating.nodeAffinityPreset.values) | nindent 10 }}
{{- end }}
{{- if .Values.coordinating.nodeSelector }}
nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.nodeSelector "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.coordinating.tolerations }}
tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.tolerations "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.coordinating.schedulerName }}
schedulerName: {{ .Values.coordinating.schedulerName }}
{{- end }}
{{- if .Values.coordinating.topologySpreadConstraints }}
topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.topologySpreadConstraints "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.coordinating.priorityClassName }}
priorityClassName: {{ .Values.coordinating.priorityClassName | quote }}
{{- end }}
{{- if .Values.coordinating.terminationGracePeriodSeconds }}
terminationGracePeriodSeconds: {{ .Values.coordinating.terminationGracePeriodSeconds }}
{{- end }}
{{- if .Values.coordinating.podSecurityContext.enabled }}
securityContext: {{- omit .Values.coordinating.podSecurityContext "enabled" | toYaml | nindent 8 }}
{{- end }}
{{- if or .Values.coordinating.initContainers .Values.sysctlImage.enabled }}
initContainers:
{{- if .Values.sysctlImage.enabled }}
        ## Init container that runs sysctl to modify kernel settings (sometimes required to avoid Elasticsearch bootstrap errors)
- name: sysctl
image: {{ include "elasticsearch.sysctl.image" . }}
imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
command:
- /bin/bash
- -ec
- |
{{- include "elasticsearch.sysctlIfLess" (dict "key" "vm.max_map_count" "value" "262144") | nindent 14 }}
{{- include "elasticsearch.sysctlIfLess" (dict "key" "fs.file-max" "value" "65536") | nindent 14 }}
securityContext:
privileged: true
runAsUser: 0
{{- if .Values.sysctlImage.resources }}
resources: {{- toYaml .Values.sysctlImage.resources | nindent 12 }}
{{- end }}
{{- end }}
{{- if .Values.coordinating.initContainers }}
{{- include "common.tplvalues.render" (dict "value" .Values.coordinating.initContainers "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.initContainers }}
{{- include "common.tplvalues.render" (dict "value" .Values.initContainers "context" $) | nindent 8 }}
{{- end }}
{{- end }}
containers:
- name: elasticsearch
image: {{ include "elasticsearch.image" . }}
imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
{{- if .Values.coordinating.containerSecurityContext.enabled }}
securityContext: {{- omit .Values.coordinating.containerSecurityContext "enabled" | toYaml | nindent 12 }}
{{- end }}
{{- if .Values.diagnosticMode.enabled }}
command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }}
{{- else if .Values.coordinating.command }}
command: {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.command "context" $) | nindent 12 }}
{{- end }}
{{- if .Values.diagnosticMode.enabled }}
args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }}
{{- else if .Values.coordinating.args }}
args: {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.args "context" $) | nindent 12 }}
{{- end }}
{{- if .Values.coordinating.lifecycleHooks }}
lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.lifecycleHooks "context" $) | nindent 12 }}
{{- end }}
env:
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: BITNAMI_DEBUG
value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }}
- name: ELASTICSEARCH_CLUSTER_NAME
value: {{ .Values.clusterName | quote }}
- name: ELASTICSEARCH_IS_DEDICATED_NODE
value: "yes"
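            ## An empty node roles list makes these pods coordinating-only nodes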
- name: ELASTICSEARCH_NODE_ROLES
value: ""
- name: ELASTICSEARCH_TRANSPORT_PORT_NUMBER
value: {{ .Values.containerPorts.transport | quote }}
- name: ELASTICSEARCH_HTTP_PORT_NUMBER
value: {{ .Values.containerPorts.restAPI | quote }}
- name: ELASTICSEARCH_CLUSTER_HOSTS
value: {{ include "elasticsearch.hosts" . | quote }}
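            ## Expected cluster size: master replicas plus data replicas (minReplicas when autoscaling is enabled)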
- name: ELASTICSEARCH_TOTAL_NODES
value: {{ add (ternary .Values.master.autoscaling.minReplicas .Values.master.replicaCount .Values.master.autoscaling.enabled) (ternary .Values.data.autoscaling.minReplicas .Values.data.replicaCount .Values.data.autoscaling.enabled) | quote }}
- name: ELASTICSEARCH_CLUSTER_MASTER_HOSTS
{{- $elasticsearchMasterFullname := include "elasticsearch.master.fullname" . }}
{{- $replicas := int (ternary .Values.master.autoscaling.minReplicas .Values.master.replicaCount .Values.master.autoscaling.enabled) }}
value: {{ range $i, $e := until $replicas }}{{ printf "%s-%d" $elasticsearchMasterFullname $e }} {{ end }}
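            ## Master quorum: floor(master replicas / 2) + 1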
- name: ELASTICSEARCH_MINIMUM_MASTER_NODES
value: {{ add (div (ternary .Values.master.autoscaling.minReplicas .Values.master.replicaCount .Values.master.autoscaling.enabled) 2) 1 | quote }}
- name: ELASTICSEARCH_ADVERTISED_HOSTNAME
value: "$(MY_POD_NAME).{{ (include "elasticsearch.coordinating.servicename" .)}}.{{ include "common.names.namespace" . }}.svc.{{ .Values.clusterDomain }}"
{{- if .Values.plugins }}
- name: ELASTICSEARCH_PLUGINS
value: {{ .Values.plugins | quote }}
{{- end }}
{{- if .Values.coordinating.heapSize }}
- name: ELASTICSEARCH_HEAP_SIZE
value: {{ .Values.coordinating.heapSize | quote }}
{{- end }}
{{- if .Values.security.enabled }}
{{- include "elasticsearch.configure.security" . | nindent 12 }}
{{- end }}
{{- if .Values.coordinating.extraEnvVars }}
{{- include "common.tplvalues.render" ( dict "value" .Values.coordinating.extraEnvVars "context" $ ) | nindent 12 }}
{{- end }}
{{- if .Values.extraEnvVars }}
{{- include "common.tplvalues.render" ( dict "value" .Values.extraEnvVars "context" $ ) | nindent 12 }}
{{- end }}
{{- if or .Values.extraEnvVarsCM .Values.extraEnvVarsSecret .Values.coordinating.extraEnvVarsCM .Values.coordinating.extraEnvVarsSecret }}
envFrom:
{{- if .Values.extraEnvVarsCM }}
- configMapRef:
name: {{ include "common.tplvalues.render" ( dict "value" .Values.extraEnvVarsCM "context" $ ) }}
{{- end }}
{{- if .Values.coordinating.extraEnvVarsCM }}
- configMapRef:
name: {{ include "common.tplvalues.render" ( dict "value" .Values.coordinating.extraEnvVarsCM "context" $ ) }}
{{- end }}
{{- if .Values.extraEnvVarsSecret }}
- secretRef:
name: {{ include "common.tplvalues.render" ( dict "value" .Values.extraEnvVarsSecret "context" $ ) }}
{{- end }}
{{- if .Values.coordinating.extraEnvVarsSecret }}
- secretRef:
name: {{ include "common.tplvalues.render" ( dict "value" .Values.coordinating.extraEnvVarsSecret "context" $ ) }}
{{- end }}
{{- end }}
ports:
- name: rest-api
containerPort: {{ .Values.containerPorts.restAPI }}
- name: transport
containerPort: {{ .Values.containerPorts.transport }}
{{- if not .Values.diagnosticMode.enabled }}
{{- if .Values.coordinating.customStartupProbe }}
startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.customStartupProbe "context" $) | nindent 12 }}
{{- else if .Values.coordinating.startupProbe.enabled }}
startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.coordinating.startupProbe "enabled") "context" $) | nindent 12 }}
tcpSocket:
port: rest-api
{{- end }}
{{- if .Values.coordinating.customLivenessProbe }}
livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.customLivenessProbe "context" $) | nindent 12 }}
{{- else if .Values.coordinating.livenessProbe.enabled }}
livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.coordinating.livenessProbe "enabled") "context" $) | nindent 12 }}
exec:
command:
- /opt/bitnami/scripts/elasticsearch/healthcheck.sh
{{- end }}
{{- if .Values.coordinating.customReadinessProbe }}
readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.customReadinessProbe "context" $) | nindent 12 }}
{{- else if .Values.coordinating.readinessProbe.enabled }}
readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.coordinating.readinessProbe "enabled") "context" $) | nindent 12 }}
exec:
command:
- /opt/bitnami/scripts/elasticsearch/healthcheck.sh
{{- end }}
{{- end }}
{{- if .Values.coordinating.resources }}
resources: {{- toYaml .Values.coordinating.resources | nindent 12 }}
{{- end }}
volumeMounts:
- name: data
mountPath: /bitnami/elasticsearch/data
{{- if .Values.config }}
- mountPath: /opt/bitnami/elasticsearch/config/elasticsearch.yml
name: config
subPath: elasticsearch.yml
{{- end }}
{{- if .Values.extraConfig }}
- mountPath: /opt/bitnami/elasticsearch/config/my_elasticsearch.yml
name: config
subPath: my_elasticsearch.yml
{{- end }}
{{- if .Values.security.enabled }}
- name: elasticsearch-certificates
mountPath: /opt/bitnami/elasticsearch/config/certs
readOnly: true
{{- end }}
{{- if .Values.initScripts }}
- name: custom-init-scripts
mountPath: /docker-entrypoint-initdb.d/init-scripts
{{- end }}
{{- if .Values.initScriptsCM }}
- name: custom-init-scripts-cm
mountPath: /docker-entrypoint-initdb.d/init-scripts-cm
{{- end }}
{{- if .Values.initScriptsSecret }}
- name: custom-init-scripts-secret
mountPath: /docker-entrypoint-initdb.d/init-scripts-secret
{{- end }}
{{- if .Values.coordinating.extraVolumeMounts }}
{{- include "common.tplvalues.render" (dict "value" .Values.coordinating.extraVolumeMounts "context" $) | nindent 12 }}
{{- end }}
{{- if .Values.extraVolumeMounts }}
{{- include "common.tplvalues.render" (dict "value" .Values.extraVolumeMounts "context" $) | nindent 12 }}
{{- end }}
{{- if .Values.coordinating.sidecars }}
{{- include "common.tplvalues.render" ( dict "value" .Values.coordinating.sidecars "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.sidecars }}
{{- include "common.tplvalues.render" ( dict "value" .Values.sidecars "context" $) | nindent 8 }}
{{- end }}
volumes:
- name: "data"
emptyDir: {}
{{- if or .Values.config .Values.extraConfig }}
- name: config
configMap:
name: {{ include "common.names.fullname" . }}
{{- end }}
{{- if .Values.security.enabled }}
- name: elasticsearch-certificates
secret:
secretName: {{ include "elasticsearch.coordinating.tlsSecretName" . }}
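            ## 256 decimal == 0400 octal (certificates readable by the owner only)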
defaultMode: 256
{{- end }}
{{- if .Values.initScripts }}
- name: custom-init-scripts
configMap:
name: {{ template "elasticsearch.initScripts" . }}
{{- end }}
{{- if .Values.initScriptsCM }}
- name: custom-init-scripts-cm
configMap:
name: {{ template "elasticsearch.initScriptsCM" . }}
{{- end }}
{{- if .Values.initScriptsSecret }}
- name: custom-init-scripts-secret
secret:
secretName: {{ template "elasticsearch.initScriptsSecret" . }}
defaultMode: 0755
{{- end }}
{{- if .Values.coordinating.extraVolumes }}
{{- include "common.tplvalues.render" (dict "value" .Values.coordinating.extraVolumes "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.extraVolumes }}
{{- include "common.tplvalues.render" (dict "value" .Values.extraVolumes "context" $) | nindent 8 }}
{{- end }}
{{- end }}

@@ -0,0 +1,28 @@
{{- if (include "elasticsearch.coordinating.enabled" .) }}
apiVersion: v1
kind: Service
metadata:
name: {{ include "elasticsearch.coordinating.servicename" . }}
namespace: {{ include "common.names.namespace" . | quote }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
app.kubernetes.io/component: coordinating-only
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 4 }}
{{- end }}
{{- if .Values.commonAnnotations }}
annotations: {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }}
{{- end }}
spec:
type: ClusterIP
clusterIP: None
publishNotReadyAddresses: true
ports:
- name: tcp-rest-api
port: {{ .Values.containerPorts.restAPI }}
targetPort: rest-api
- name: tcp-transport
port: {{ .Values.containerPorts.transport }}
targetPort: transport
selector: {{- include "common.labels.matchLabels" . | nindent 4 }}
app.kubernetes.io/component: coordinating-only
{{- end }}

@@ -0,0 +1,47 @@
{{- if and (include "elasticsearch.data.enabled" .) .Values.data.autoscaling.enabled }}
apiVersion: {{ include "common.capabilities.hpa.apiVersion" ( dict "context" $ ) }}
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "elasticsearch.data.fullname" . }}
namespace: {{ include "common.names.namespace" . | quote }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
app.kubernetes.io/component: data
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 4 }}
{{- end }}
{{- if .Values.commonAnnotations }}
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
spec:
scaleTargetRef:
apiVersion: {{ template "common.capabilities.statefulset.apiVersion" . }}
kind: StatefulSet
name: {{ include "elasticsearch.data.fullname" . }}
minReplicas: {{ .Values.data.autoscaling.minReplicas }}
maxReplicas: {{ .Values.data.autoscaling.maxReplicas }}
metrics:
{{- if .Values.data.autoscaling.targetCPU }}
- type: Resource
resource:
name: cpu
{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .) }}
targetAverageUtilization: {{ .Values.data.autoscaling.targetCPU }}
{{- else }}
target:
type: Utilization
averageUtilization: {{ .Values.data.autoscaling.targetCPU }}
{{- end }}
{{- end }}
{{- if .Values.data.autoscaling.targetMemory }}
- type: Resource
resource:
name: memory
{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .) }}
targetAverageUtilization: {{ .Values.data.autoscaling.targetMemory }}
{{- else }}
target:
type: Utilization
averageUtilization: {{ .Values.data.autoscaling.targetMemory }}
{{- end }}
{{- end }}
{{- end }}

@@ -0,0 +1,22 @@
{{- if and (include "elasticsearch.data.enabled" .) .Values.data.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "elasticsearch.data.serviceAccountName" . }}
namespace: {{ include "common.names.namespace" . | quote }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
app.kubernetes.io/component: data
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 4 }}
{{- end }}
{{- if or .Values.data.serviceAccount.annotations .Values.commonAnnotations }}
annotations:
{{- if .Values.commonAnnotations }}
{{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }}
{{- end }}
{{- if .Values.data.serviceAccount.annotations }}
{{- include "common.tplvalues.render" (dict "value" .Values.data.serviceAccount.annotations "context" $) | nindent 4 }}
{{- end }}
{{- end }}
automountServiceAccountToken: {{ .Values.data.serviceAccount.automountServiceAccountToken }}
{{- end -}}

@@ -0,0 +1,373 @@
{{- if (include "elasticsearch.data.enabled" .) }}
apiVersion: {{ template "common.capabilities.statefulset.apiVersion" . }}
kind: StatefulSet
metadata:
name: {{ include "elasticsearch.data.fullname" . }}
namespace: {{ include "common.names.namespace" . | quote }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
app.kubernetes.io/component: data
{{- if .Values.useIstioLabels }}
## Istio Labels: https://istio.io/docs/ops/deployment/requirements/
app: data
{{- end }}
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
{{- end }}
{{- if or .Values.data.annotations .Values.commonAnnotations }}
annotations:
{{- if .Values.data.annotations }}
{{- include "common.tplvalues.render" ( dict "value" .Values.data.annotations "context" $ ) | nindent 4 }}
{{- end }}
{{- if .Values.commonAnnotations }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
{{- end }}
spec:
{{- if not .Values.data.autoscaling.enabled }}
replicas: {{ .Values.data.replicaCount }}
{{- end }}
podManagementPolicy: {{ .Values.data.podManagementPolicy }}
selector:
matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
app.kubernetes.io/component: data
serviceName: {{ include "elasticsearch.data.servicename" . }}
{{- if .Values.data.updateStrategy }}
updateStrategy: {{- toYaml .Values.data.updateStrategy | nindent 4 }}
{{- end }}
template:
metadata:
labels: {{- include "common.labels.standard" . | nindent 8 }}
app.kubernetes.io/component: data
{{- if .Values.useIstioLabels }}
## Istio Labels: https://istio.io/docs/ops/deployment/requirements/
app: data
{{- end }}
{{- if .Values.data.podLabels }}
{{- include "common.tplvalues.render" (dict "value" .Values.data.podLabels "context" $) | nindent 8 }}
{{- end }}
annotations:
{{- if and (include "elasticsearch.createTlsSecret" .) (not .Values.security.tls.data.existingSecret) }}
checksum/tls: {{ include (print $.Template.BasePath "/tls-secret.yaml") . | sha256sum }}
{{- end }}
{{- if .Values.data.podAnnotations }}
{{- include "common.tplvalues.render" (dict "value" .Values.data.podAnnotations "context" $) | nindent 8 }}
{{- end }}
spec:
serviceAccountName: {{ template "elasticsearch.data.serviceAccountName" . }}
{{- include "elasticsearch.imagePullSecrets" . | nindent 6 }}
{{- if .Values.data.hostAliases }}
hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.data.hostAliases "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.data.affinity }}
affinity: {{- include "common.tplvalues.render" (dict "value" .Values.data.affinity "context" $) | nindent 8 }}
{{- else }}
affinity:
podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.data.podAffinityPreset "component" "data" "context" $) | nindent 10 }}
podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.data.podAntiAffinityPreset "component" "data" "context" $) | nindent 10 }}
nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.data.nodeAffinityPreset.type "key" .Values.data.nodeAffinityPreset.key "values" .Values.data.nodeAffinityPreset.values) | nindent 10 }}
{{- end }}
{{- if .Values.data.nodeSelector }}
nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.data.nodeSelector "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.data.tolerations }}
tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.data.tolerations "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.data.schedulerName }}
schedulerName: {{ .Values.data.schedulerName }}
{{- end }}
{{- if .Values.data.topologySpreadConstraints }}
topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.data.topologySpreadConstraints "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.data.priorityClassName }}
priorityClassName: {{ .Values.data.priorityClassName | quote }}
{{- end }}
{{- if .Values.data.podSecurityContext.enabled }}
securityContext: {{- omit .Values.data.podSecurityContext "enabled" | toYaml | nindent 8 }}
{{- end }}
{{- if .Values.data.terminationGracePeriodSeconds }}
terminationGracePeriodSeconds: {{ .Values.data.terminationGracePeriodSeconds }}
{{- end }}
{{- if or .Values.data.initContainers .Values.sysctlImage.enabled (and .Values.volumePermissions.enabled .Values.data.persistence.enabled) }}
initContainers:
{{- if .Values.sysctlImage.enabled }}
        ## Init container that runs sysctl to modify kernel settings (sometimes required to avoid Elasticsearch bootstrap errors)
- name: sysctl
image: {{ include "elasticsearch.sysctl.image" . }}
imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
command:
- /bin/bash
- -ec
- |
{{- include "elasticsearch.sysctlIfLess" (dict "key" "vm.max_map_count" "value" "262144") | nindent 14 }}
{{- include "elasticsearch.sysctlIfLess" (dict "key" "fs.file-max" "value" "65536") | nindent 14 }}
securityContext:
privileged: true
runAsUser: 0
{{- if .Values.sysctlImage.resources }}
resources: {{- toYaml .Values.sysctlImage.resources | nindent 12 }}
{{- end }}
{{- end }}
{{- if and .Values.volumePermissions.enabled .Values.data.persistence.enabled }}
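        ## Fix ownership of the data directory so the Elasticsearch container user can write to it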
- name: volume-permissions
image: {{ include "elasticsearch.volumePermissions.image" . }}
imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }}
command:
- /bin/bash
- -ec
- |
mkdir -p /bitnami/elasticsearch/data
chown {{ .Values.data.containerSecurityContext.runAsUser }}:{{ .Values.data.podSecurityContext.fsGroup }} /bitnami/elasticsearch/data
find /bitnami/elasticsearch/data -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | xargs -r chown -R {{ .Values.data.containerSecurityContext.runAsUser }}:{{ .Values.data.podSecurityContext.fsGroup }}
securityContext:
runAsUser: 0
{{- if .Values.volumePermissions.resources }}
resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }}
{{- end }}
volumeMounts:
- name: data
mountPath: /bitnami/elasticsearch/data
{{- end }}
{{- if .Values.data.initContainers }}
{{- include "common.tplvalues.render" (dict "value" .Values.data.initContainers "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.initContainers }}
{{- include "common.tplvalues.render" (dict "value" .Values.initContainers "context" $) | nindent 8 }}
{{- end }}
{{- end }}
containers:
- name: elasticsearch
image: {{ include "elasticsearch.image" . }}
imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
{{- if .Values.data.containerSecurityContext.enabled }}
securityContext: {{- omit .Values.data.containerSecurityContext "enabled" | toYaml | nindent 12 }}
{{- end }}
{{- if .Values.diagnosticMode.enabled }}
command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }}
{{- else if .Values.data.command }}
command: {{- include "common.tplvalues.render" (dict "value" .Values.data.command "context" $) | nindent 12 }}
{{- end }}
{{- if .Values.diagnosticMode.enabled }}
args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }}
{{- else if .Values.data.args }}
args: {{- include "common.tplvalues.render" (dict "value" .Values.data.args "context" $) | nindent 12 }}
{{- end }}
{{- if .Values.data.lifecycleHooks }}
lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.data.lifecycleHooks "context" $) | nindent 12 }}
{{- end }}
env:
- name: BITNAMI_DEBUG
value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }}
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: ELASTICSEARCH_IS_DEDICATED_NODE
value: "yes"
- name: ELASTICSEARCH_NODE_ROLES
value: "data"
- name: ELASTICSEARCH_TRANSPORT_PORT_NUMBER
value: {{ .Values.containerPorts.transport | quote }}
- name: ELASTICSEARCH_HTTP_PORT_NUMBER
value: {{ .Values.containerPorts.restAPI | quote }}
- name: ELASTICSEARCH_CLUSTER_NAME
value: {{ .Values.clusterName | quote }}
- name: ELASTICSEARCH_CLUSTER_HOSTS
value: {{ include "elasticsearch.hosts" . | quote }}
- name: ELASTICSEARCH_TOTAL_NODES
value: {{ add (ternary .Values.master.autoscaling.minReplicas .Values.master.replicaCount .Values.master.autoscaling.enabled) (ternary .Values.data.autoscaling.minReplicas .Values.data.replicaCount .Values.data.autoscaling.enabled) | quote }}
- name: ELASTICSEARCH_CLUSTER_MASTER_HOSTS
{{- $elasticsearchMasterFullname := include "elasticsearch.master.fullname" . }}
{{- $replicas := int (ternary .Values.master.autoscaling.minReplicas .Values.master.replicaCount .Values.master.autoscaling.enabled) }}
value: {{ range $i, $e := until $replicas }}{{ printf "%s-%d" $elasticsearchMasterFullname $e }} {{ end }}
- name: ELASTICSEARCH_MINIMUM_MASTER_NODES
value: {{ add (div (ternary .Values.master.autoscaling.minReplicas .Values.master.replicaCount .Values.master.autoscaling.enabled) 2) 1 | quote }}
- name: ELASTICSEARCH_ADVERTISED_HOSTNAME
value: "$(MY_POD_NAME).{{ (include "elasticsearch.data.servicename" .)}}.{{ include "common.names.namespace" . }}.svc.{{ .Values.clusterDomain }}"
{{- if .Values.plugins }}
- name: ELASTICSEARCH_PLUGINS
value: {{ .Values.plugins | quote }}
{{- end }}
{{- if .Values.snapshotRepoPath }}
- name: ELASTICSEARCH_FS_SNAPSHOT_REPO_PATH
value: {{ .Values.snapshotRepoPath | quote }}
{{- end }}
{{- if .Values.data.heapSize }}
- name: ELASTICSEARCH_HEAP_SIZE
value: {{ .Values.data.heapSize | quote }}
{{- end }}
{{- if .Values.security.enabled }}
{{- include "elasticsearch.configure.security" . | nindent 12 }}
{{- end }}
{{- if .Values.data.extraEnvVars }}
{{- include "common.tplvalues.render" ( dict "value" .Values.data.extraEnvVars "context" $ ) | nindent 12 }}
{{- end }}
{{- if .Values.extraEnvVars }}
{{- include "common.tplvalues.render" ( dict "value" .Values.extraEnvVars "context" $ ) | nindent 12 }}
{{- end }}
{{- if or .Values.extraEnvVarsCM .Values.extraEnvVarsSecret .Values.data.extraEnvVarsCM .Values.data.extraEnvVarsSecret }}
envFrom:
{{- if .Values.extraEnvVarsCM }}
- configMapRef:
name: {{ include "common.tplvalues.render" ( dict "value" .Values.extraEnvVarsCM "context" $ ) }}
{{- end }}
{{- if .Values.data.extraEnvVarsCM }}
- configMapRef:
name: {{ include "common.tplvalues.render" ( dict "value" .Values.data.extraEnvVarsCM "context" $ ) }}
{{- end }}
{{- if .Values.extraEnvVarsSecret }}
- secretRef:
name: {{ include "common.tplvalues.render" ( dict "value" .Values.extraEnvVarsSecret "context" $ ) }}
{{- end }}
{{- if .Values.data.extraEnvVarsSecret }}
- secretRef:
name: {{ include "common.tplvalues.render" ( dict "value" .Values.data.extraEnvVarsSecret "context" $ ) }}
{{- end }}
{{- end }}
ports:
- name: rest-api
containerPort: {{ .Values.containerPorts.restAPI }}
- name: transport
containerPort: {{ .Values.containerPorts.transport }}
{{- if not .Values.diagnosticMode.enabled }}
{{- if .Values.data.customStartupProbe }}
startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.data.customStartupProbe "context" $) | nindent 12 }}
{{- else if .Values.data.startupProbe.enabled }}
startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.data.startupProbe "enabled") "context" $) | nindent 12 }}
tcpSocket:
port: rest-api
{{- end }}
{{- if .Values.data.customLivenessProbe }}
livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.data.customLivenessProbe "context" $) | nindent 12 }}
{{- else if .Values.data.livenessProbe.enabled }}
livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.data.livenessProbe "enabled") "context" $) | nindent 12 }}
exec:
command:
- /opt/bitnami/scripts/elasticsearch/healthcheck.sh
{{- end }}
{{- if .Values.data.customReadinessProbe }}
readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.data.customReadinessProbe "context" $) | nindent 12 }}
{{- else if .Values.data.readinessProbe.enabled }}
readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.data.readinessProbe "enabled") "context" $) | nindent 12 }}
exec:
command:
- /opt/bitnami/scripts/elasticsearch/healthcheck.sh
{{- end }}
{{- end }}
{{- if .Values.data.resources }}
resources: {{- toYaml .Values.data.resources | nindent 12 }}
{{- end }}
volumeMounts:
- name: data
mountPath: /bitnami/elasticsearch/data
{{- if .Values.config }}
- mountPath: /opt/bitnami/elasticsearch/config/elasticsearch.yml
name: config
subPath: elasticsearch.yml
{{- end }}
{{- if .Values.extraConfig }}
- mountPath: /opt/bitnami/elasticsearch/config/my_elasticsearch.yml
name: config
subPath: my_elasticsearch.yml
{{- end }}
{{- if .Values.security.enabled }}
- name: elasticsearch-certificates
mountPath: /opt/bitnami/elasticsearch/config/certs
readOnly: true
{{- end }}
{{- if .Values.initScripts }}
- name: custom-init-scripts
mountPath: /docker-entrypoint-initdb.d/init-scripts
{{- end }}
{{- if .Values.initScriptsCM }}
- name: custom-init-scripts-cm
mountPath: /docker-entrypoint-initdb.d/init-scripts-cm
{{- end }}
{{- if .Values.initScriptsSecret }}
- name: custom-init-scripts-secret
mountPath: /docker-entrypoint-initdb.d/init-scripts-secret
{{- end }}
{{- if .Values.data.extraVolumeMounts }}
{{- include "common.tplvalues.render" (dict "value" .Values.data.extraVolumeMounts "context" $) | nindent 12 }}
{{- end }}
{{- if .Values.extraVolumeMounts }}
{{- include "common.tplvalues.render" (dict "value" .Values.extraVolumeMounts "context" $) | nindent 12 }}
{{- end }}
{{- if .Values.data.sidecars }}
{{- include "common.tplvalues.render" ( dict "value" .Values.data.sidecars "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.sidecars }}
{{- include "common.tplvalues.render" ( dict "value" .Values.sidecars "context" $) | nindent 8 }}
{{- end }}
volumes:
{{- if or .Values.config .Values.extraConfig }}
- name: config
configMap:
name: {{ template "common.names.fullname" . }}
{{- end }}
{{- if .Values.security.enabled }}
- name: elasticsearch-certificates
secret:
secretName: {{ template "elasticsearch.data.tlsSecretName" . }}
defaultMode: 256
{{- end }}
{{- if .Values.initScripts }}
- name: custom-init-scripts
configMap:
name: {{ template "elasticsearch.initScripts" . }}
{{- end }}
{{- if .Values.initScriptsCM }}
- name: custom-init-scripts-cm
configMap:
name: {{ template "elasticsearch.initScriptsCM" . }}
{{- end }}
{{- if .Values.initScriptsSecret }}
- name: custom-init-scripts-secret
secret:
secretName: {{ template "elasticsearch.initScriptsSecret" . }}
defaultMode: 0755
{{- end }}
{{- if .Values.data.extraVolumes }}
{{- include "common.tplvalues.render" (dict "value" .Values.data.extraVolumes "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.extraVolumes }}
{{- include "common.tplvalues.render" (dict "value" .Values.extraVolumes "context" $) | nindent 8 }}
{{- end }}
{{- if not .Values.data.persistence.enabled }}
- name: "data"
emptyDir: {}
{{- else if .Values.data.persistence.existingClaim }}
- name: "data"
persistentVolumeClaim:
claimName: {{ .Values.data.persistence.existingClaim }}
{{- else }}
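  ## Persistence enabled and no existing claim: provision one PersistentVolumeClaim per data pod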
volumeClaimTemplates:
- metadata:
name: "data"
annotations:
{{- if .Values.data.persistence.annotations }}
{{- include "common.tplvalues.render" (dict "value" .Values.data.persistence.annotations "context" $) | nindent 10 }}
{{- end }}
{{- if .Values.commonAnnotations }}
{{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 10 }}
{{- end }}
{{- if .Values.commonLabels }}
labels: {{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 10 }}
{{- end }}
spec:
accessModes:
{{- range .Values.data.persistence.accessModes }}
- {{ . | quote }}
{{- end }}
resources:
requests:
storage: {{ .Values.data.persistence.size | quote }}
{{- if .Values.data.persistence.selector }}
selector: {{- include "common.tplvalues.render" (dict "value" .Values.data.persistence.selector "context" $) | nindent 10 }}
{{- else if .Values.data.persistence.existingVolume }}
selector:
matchLabels:
volume: {{ .Values.data.persistence.existingVolume }}
{{- end }}
{{- include "common.storage.class" (dict "persistence" .Values.data.persistence "global" .Values.global) | nindent 8 }}
{{- end }}
{{- end }}

@@ -0,0 +1,28 @@
{{- if (include "elasticsearch.data.enabled" .) }}
apiVersion: v1
kind: Service
metadata:
name: {{ include "elasticsearch.data.servicename" . }}
namespace: {{ include "common.names.namespace" . | quote }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
app.kubernetes.io/component: data
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 4 }}
{{- end }}
{{- if .Values.commonAnnotations }}
annotations: {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }}
{{- end }}
spec:
type: ClusterIP
clusterIP: None
publishNotReadyAddresses: true
ports:
- name: tcp-rest-api
port: {{ .Values.containerPorts.restAPI }}
targetPort: rest-api
- name: tcp-transport
port: {{ .Values.containerPorts.transport }}
targetPort: transport
selector: {{- include "common.labels.matchLabels" . | nindent 4 }}
app.kubernetes.io/component: data
{{- end }}

@@ -0,0 +1,4 @@
{{- range .Values.extraDeploy }}
---
{{ include "common.tplvalues.render" (dict "value" . "context" $) }}
{{- end }}

@@ -0,0 +1,47 @@
{{- if and (include "elasticsearch.ingest.enabled" .) .Values.ingest.autoscaling.enabled }}
apiVersion: {{ include "common.capabilities.hpa.apiVersion" ( dict "context" $ ) }}
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "elasticsearch.ingest.fullname" . }}
namespace: {{ include "common.names.namespace" . | quote }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
app.kubernetes.io/component: ingest
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 4 }}
{{- end }}
{{- if .Values.commonAnnotations }}
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
spec:
scaleTargetRef:
apiVersion: {{ template "common.capabilities.statefulset.apiVersion" . }}
kind: StatefulSet
name: {{ include "elasticsearch.ingest.fullname" . }}
minReplicas: {{ .Values.ingest.autoscaling.minReplicas }}
maxReplicas: {{ .Values.ingest.autoscaling.maxReplicas }}
metrics:
{{- if .Values.ingest.autoscaling.targetCPU }}
- type: Resource
resource:
name: cpu
{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .) }}
targetAverageUtilization: {{ .Values.ingest.autoscaling.targetCPU }}
{{- else }}
target:
type: Utilization
averageUtilization: {{ .Values.ingest.autoscaling.targetCPU }}
{{- end }}
{{- end }}
{{- if .Values.ingest.autoscaling.targetMemory }}
- type: Resource
resource:
name: memory
{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .) }}
targetAverageUtilization: {{ .Values.ingest.autoscaling.targetMemory }}
{{- else }}
target:
type: Utilization
averageUtilization: {{ .Values.ingest.autoscaling.targetMemory }}
{{- end }}
{{- end }}
{{- end }}

@@ -0,0 +1,65 @@
{{- if and (include "elasticsearch.ingest.enabled" .) .Values.ingest.service.enabled .Values.ingest.ingress.enabled }}
apiVersion: {{ include "common.capabilities.ingress.apiVersion" . }}
kind: Ingress
metadata:
name: {{ include "elasticsearch.ingest.fullname" . }}
namespace: {{ include "common.names.namespace" . | quote }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
app.kubernetes.io/component: ingest
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
{{- end }}
{{- if or .Values.ingest.ingress.annotations .Values.commonAnnotations }}
annotations:
{{- if .Values.ingest.ingress.annotations }}
{{- include "common.tplvalues.render" ( dict "value" .Values.ingest.ingress.annotations "context" $) | nindent 4 }}
{{- end }}
{{- if .Values.commonAnnotations }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
{{- end }}
spec:
{{- if and .Values.ingest.ingress.ingressClassName (eq "true" (include "common.ingress.supportsIngressClassname" .)) }}
ingressClassName: {{ .Values.ingest.ingress.ingressClassName | quote }}
{{- end }}
rules:
{{- if .Values.ingest.ingress.hostname }}
- http:
paths:
{{- if .Values.ingest.ingress.extraPaths }}
{{- toYaml .Values.ingest.ingress.extraPaths | nindent 10 }}
{{- end }}
- path: {{ .Values.ingest.ingress.path }}
{{- if eq "true" (include "common.ingress.supportsPathType" .) }}
pathType: {{ .Values.ingest.ingress.pathType }}
{{- end }}
backend: {{- include "common.ingress.backend" (dict "serviceName" (include "elasticsearch.ingest.fullname" .) "servicePort" "tcp-rest-api" "context" $) | nindent 14 }}
{{- if ne .Values.ingest.ingress.hostname "*" }}
host: {{ .Values.ingest.ingress.hostname }}
{{- end }}
{{- end }}
{{- range .Values.ingest.ingress.extraHosts }}
- host: {{ .name }}
http:
paths:
- path: {{ default "/" .path }}
{{- if eq "true" (include "common.ingress.supportsPathType" $) }}
pathType: {{ default "ImplementationSpecific" .pathType }}
{{- end }}
backend: {{- include "common.ingress.backend" (dict "serviceName" (include "elasticsearch.ingest.fullname" $) "servicePort" "tcp-rest-api" "context" $) | nindent 14 }}
{{- end }}
{{- if .Values.ingest.ingress.extraRules }}
{{- include "common.tplvalues.render" ( dict "value" .Values.ingest.ingress.extraRules "context" $ ) | nindent 4 }}
{{- end }}
{{- if or (and .Values.ingest.ingress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.ingest.ingress.annotations )) .Values.ingest.ingress.selfSigned)) .Values.ingest.ingress.extraTls }}
tls:
{{- if and .Values.ingest.ingress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.ingest.ingress.annotations )) .Values.ingest.ingress.selfSigned) }}
- hosts:
- {{ .Values.ingest.ingress.hostname | quote }}
secretName: {{ printf "%s-tls" .Values.ingest.ingress.hostname }}
{{- end }}
{{- if .Values.ingest.ingress.extraTls }}
{{- include "common.tplvalues.render" (dict "value" .Values.ingest.ingress.extraTls "context" $) | nindent 4 }}
{{- end }}
{{- end }}
{{- end }}

@@ -0,0 +1,60 @@
{{- if and (include "elasticsearch.ingest.enabled" .) .Values.ingest.service.enabled }}
apiVersion: v1
kind: Service
metadata:
name: {{ include "elasticsearch.ingest.fullname" . }}
namespace: {{ include "common.names.namespace" . | quote }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
app.kubernetes.io/component: ingest
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
{{- end }}
annotations:
{{- if .Values.commonAnnotations }}
{{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }}
{{- end }}
{{- if .Values.ingest.service.annotations }}
{{- include "common.tplvalues.render" ( dict "value" .Values.ingest.service.annotations "context" $) | nindent 4 }}
{{- end }}
spec:
type: {{ .Values.ingest.service.type }}
{{- if and .Values.ingest.service.clusterIP (eq .Values.ingest.service.type "ClusterIP") }}
clusterIP: {{ .Values.ingest.service.clusterIP }}
{{- end }}
{{- if or (eq .Values.ingest.service.type "LoadBalancer") (eq .Values.ingest.service.type "NodePort") }}
externalTrafficPolicy: {{ .Values.ingest.service.externalTrafficPolicy | quote }}
{{- end }}
{{- if and (eq .Values.ingest.service.type "LoadBalancer") .Values.ingest.service.loadBalancerSourceRanges }}
loadBalancerSourceRanges: {{- toYaml .Values.ingest.service.loadBalancerSourceRanges | nindent 4 }}
{{- end }}
{{- if (and (eq .Values.ingest.service.type "LoadBalancer") (not (empty .Values.ingest.service.loadBalancerIP))) }}
loadBalancerIP: {{ .Values.ingest.service.loadBalancerIP }}
{{- end }}
{{- if .Values.ingest.service.sessionAffinity }}
sessionAffinity: {{ .Values.ingest.service.sessionAffinity }}
{{- end }}
{{- if .Values.ingest.service.sessionAffinityConfig }}
sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.ingest.service.sessionAffinityConfig "context" $) | nindent 4 }}
{{- end }}
ports:
- name: tcp-rest-api
port: {{ .Values.ingest.service.ports.restAPI }}
targetPort: rest-api
{{- if and (or (eq .Values.ingest.service.type "NodePort") (eq .Values.ingest.service.type "LoadBalancer")) (not (empty .Values.ingest.service.nodePorts.restAPI)) }}
nodePort: {{ .Values.ingest.service.nodePorts.restAPI }}
{{- else if eq .Values.ingest.service.type "ClusterIP" }}
nodePort: null
{{- end }}
- name: tcp-transport
port: {{ .Values.ingest.service.ports.transport }}
{{- if and (or (eq .Values.ingest.service.type "NodePort") (eq .Values.ingest.service.type "LoadBalancer")) (not (empty .Values.ingest.service.nodePorts.transport)) }}
nodePort: {{ .Values.ingest.service.nodePorts.transport }}
{{- else if eq .Values.ingest.service.type "ClusterIP" }}
nodePort: null
{{- end }}
{{- if .Values.ingest.service.extraPorts }}
{{- include "common.tplvalues.render" (dict "value" .Values.ingest.service.extraPorts "context" $) | nindent 4 }}
{{- end }}
selector: {{- include "common.labels.matchLabels" . | nindent 4 }}
app.kubernetes.io/component: ingest
{{- end }}
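
A hedged example of ingest.service values that would render this Service as a NodePort. Port numbers are illustrative (9200/9300 are the conventional Elasticsearch REST and transport ports, but the chart defaults may differ):

ingest:
  service:
    enabled: true
    type: NodePort
    ports:
      restAPI: 9200
      transport: 9300
    nodePorts:
      restAPI: 30920      # illustrative fixed node port
      transport: ""       # leave empty so Kubernetes allocates one
    externalTrafficPolicy: Cluster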

View File

@ -0,0 +1,22 @@
{{- if and (include "elasticsearch.ingest.enabled" .) .Values.ingest.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "elasticsearch.ingest.serviceAccountName" . }}
namespace: {{ include "common.names.namespace" . | quote }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
app.kubernetes.io/component: ingest
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 4 }}
{{- end }}
{{- if or .Values.ingest.serviceAccount.annotations .Values.commonAnnotations }}
annotations:
{{- if .Values.commonAnnotations }}
{{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }}
{{- end }}
{{- if .Values.ingest.serviceAccount.annotations }}
{{- include "common.tplvalues.render" (dict "value" .Values.ingest.serviceAccount.annotations "context" $) | nindent 4 }}
{{- end }}
{{- end }}
automountServiceAccountToken: {{ .Values.ingest.serviceAccount.automountServiceAccountToken }}
{{- end -}}

View File

@ -0,0 +1,313 @@
{{- if (include "elasticsearch.ingest.enabled" . ) }}
apiVersion: {{ template "common.capabilities.statefulset.apiVersion" . }}
kind: StatefulSet
metadata:
name: {{ include "elasticsearch.ingest.fullname" . }}
namespace: {{ include "common.names.namespace" . | quote }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
app.kubernetes.io/component: ingest
{{- if .Values.useIstioLabels }}
## Istio Labels: https://istio.io/docs/ops/deployment/requirements/
app: ingest
{{- end }}
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
{{- end }}
{{- if or .Values.ingest.annotations .Values.commonAnnotations }}
annotations:
{{- if .Values.ingest.annotations }}
{{- include "common.tplvalues.render" ( dict "value" .Values.ingest.annotations "context" $ ) | nindent 4 }}
{{- end }}
{{- if .Values.commonAnnotations }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
{{- end }}
spec:
{{- if not .Values.ingest.autoscaling.enabled }}
replicas: {{ .Values.ingest.replicaCount }}
{{- end }}
podManagementPolicy: {{ .Values.ingest.podManagementPolicy }}
selector:
matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
app.kubernetes.io/component: ingest
serviceName: {{ include "elasticsearch.ingest.servicename" . }}
{{- if .Values.ingest.updateStrategy }}
updateStrategy: {{- toYaml .Values.ingest.updateStrategy | nindent 4 }}
{{- end }}
template:
metadata:
labels: {{- include "common.labels.standard" . | nindent 8 }}
app.kubernetes.io/component: ingest
{{- if .Values.useIstioLabels }}
## Istio Labels: https://istio.io/docs/ops/deployment/requirements/
app: ingest
{{- end }}
{{- if .Values.ingest.podLabels }}
{{- include "common.tplvalues.render" (dict "value" .Values.ingest.podLabels "context" $) | nindent 8 }}
{{- end }}
annotations:
{{- if and (include "elasticsearch.createTlsSecret" .) (not .Values.security.tls.ingest.existingSecret) }}
checksum/tls: {{ include (print $.Template.BasePath "/tls-secret.yaml") . | sha256sum }}
{{- end }}
{{- if .Values.ingest.podAnnotations }}
{{- include "common.tplvalues.render" (dict "value" .Values.ingest.podAnnotations "context" $) | nindent 8 }}
{{- end }}
spec:
serviceAccountName: {{ template "elasticsearch.ingest.serviceAccountName" . }}
{{- include "elasticsearch.imagePullSecrets" . | nindent 6 }}
{{- if .Values.ingest.hostAliases }}
hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.ingest.hostAliases "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.ingest.affinity }}
affinity: {{- include "common.tplvalues.render" (dict "value" .Values.ingest.affinity "context" $) | nindent 8 }}
{{- else }}
affinity:
podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.ingest.podAffinityPreset "component" "ingest" "context" $) | nindent 10 }}
podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.ingest.podAntiAffinityPreset "component" "ingest" "context" $) | nindent 10 }}
nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.ingest.nodeAffinityPreset.type "key" .Values.ingest.nodeAffinityPreset.key "values" .Values.ingest.nodeAffinityPreset.values) | nindent 10 }}
{{- end }}
{{- if .Values.ingest.nodeSelector }}
nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.ingest.nodeSelector "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.ingest.tolerations }}
tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.ingest.tolerations "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.ingest.schedulerName }}
schedulerName: {{ .Values.ingest.schedulerName }}
{{- end }}
{{- if .Values.ingest.topologySpreadConstraints }}
topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.ingest.topologySpreadConstraints "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.ingest.priorityClassName }}
priorityClassName: {{ .Values.ingest.priorityClassName | quote }}
{{- end }}
{{- if .Values.ingest.podSecurityContext.enabled }}
securityContext: {{- omit .Values.ingest.podSecurityContext "enabled" | toYaml | nindent 8 }}
{{- end }}
{{- if .Values.ingest.terminationGracePeriodSeconds }}
terminationGracePeriodSeconds: {{ .Values.ingest.terminationGracePeriodSeconds }}
{{- end }}
{{- if or .Values.ingest.initContainers .Values.sysctlImage.enabled }}
initContainers:
{{- if .Values.sysctlImage.enabled }}
## Image that performs the sysctl operation to modify Kernel settings (needed sometimes to avoid boot errors)
- name: sysctl
image: {{ include "elasticsearch.sysctl.image" . }}
imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
command:
- /bin/bash
- -ec
- |
{{- include "elasticsearch.sysctlIfLess" (dict "key" "vm.max_map_count" "value" "262144") | nindent 14 }}
{{- include "elasticsearch.sysctlIfLess" (dict "key" "fs.file-max" "value" "65536") | nindent 14 }}
securityContext:
privileged: true
runAsUser: 0
{{- if .Values.sysctlImage.resources }}
resources: {{- toYaml .Values.sysctlImage.resources | nindent 12 }}
{{- end }}
{{- end }}
{{- if .Values.ingest.initContainers }}
{{- include "common.tplvalues.render" (dict "value" .Values.ingest.initContainers "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.initContainers }}
{{- include "common.tplvalues.render" (dict "value" .Values.initContainers "context" $) | nindent 8 }}
{{- end }}
{{- end }}
containers:
- name: elasticsearch
image: {{ include "elasticsearch.image" . }}
imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
{{- if .Values.ingest.containerSecurityContext.enabled }}
securityContext: {{- omit .Values.ingest.containerSecurityContext "enabled" | toYaml | nindent 12 }}
{{- end }}
{{- if .Values.diagnosticMode.enabled }}
command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }}
{{- else if .Values.ingest.command }}
command: {{- include "common.tplvalues.render" (dict "value" .Values.ingest.command "context" $) | nindent 12 }}
{{- end }}
{{- if .Values.diagnosticMode.enabled }}
args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }}
{{- else if .Values.ingest.args }}
args: {{- include "common.tplvalues.render" (dict "value" .Values.ingest.args "context" $) | nindent 12 }}
{{- end }}
{{- if .Values.ingest.lifecycleHooks }}
lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.ingest.lifecycleHooks "context" $) | nindent 12 }}
{{- end }}
env:
- name: BITNAMI_DEBUG
value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }}
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: ELASTICSEARCH_IS_DEDICATED_NODE
value: "yes"
- name: ELASTICSEARCH_NODE_ROLES
value: "ingest"
- name: ELASTICSEARCH_TRANSPORT_PORT_NUMBER
value: {{ .Values.containerPorts.transport | quote }}
- name: ELASTICSEARCH_HTTP_PORT_NUMBER
value: {{ .Values.containerPorts.restAPI | quote }}
- name: ELASTICSEARCH_CLUSTER_NAME
value: {{ .Values.clusterName | quote }}
- name: ELASTICSEARCH_CLUSTER_HOSTS
value: {{ include "elasticsearch.hosts" . | quote }}
- name: ELASTICSEARCH_TOTAL_NODES
value: {{ add (ternary .Values.master.autoscaling.minReplicas .Values.master.replicaCount .Values.master.autoscaling.enabled) (ternary .Values.data.autoscaling.minReplicas .Values.data.replicaCount .Values.data.autoscaling.enabled) | quote }}
- name: ELASTICSEARCH_CLUSTER_MASTER_HOSTS
{{- $elasticsearchMasterFullname := include "elasticsearch.master.fullname" . }}
{{- $replicas := int (ternary .Values.master.autoscaling.minReplicas .Values.master.replicaCount .Values.master.autoscaling.enabled) }}
value: {{ range $i, $e := until $replicas }}{{ printf "%s-%d" $elasticsearchMasterFullname $e }} {{ end }}
- name: ELASTICSEARCH_MINIMUM_MASTER_NODES
value: {{ add (div (ternary .Values.master.autoscaling.minReplicas .Values.master.replicaCount .Values.master.autoscaling.enabled) 2) 1 | quote }}
- name: ELASTICSEARCH_ADVERTISED_HOSTNAME
value: "$(MY_POD_NAME).{{ (include "elasticsearch.ingest.servicename" .)}}.{{ include "common.names.namespace" . }}.svc.{{ .Values.clusterDomain }}"
{{- if .Values.plugins }}
- name: ELASTICSEARCH_PLUGINS
value: {{ .Values.plugins | quote }}
{{- end }}
{{- if .Values.ingest.heapSize }}
- name: ELASTICSEARCH_HEAP_SIZE
value: {{ .Values.ingest.heapSize | quote }}
{{- end }}
{{- if .Values.security.enabled }}
{{- include "elasticsearch.configure.security" . | nindent 12 }}
{{- end }}
{{- if .Values.ingest.extraEnvVars }}
{{- include "common.tplvalues.render" ( dict "value" .Values.ingest.extraEnvVars "context" $ ) | nindent 12 }}
{{- end }}
{{- if .Values.extraEnvVars }}
{{- include "common.tplvalues.render" ( dict "value" .Values.extraEnvVars "context" $ ) | nindent 12 }}
{{- end }}
{{- if or .Values.extraEnvVarsCM .Values.extraEnvVarsSecret .Values.ingest.extraEnvVarsCM .Values.ingest.extraEnvVarsSecret }}
envFrom:
{{- if .Values.extraEnvVarsCM }}
- configMapRef:
name: {{ include "common.tplvalues.render" ( dict "value" .Values.extraEnvVarsCM "context" $ ) }}
{{- end }}
{{- if .Values.ingest.extraEnvVarsCM }}
- configMapRef:
name: {{ include "common.tplvalues.render" ( dict "value" .Values.ingest.extraEnvVarsCM "context" $ ) }}
{{- end }}
{{- if .Values.extraEnvVarsSecret }}
- secretRef:
name: {{ include "common.tplvalues.render" ( dict "value" .Values.extraEnvVarsSecret "context" $ ) }}
{{- end }}
{{- if .Values.ingest.extraEnvVarsSecret }}
- secretRef:
name: {{ include "common.tplvalues.render" ( dict "value" .Values.ingest.extraEnvVarsSecret "context" $ ) }}
{{- end }}
{{- end }}
ports:
- name: rest-api
containerPort: {{ .Values.containerPorts.restAPI }}
- name: transport
containerPort: {{ .Values.containerPorts.transport }}
{{- if not .Values.diagnosticMode.enabled }}
{{- if .Values.ingest.customStartupProbe }}
startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.ingest.customStartupProbe "context" $) | nindent 12 }}
{{- else if .Values.ingest.startupProbe.enabled }}
startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.ingest.startupProbe "enabled") "context" $) | nindent 12 }}
tcpSocket:
port: rest-api
{{- end }}
{{- if .Values.ingest.customLivenessProbe }}
livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.ingest.customLivenessProbe "context" $) | nindent 12 }}
{{- else if .Values.ingest.livenessProbe.enabled }}
livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.ingest.livenessProbe "enabled") "context" $) | nindent 12 }}
exec:
command:
- /opt/bitnami/scripts/elasticsearch/healthcheck.sh
{{- end }}
{{- if .Values.ingest.customReadinessProbe }}
readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.ingest.customReadinessProbe "context" $) | nindent 12 }}
{{- else if .Values.ingest.readinessProbe.enabled }}
readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.ingest.readinessProbe "enabled") "context" $) | nindent 12 }}
exec:
command:
- /opt/bitnami/scripts/elasticsearch/healthcheck.sh
{{- end }}
{{- end }}
{{- if .Values.ingest.resources }}
resources: {{- toYaml .Values.ingest.resources | nindent 12 }}
{{- end }}
volumeMounts:
- name: data
mountPath: /bitnami/elasticsearch/data
{{- if .Values.config }}
- mountPath: /opt/bitnami/elasticsearch/config/elasticsearch.yml
name: config
subPath: elasticsearch.yml
{{- end }}
{{- if .Values.extraConfig }}
- mountPath: /opt/bitnami/elasticsearch/config/my_elasticsearch.yml
name: config
subPath: my_elasticsearch.yml
{{- end }}
{{- if .Values.security.enabled }}
- name: elasticsearch-certificates
mountPath: /opt/bitnami/elasticsearch/config/certs
readOnly: true
{{- end }}
{{- if .Values.initScripts }}
- name: custom-init-scripts
mountPath: /docker-entrypoint-initdb.d/init-scripts
{{- end }}
{{- if .Values.initScriptsCM }}
- name: custom-init-scripts-cm
mountPath: /docker-entrypoint-initdb.d/init-scripts-cm
{{- end }}
{{- if .Values.initScriptsSecret }}
- name: custom-init-scripts-secret
mountPath: /docker-entrypoint-initdb.d/init-scripts-secret
{{- end }}
{{- if .Values.ingest.extraVolumeMounts }}
{{- include "common.tplvalues.render" (dict "value" .Values.ingest.extraVolumeMounts "context" $) | nindent 12 }}
{{- end }}
{{- if .Values.extraVolumeMounts }}
{{- include "common.tplvalues.render" (dict "value" .Values.extraVolumeMounts "context" $) | nindent 12 }}
{{- end }}
{{- if .Values.ingest.sidecars }}
{{- include "common.tplvalues.render" ( dict "value" .Values.ingest.sidecars "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.sidecars }}
{{- include "common.tplvalues.render" ( dict "value" .Values.sidecars "context" $) | nindent 8 }}
{{- end }}
volumes:
- name: "data"
emptyDir: {}
{{- if or .Values.config .Values.extraConfig }}
- name: config
configMap:
name: {{ template "common.names.fullname" . }}
{{- end }}
{{- if .Values.security.enabled }}
- name: elasticsearch-certificates
secret:
secretName: {{ template "elasticsearch.ingest.tlsSecretName" . }}
defaultMode: 256
{{- end }}
{{- if .Values.initScripts }}
- name: custom-init-scripts
configMap:
name: {{ template "elasticsearch.initScripts" . }}
{{- end }}
{{- if .Values.initScriptsCM }}
- name: custom-init-scripts-cm
configMap:
name: {{ template "elasticsearch.initScriptsCM" . }}
{{- end }}
{{- if .Values.initScriptsSecret }}
- name: custom-init-scripts-secret
secret:
secretName: {{ template "elasticsearch.initScriptsSecret" . }}
defaultMode: 0755
{{- end }}
{{- if .Values.ingest.extraVolumes }}
{{- include "common.tplvalues.render" (dict "value" .Values.ingest.extraVolumes "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.extraVolumes }}
{{- include "common.tplvalues.render" (dict "value" .Values.extraVolumes "context" $) | nindent 8 }}
{{- end }}
{{- end }}
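
The StatefulSet above is shaped mostly by the ingest values block plus the shared sysctlImage toggle. A minimal sketch, assuming chart defaults fill in anything omitted; replica count, heap size and resources are illustrative:

ingest:
  replicaCount: 2
  heapSize: 128m            # exported to the container as ELASTICSEARCH_HEAP_SIZE
  resources:
    requests:
      cpu: 100m
      memory: 256Mi
    limits:
      memory: 512Mi
sysctlImage:
  enabled: true             # adds the privileged init container that raises vm.max_map_count and fs.file-max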

View File

@ -0,0 +1,28 @@
{{- if (include "elasticsearch.ingest.enabled" .) }}
apiVersion: v1
kind: Service
metadata:
name: {{ include "elasticsearch.ingest.servicename" . }}
namespace: {{ include "common.names.namespace" . | quote }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
app.kubernetes.io/component: ingest
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 4 }}
{{- end }}
{{- if .Values.commonAnnotations }}
annotations: {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }}
{{- end }}
spec:
type: ClusterIP
clusterIP: None
publishNotReadyAddresses: true
ports:
- name: tcp-rest-api
port: {{ .Values.containerPorts.restAPI }}
targetPort: rest-api
- name: tcp-transport
port: {{ .Values.containerPorts.transport }}
targetPort: transport
selector: {{- include "common.labels.matchLabels" . | nindent 4 }}
app.kubernetes.io/component: ingest
{{- end }}

View File

@ -0,0 +1,94 @@
{{- if .Values.ingress.enabled }}
{{- if .Values.ingress.secrets }}
{{- range .Values.ingress.secrets }}
apiVersion: v1
kind: Secret
metadata:
name: {{ .name }}
namespace: {{ template "common.names.namespace" $ }}
labels: {{- include "common.labels.standard" $ | nindent 4 }}
{{- if $.Values.commonLabels }}
{{- include "common.tplvalues.render" ( dict "value" $.Values.commonLabels "context" $ ) | nindent 4 }}
{{- end }}
{{- if $.Values.commonAnnotations }}
annotations: {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
type: kubernetes.io/tls
data:
tls.crt: {{ .certificate | b64enc }}
tls.key: {{ .key | b64enc }}
---
{{- end }}
{{- end }}
{{- if and .Values.ingress.tls .Values.ingress.selfSigned }}
{{- $secretName := printf "%s-tls" .Values.ingress.hostname }}
{{- $ca := genCA "elasticsearch-ca" 365 }}
{{- $cert := genSignedCert .Values.ingress.hostname nil (list .Values.ingress.hostname) 365 $ca }}
apiVersion: v1
kind: Secret
metadata:
name: {{ $secretName }}
namespace: {{ template "common.names.namespace" . }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
{{- end }}
{{- if .Values.commonAnnotations }}
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
type: kubernetes.io/tls
data:
tls.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.crt" "defaultValue" $cert.Cert "context" $) }}
tls.key: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.key" "defaultValue" $cert.Key "context" $) }}
ca.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "ca.crt" "defaultValue" $ca.Cert "context" $) }}
---
{{- end }}
{{- end }}
{{- if .Values.ingest.ingress.enabled }}
{{- if .Values.ingest.ingress.secrets }}
{{- range .Values.ingest.ingress.secrets }}
apiVersion: v1
kind: Secret
metadata:
name: {{ .name }}
  namespace: {{ template "common.names.namespace" $ }}
labels: {{- include "common.labels.standard" $ | nindent 4 }}
app.kubernetes.io/component: ingest
{{- if $.Values.commonLabels }}
{{- include "common.tplvalues.render" ( dict "value" $.Values.commonLabels "context" $ ) | nindent 4 }}
{{- end }}
{{- if $.Values.commonAnnotations }}
annotations: {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
type: kubernetes.io/tls
data:
tls.crt: {{ .certificate | b64enc }}
tls.key: {{ .key | b64enc }}
---
{{- end }}
{{- end }}
{{- if and .Values.ingest.ingress.tls .Values.ingest.ingress.selfSigned }}
{{- $secretName := printf "%s-tls" .Values.ingest.ingress.hostname }}
{{- $ca := genCA "elasticsearch-ingest-ca" 365 }}
{{- $cert := genSignedCert .Values.ingest.ingress.hostname nil (list .Values.ingest.ingress.hostname) 365 $ca }}
apiVersion: v1
kind: Secret
metadata:
name: {{ $secretName }}
namespace: {{ template "common.names.namespace" $ }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
app.kubernetes.io/component: ingest
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
{{- end }}
{{- if .Values.commonAnnotations }}
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
type: kubernetes.io/tls
data:
tls.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.crt" "defaultValue" $cert.Cert "context" $) }}
tls.key: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.key" "defaultValue" $cert.Key "context" $) }}
ca.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "ca.crt" "defaultValue" $ca.Cert "context" $) }}
---
{{- end }}
{{- end }}
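
Either branch of the Secret template above can be driven from values. A hedged sketch for the main ingress (the ingest.ingress block follows the same shape); the host name is illustrative and the certificate material is a placeholder:

ingress:
  enabled: true
  hostname: elasticsearch.local
  tls: true
  selfSigned: true                     # renders the generated <hostname>-tls Secret above
  # Or supply your own certificate instead of selfSigned:
  secrets:
    - name: elasticsearch.local-tls
      certificate: |
        -----BEGIN CERTIFICATE-----
        <placeholder>
        -----END CERTIFICATE-----
      key: |
        -----BEGIN PRIVATE KEY-----
        <placeholder>
        -----END PRIVATE KEY-----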

View File

@ -0,0 +1,65 @@
{{- if .Values.ingress.enabled }}
apiVersion: {{ include "common.capabilities.ingress.apiVersion" . }}
kind: Ingress
metadata:
name: {{ include "common.names.fullname" . }}
namespace: {{ include "common.names.namespace" . | quote }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
app.kubernetes.io/component: elasticsearch
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
{{- end }}
{{- if or .Values.ingress.annotations .Values.commonAnnotations }}
annotations:
{{- if .Values.ingress.annotations }}
{{- include "common.tplvalues.render" ( dict "value" .Values.ingress.annotations "context" $) | nindent 4 }}
{{- end }}
{{- if .Values.commonAnnotations }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
{{- end }}
spec:
{{- if and .Values.ingress.ingressClassName (eq "true" (include "common.ingress.supportsIngressClassname" .)) }}
ingressClassName: {{ .Values.ingress.ingressClassName | quote }}
{{- end }}
rules:
{{- if .Values.ingress.hostname }}
- http:
paths:
{{- if .Values.ingress.extraPaths }}
{{- toYaml .Values.ingress.extraPaths | nindent 10 }}
{{- end }}
- path: {{ .Values.ingress.path }}
{{- if eq "true" (include "common.ingress.supportsPathType" .) }}
pathType: {{ .Values.ingress.pathType }}
{{- end }}
backend: {{- include "common.ingress.backend" (dict "serviceName" (include "common.names.fullname" .) "servicePort" "tcp-rest-api" "context" $) | nindent 14 }}
{{- if ne .Values.ingress.hostname "*" }}
host: {{ .Values.ingress.hostname }}
{{- end }}
{{- end }}
{{- range .Values.ingress.extraHosts }}
- host: {{ .name }}
http:
paths:
- path: {{ default "/" .path }}
{{- if eq "true" (include "common.ingress.supportsPathType" $) }}
pathType: {{ default "ImplementationSpecific" .pathType }}
{{- end }}
backend: {{- include "common.ingress.backend" (dict "serviceName" (include "common.names.fullname" $) "servicePort" "tcp-rest-api" "context" $) | nindent 14 }}
{{- end }}
{{- if .Values.ingress.extraRules }}
{{- include "common.tplvalues.render" ( dict "value" .Values.ingress.extraRules "context" $ ) | nindent 4 }}
{{- end }}
{{- if or (and .Values.ingress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.ingress.annotations )) .Values.ingress.selfSigned)) .Values.ingress.extraTls }}
tls:
{{- if and .Values.ingress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.ingress.annotations )) .Values.ingress.selfSigned) }}
- hosts:
- {{ .Values.ingress.hostname | quote }}
secretName: {{ printf "%s-tls" .Values.ingress.hostname }}
{{- end }}
{{- if .Values.ingress.extraTls }}
{{- include "common.tplvalues.render" (dict "value" .Values.ingress.extraTls "context" $) | nindent 4 }}
{{- end }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,16 @@
{{- if .Values.initScripts }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ printf "%s-init-scripts" (include "common.names.fullname" .) }}
namespace: {{ include "common.names.namespace" . | quote }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
{{- end }}
{{- if .Values.commonAnnotations }}
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
data:
{{- include "common.tplvalues.render" ( dict "value" .Values.initScripts "context" $ ) | nindent 4 }}
{{- end }}
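
This ConfigMap simply renders whatever is placed under initScripts, and the StatefulSets in this diff mount it at /docker-entrypoint-initdb.d/init-scripts. A minimal illustrative value:

initScripts:
  my-init-script.sh: |
    #!/bin/bash
    echo "running custom init script"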

View File

@ -0,0 +1,47 @@
{{- if and (include "elasticsearch.master.enabled" .) .Values.master.autoscaling.enabled }}
apiVersion: {{ include "common.capabilities.hpa.apiVersion" ( dict "context" $ ) }}
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "elasticsearch.master.fullname" . }}
namespace: {{ include "common.names.namespace" . | quote }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
app.kubernetes.io/component: master
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 4 }}
{{- end }}
{{- if .Values.commonAnnotations }}
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
spec:
scaleTargetRef:
apiVersion: {{ template "common.capabilities.statefulset.apiVersion" . }}
kind: StatefulSet
name: {{ include "elasticsearch.master.fullname" . }}
minReplicas: {{ .Values.master.autoscaling.minReplicas }}
maxReplicas: {{ .Values.master.autoscaling.maxReplicas }}
metrics:
{{- if .Values.master.autoscaling.targetCPU }}
- type: Resource
resource:
name: cpu
{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .) }}
targetAverageUtilization: {{ .Values.master.autoscaling.targetCPU }}
{{- else }}
target:
type: Utilization
averageUtilization: {{ .Values.master.autoscaling.targetCPU }}
{{- end }}
{{- end }}
{{- if .Values.master.autoscaling.targetMemory }}
- type: Resource
resource:
name: memory
{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .) }}
targetAverageUtilization: {{ .Values.master.autoscaling.targetMemory }}
{{- else }}
target:
type: Utilization
averageUtilization: {{ .Values.master.autoscaling.targetMemory }}
{{- end }}
{{- end }}
{{- end }}
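
An example master.autoscaling block that would render this HorizontalPodAutoscaler; the thresholds are illustrative, and targetMemory can stay empty to skip the memory metric entirely:

master:
  autoscaling:
    enabled: true
    minReplicas: 3
    maxReplicas: 6
    targetCPU: 60         # average CPU utilization percentage
    targetMemory: ""      # leave empty to omit the memory metric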

View File

@ -0,0 +1,22 @@
{{- if and (include "elasticsearch.master.enabled" .) .Values.master.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "elasticsearch.master.serviceAccountName" . }}
namespace: {{ include "common.names.namespace" . | quote }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
app.kubernetes.io/component: master
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 4 }}
{{- end }}
{{- if or .Values.master.serviceAccount.annotations .Values.commonAnnotations }}
annotations:
{{- if .Values.commonAnnotations }}
{{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }}
{{- end }}
{{- if .Values.master.serviceAccount.annotations }}
{{- include "common.tplvalues.render" (dict "value" .Values.master.serviceAccount.annotations "context" $) | nindent 4 }}
{{- end }}
{{- end }}
automountServiceAccountToken: {{ .Values.master.serviceAccount.automountServiceAccountToken }}
{{- end -}}

View File

@ -0,0 +1,373 @@
{{- if (include "elasticsearch.master.enabled" .) }}
apiVersion: {{ template "common.capabilities.statefulset.apiVersion" . }}
kind: StatefulSet
metadata:
name: {{ include "elasticsearch.master.fullname" . }}
namespace: {{ include "common.names.namespace" . | quote }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
app.kubernetes.io/component: master
{{- if .Values.useIstioLabels }}
## Istio Labels: https://istio.io/docs/ops/deployment/requirements/
app: master
{{- end }}
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
{{- end }}
{{- if or .Values.master.annotations .Values.commonAnnotations }}
annotations:
{{- if .Values.master.annotations }}
{{- include "common.tplvalues.render" ( dict "value" .Values.master.annotations "context" $ ) | nindent 4 }}
{{- end }}
{{- if .Values.commonAnnotations }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
{{- end }}
spec:
{{- if not .Values.master.autoscaling.enabled }}
replicas: {{ .Values.master.replicaCount }}
{{- end }}
podManagementPolicy: {{ .Values.master.podManagementPolicy }}
selector:
matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
app.kubernetes.io/component: master
serviceName: {{ include "elasticsearch.master.servicename" . }}
{{- if .Values.master.updateStrategy }}
updateStrategy: {{- toYaml .Values.master.updateStrategy | nindent 4 }}
{{- end }}
template:
metadata:
labels: {{- include "common.labels.standard" . | nindent 8 }}
app.kubernetes.io/component: master
{{- if .Values.useIstioLabels }}
## Istio Labels: https://istio.io/docs/ops/deployment/requirements/
app: master
{{- end }}
{{- if .Values.master.podLabels }}
{{- include "common.tplvalues.render" (dict "value" .Values.master.podLabels "context" $) | nindent 8 }}
{{- end }}
annotations:
{{- if and (include "elasticsearch.createTlsSecret" .) (not .Values.security.tls.master.existingSecret) }}
checksum/tls: {{ include (print $.Template.BasePath "/tls-secret.yaml") . | sha256sum }}
{{- end }}
{{- if .Values.master.podAnnotations }}
{{- include "common.tplvalues.render" (dict "value" .Values.master.podAnnotations "context" $) | nindent 8 }}
{{- end }}
spec:
serviceAccountName: {{ template "elasticsearch.master.serviceAccountName" . }}
{{- include "elasticsearch.imagePullSecrets" . | nindent 6 }}
{{- if .Values.master.hostAliases }}
hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.master.hostAliases "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.master.affinity }}
affinity: {{- include "common.tplvalues.render" (dict "value" .Values.master.affinity "context" $) | nindent 8 }}
{{- else }}
affinity:
podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.master.podAffinityPreset "component" "master" "context" $) | nindent 10 }}
podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.master.podAntiAffinityPreset "component" "master" "context" $) | nindent 10 }}
nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.master.nodeAffinityPreset.type "key" .Values.master.nodeAffinityPreset.key "values" .Values.master.nodeAffinityPreset.values) | nindent 10 }}
{{- end }}
{{- if .Values.master.nodeSelector }}
nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.master.nodeSelector "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.master.tolerations }}
tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.master.tolerations "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.master.schedulerName }}
schedulerName: {{ .Values.master.schedulerName }}
{{- end }}
{{- if .Values.master.topologySpreadConstraints }}
topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.master.topologySpreadConstraints "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.master.priorityClassName }}
priorityClassName: {{ .Values.master.priorityClassName | quote }}
{{- end }}
{{- if .Values.master.podSecurityContext.enabled }}
securityContext: {{- omit .Values.master.podSecurityContext "enabled" | toYaml | nindent 8 }}
{{- end }}
{{- if .Values.master.terminationGracePeriodSeconds }}
terminationGracePeriodSeconds: {{ .Values.master.terminationGracePeriodSeconds }}
{{- end }}
{{- if or .Values.master.initContainers .Values.sysctlImage.enabled (and .Values.volumePermissions.enabled .Values.master.persistence.enabled) }}
initContainers:
{{- if .Values.sysctlImage.enabled }}
## Image that performs the sysctl operation to modify Kernel settings (needed sometimes to avoid boot errors)
- name: sysctl
image: {{ include "elasticsearch.sysctl.image" . }}
imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
command:
- /bin/bash
- -ec
- |
{{- include "elasticsearch.sysctlIfLess" (dict "key" "vm.max_map_count" "value" "262144") | nindent 14 }}
{{- include "elasticsearch.sysctlIfLess" (dict "key" "fs.file-max" "value" "65536") | nindent 14 }}
securityContext:
privileged: true
runAsUser: 0
{{- if .Values.sysctlImage.resources }}
resources: {{- toYaml .Values.sysctlImage.resources | nindent 12 }}
{{- end }}
{{- end }}
{{- if and .Values.volumePermissions.enabled .Values.master.persistence.enabled }}
- name: volume-permissions
image: {{ include "elasticsearch.volumePermissions.image" . }}
imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }}
command:
- /bin/bash
- -ec
- |
mkdir -p /bitnami/elasticsearch/data
chown {{ .Values.master.containerSecurityContext.runAsUser }}:{{ .Values.master.podSecurityContext.fsGroup }} /bitnami/elasticsearch/data
find /bitnami/elasticsearch/data -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | xargs -r chown -R {{ .Values.master.containerSecurityContext.runAsUser }}:{{ .Values.master.podSecurityContext.fsGroup }}
securityContext:
runAsUser: 0
{{- if .Values.volumePermissions.resources }}
resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }}
{{- end }}
volumeMounts:
- name: data
mountPath: /bitnami/elasticsearch/data
{{- end }}
{{- if .Values.master.initContainers }}
{{- include "common.tplvalues.render" (dict "value" .Values.master.initContainers "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.initContainers }}
{{- include "common.tplvalues.render" (dict "value" .Values.initContainers "context" $) | nindent 8 }}
{{- end }}
{{- end }}
containers:
- name: elasticsearch
image: {{ include "elasticsearch.image" . }}
imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
{{- if .Values.master.containerSecurityContext.enabled }}
securityContext: {{- omit .Values.master.containerSecurityContext "enabled" | toYaml | nindent 12 }}
{{- end }}
{{- if .Values.diagnosticMode.enabled }}
command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }}
{{- else if .Values.master.command }}
command: {{- include "common.tplvalues.render" (dict "value" .Values.master.command "context" $) | nindent 12 }}
{{- end }}
{{- if .Values.diagnosticMode.enabled }}
args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }}
{{- else if .Values.master.args }}
args: {{- include "common.tplvalues.render" (dict "value" .Values.master.args "context" $) | nindent 12 }}
{{- end }}
{{- if .Values.master.lifecycleHooks }}
lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.master.lifecycleHooks "context" $) | nindent 12 }}
{{- end }}
env:
- name: BITNAMI_DEBUG
value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }}
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: ELASTICSEARCH_IS_DEDICATED_NODE
value: {{ ternary "yes" "no" .Values.master.masterOnly | quote }}
- name: ELASTICSEARCH_NODE_ROLES
value: "master"
- name: ELASTICSEARCH_TRANSPORT_PORT_NUMBER
value: {{ .Values.containerPorts.transport | quote }}
- name: ELASTICSEARCH_HTTP_PORT_NUMBER
value: {{ .Values.containerPorts.restAPI | quote }}
- name: ELASTICSEARCH_CLUSTER_NAME
value: {{ .Values.clusterName | quote }}
- name: ELASTICSEARCH_CLUSTER_HOSTS
value: {{ include "elasticsearch.hosts" . | quote }}
- name: ELASTICSEARCH_TOTAL_NODES
value: {{ add (ternary .Values.master.autoscaling.minReplicas .Values.master.replicaCount .Values.master.autoscaling.enabled) (ternary .Values.data.autoscaling.minReplicas .Values.data.replicaCount .Values.data.autoscaling.enabled) | quote }}
- name: ELASTICSEARCH_CLUSTER_MASTER_HOSTS
{{- $elasticsearchMasterFullname := include "elasticsearch.master.fullname" . }}
{{- $replicas := int (ternary .Values.master.autoscaling.minReplicas .Values.master.replicaCount .Values.master.autoscaling.enabled) }}
value: {{ range $i, $e := until $replicas }}{{ printf "%s-%d" $elasticsearchMasterFullname $e }} {{ end }}
- name: ELASTICSEARCH_MINIMUM_MASTER_NODES
value: {{ add (div (ternary .Values.master.autoscaling.minReplicas .Values.master.replicaCount .Values.master.autoscaling.enabled) 2) 1 | quote }}
- name: ELASTICSEARCH_ADVERTISED_HOSTNAME
value: "$(MY_POD_NAME).{{ (include "elasticsearch.master.servicename" .) | trunc 63 | trimSuffix "-" }}.{{ include "common.names.namespace" . }}.svc.{{ .Values.clusterDomain }}"
{{- if .Values.plugins }}
- name: ELASTICSEARCH_PLUGINS
value: {{ .Values.plugins | quote }}
{{- end }}
{{- if .Values.snapshotRepoPath }}
- name: ELASTICSEARCH_FS_SNAPSHOT_REPO_PATH
value: {{ .Values.snapshotRepoPath | quote }}
{{- end }}
{{- if .Values.master.heapSize }}
- name: ELASTICSEARCH_HEAP_SIZE
value: {{ .Values.master.heapSize | quote }}
{{- end }}
{{- if .Values.security.enabled }}
{{- include "elasticsearch.configure.security" . | nindent 12 }}
{{- end }}
{{- if .Values.master.extraEnvVars }}
{{- include "common.tplvalues.render" ( dict "value" .Values.master.extraEnvVars "context" $ ) | nindent 12 }}
{{- end }}
{{- if .Values.extraEnvVars }}
{{- include "common.tplvalues.render" ( dict "value" .Values.extraEnvVars "context" $ ) | nindent 12 }}
{{- end }}
{{- if or .Values.extraEnvVarsCM .Values.extraEnvVarsSecret .Values.master.extraEnvVarsCM .Values.master.extraEnvVarsSecret }}
envFrom:
{{- if .Values.extraEnvVarsCM }}
- configMapRef:
name: {{ include "common.tplvalues.render" ( dict "value" .Values.extraEnvVarsCM "context" $ ) }}
{{- end }}
{{- if .Values.master.extraEnvVarsCM }}
- configMapRef:
name: {{ include "common.tplvalues.render" ( dict "value" .Values.master.extraEnvVarsCM "context" $ ) }}
{{- end }}
{{- if .Values.extraEnvVarsSecret }}
- secretRef:
name: {{ include "common.tplvalues.render" ( dict "value" .Values.extraEnvVarsSecret "context" $ ) }}
{{- end }}
{{- if .Values.master.extraEnvVarsSecret }}
- secretRef:
name: {{ include "common.tplvalues.render" ( dict "value" .Values.master.extraEnvVarsSecret "context" $ ) }}
{{- end }}
{{- end }}
ports:
- name: rest-api
containerPort: {{ .Values.containerPorts.restAPI }}
- name: transport
containerPort: {{ .Values.containerPorts.transport }}
{{- if not .Values.diagnosticMode.enabled }}
{{- if .Values.master.customStartupProbe }}
startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.master.customStartupProbe "context" $) | nindent 12 }}
{{- else if .Values.master.startupProbe.enabled }}
startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.master.startupProbe "enabled") "context" $) | nindent 12 }}
tcpSocket:
port: rest-api
{{- end }}
{{- if .Values.master.customLivenessProbe }}
livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.master.customLivenessProbe "context" $) | nindent 12 }}
{{- else if .Values.master.livenessProbe.enabled }}
livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.master.livenessProbe "enabled") "context" $) | nindent 12 }}
exec:
command:
- /opt/bitnami/scripts/elasticsearch/healthcheck.sh
{{- end }}
{{- if .Values.master.customReadinessProbe }}
readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.master.customReadinessProbe "context" $) | nindent 12 }}
{{- else if .Values.master.readinessProbe.enabled }}
readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.master.readinessProbe "enabled") "context" $) | nindent 12 }}
exec:
command:
- /opt/bitnami/scripts/elasticsearch/healthcheck.sh
{{- end }}
{{- end }}
{{- if .Values.master.resources }}
resources: {{- toYaml .Values.master.resources | nindent 12 }}
{{- end }}
volumeMounts:
- name: data
mountPath: /bitnami/elasticsearch/data
{{- if .Values.config }}
- mountPath: /opt/bitnami/elasticsearch/config/elasticsearch.yml
name: config
subPath: elasticsearch.yml
{{- end }}
{{- if .Values.extraConfig }}
- mountPath: /opt/bitnami/elasticsearch/config/my_elasticsearch.yml
name: config
subPath: my_elasticsearch.yml
{{- end }}
{{- if .Values.security.enabled }}
- name: elasticsearch-certificates
mountPath: /opt/bitnami/elasticsearch/config/certs
readOnly: true
{{- end }}
{{- if .Values.initScripts }}
- name: custom-init-scripts
mountPath: /docker-entrypoint-initdb.d/init-scripts
{{- end }}
{{- if .Values.initScriptsCM }}
- name: custom-init-scripts-cm
mountPath: /docker-entrypoint-initdb.d/init-scripts-cm
{{- end }}
{{- if .Values.initScriptsSecret }}
- name: custom-init-scripts-secret
mountPath: /docker-entrypoint-initdb.d/init-scripts-secret
{{- end }}
{{- if .Values.master.extraVolumeMounts }}
{{- include "common.tplvalues.render" (dict "value" .Values.master.extraVolumeMounts "context" $) | nindent 12 }}
{{- end }}
{{- if .Values.extraVolumeMounts }}
{{- include "common.tplvalues.render" (dict "value" .Values.extraVolumeMounts "context" $) | nindent 12 }}
{{- end }}
{{- if .Values.master.sidecars }}
{{- include "common.tplvalues.render" ( dict "value" .Values.master.sidecars "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.sidecars }}
{{- include "common.tplvalues.render" ( dict "value" .Values.sidecars "context" $) | nindent 8 }}
{{- end }}
volumes:
{{- if or .Values.config .Values.extraConfig }}
- name: config
configMap:
name: {{ template "common.names.fullname" . }}
{{- end }}
{{- if .Values.security.enabled }}
- name: elasticsearch-certificates
secret:
secretName: {{ template "elasticsearch.master.tlsSecretName" . }}
defaultMode: 256
{{- end }}
{{- if .Values.initScripts }}
- name: custom-init-scripts
configMap:
name: {{ template "elasticsearch.initScripts" . }}
{{- end }}
{{- if .Values.initScriptsCM }}
- name: custom-init-scripts-cm
configMap:
name: {{ template "elasticsearch.initScriptsCM" . }}
{{- end }}
{{- if .Values.initScriptsSecret }}
- name: custom-init-scripts-secret
secret:
secretName: {{ template "elasticsearch.initScriptsSecret" . }}
defaultMode: 0755
{{- end }}
{{- if .Values.master.extraVolumes }}
{{- include "common.tplvalues.render" (dict "value" .Values.master.extraVolumes "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.extraVolumes }}
{{- include "common.tplvalues.render" (dict "value" .Values.extraVolumes "context" $) | nindent 8 }}
{{- end }}
{{- if not .Values.master.persistence.enabled }}
- name: "data"
emptyDir: {}
{{- else if .Values.master.persistence.existingClaim }}
- name: "data"
persistentVolumeClaim:
claimName: {{ .Values.master.persistence.existingClaim }}
{{- else }}
volumeClaimTemplates:
- metadata:
name: "data"
annotations:
{{- if .Values.master.persistence.annotations }}
{{- include "common.tplvalues.render" (dict "value" .Values.master.persistence.annotations "context" $) | nindent 10 }}
{{- end }}
{{- if .Values.commonAnnotations }}
{{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 10 }}
{{- end }}
{{- if .Values.commonLabels }}
labels: {{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 10 }}
{{- end }}
spec:
accessModes:
{{- range .Values.master.persistence.accessModes }}
- {{ . | quote }}
{{- end }}
resources:
requests:
storage: {{ .Values.master.persistence.size | quote }}
{{- if .Values.master.persistence.selector }}
selector: {{- include "common.tplvalues.render" (dict "value" .Values.master.persistence.selector "context" $) | nindent 10 }}
{{- else if .Values.master.persistence.existingVolume }}
selector:
matchLabels:
volume: {{ .Values.master.persistence.existingVolume }}
{{- end }}
{{- include "common.storage.class" (dict "persistence" .Values.master.persistence "global" .Values.global) | nindent 8 }}
{{- end }}
{{- end }}
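
Persistence for the master StatefulSet is controlled by master.persistence. A hedged sketch of the common case; size and access mode are illustrative, and the storage class is resolved through the common.storage.class helper used at the end of the template:

master:
  persistence:
    enabled: true
    storageClass: ""        # empty string falls back to the cluster default class (assumption based on the common helper)
    accessModes:
      - ReadWriteOnce
    size: 8Gi
    existingClaim: ""       # set this to reuse a pre-created PVC instead of the volumeClaimTemplates branch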

View File

@ -0,0 +1,28 @@
{{- if (include "elasticsearch.master.enabled" .) }}
apiVersion: v1
kind: Service
metadata:
name: {{ include "elasticsearch.master.servicename" . }}
namespace: {{ include "common.names.namespace" . | quote }}
labels: {{ include "common.labels.standard" . | nindent 4 }}
app.kubernetes.io/component: master
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 4 }}
{{- end }}
{{- if .Values.commonAnnotations }}
annotations: {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }}
{{- end }}
spec:
type: ClusterIP
clusterIP: None
publishNotReadyAddresses: true
ports:
- name: tcp-rest-api
port: {{ .Values.containerPorts.restAPI }}
targetPort: rest-api
- name: tcp-transport
port: {{ .Values.containerPorts.transport }}
targetPort: transport
selector: {{- include "common.labels.matchLabels" . | nindent 4 }}
app.kubernetes.io/component: master
{{- end }}

View File

@ -0,0 +1,182 @@
{{- if .Values.metrics.enabled }}
apiVersion: {{ template "common.capabilities.deployment.apiVersion" . }}
kind: Deployment
metadata:
name: {{ include "elasticsearch.metrics.fullname" . }}
namespace: {{ include "common.names.namespace" . | quote }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
app.kubernetes.io/component: metrics
{{- if .Values.useIstioLabels }}
## Istio Labels: https://istio.io/docs/ops/deployment/requirements/
app: metrics
{{- end }}
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
{{- end }}
annotations:
{{- if .Values.metrics.annotations }}
{{- include "common.tplvalues.render" (dict "value" .Values.metrics.annotations "context" $) | nindent 4 }}
{{- end }}
{{- if .Values.commonAnnotations }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
spec:
replicas: 1
selector:
matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
app.kubernetes.io/component: metrics
template:
metadata:
labels: {{- include "common.labels.standard" . | nindent 8 }}
app.kubernetes.io/component: metrics
{{- if .Values.useIstioLabels }}
## Istio Labels: https://istio.io/docs/ops/deployment/requirements/
app: metrics
{{- end }}
{{- if .Values.metrics.podLabels }}
{{- include "common.tplvalues.render" (dict "value" .Values.metrics.podLabels "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.metrics.podAnnotations }}
annotations: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.podAnnotations "context" $) | nindent 8 }}
{{- end }}
spec:
{{- include "elasticsearch.imagePullSecrets" . | nindent 6 }}
{{- if .Values.metrics.hostAliases }}
hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.hostAliases "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.metrics.affinity }}
affinity: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.affinity "context" $) | nindent 8 }}
{{- else }}
affinity:
podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.metrics.podAffinityPreset "component" "metrics" "context" $) | nindent 10 }}
podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.metrics.podAntiAffinityPreset "component" "metrics" "context" $) | nindent 10 }}
nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.metrics.nodeAffinityPreset.type "key" .Values.metrics.nodeAffinityPreset.key "values" .Values.metrics.nodeAffinityPreset.values) | nindent 10 }}
{{- end }}
{{- if .Values.metrics.nodeSelector }}
nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.nodeSelector "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.metrics.tolerations }}
tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.tolerations "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.metrics.priorityClassName }}
priorityClassName: {{ .Values.metrics.priorityClassName | quote }}
{{- end }}
{{- if .Values.metrics.schedulerName }}
schedulerName: {{ .Values.metrics.schedulerName | quote }}
{{- end }}
{{- if .Values.metrics.podSecurityContext.enabled }}
securityContext: {{- omit .Values.metrics.podSecurityContext "enabled" | toYaml | nindent 8 }}
{{- end }}
{{- if .Values.metrics.topologySpreadConstraints }}
topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.topologySpreadConstraints "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.metrics.initContainers }}
initContainers: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.initContainers "context" $) | nindent 8 }}
{{- end }}
containers:
- name: metrics
image: {{ include "elasticsearch.metrics.image" . }}
imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }}
{{- if .Values.metrics.containerSecurityContext.enabled }}
securityContext: {{- omit .Values.metrics.containerSecurityContext "enabled" | toYaml | nindent 12 }}
{{- end }}
{{- if .Values.diagnosticMode.enabled }}
command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }}
{{- else }}
command: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.command "context" $) | nindent 12 }}
{{- end }}
{{- if .Values.diagnosticMode.enabled }}
args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }}
{{- else if .Values.metrics.args }}
args: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.args "context" $) | nindent 12 }}
{{- else }}
args:
{{- $protocol := (ternary "https" "http" (and .Values.security.enabled .Values.security.tls.restEncryption)) }}
- --es.uri={{$protocol}}://{{ include "common.names.fullname" . }}:{{ include "elasticsearch.service.ports.restAPI" . }}
- --es.all
{{- if .Values.security.tls.restEncryption }}
- --es.ssl-skip-verify
{{- end }}
{{- if .Values.metrics.extraArgs }}
{{- include "common.tplvalues.render" (dict "value" .Values.metrics.extraArgs "context" $) | nindent 12 }}
{{- end }}
{{- end }}
env:
{{- if .Values.security.enabled }}
- name: ES_USERNAME
value: "elastic"
- name: ES_PASSWORD
valueFrom:
secretKeyRef:
name: {{ include "elasticsearch.secretName" . }}
key: elasticsearch-password
{{- end }}
{{- if .Values.metrics.extraEnvVars }}
{{- include "common.tplvalues.render" (dict "value" .Values.metrics.extraEnvVars "context" $) | nindent 12 }}
{{- end }}
envFrom:
{{- if .Values.metrics.extraEnvVarsCM }}
- configMapRef:
name: {{ include "common.tplvalues.render" (dict "value" .Values.metrics.extraEnvVarsCM "context" $) }}
{{- end }}
{{- if .Values.metrics.extraEnvVarsSecret }}
- secretRef:
name: {{ include "common.tplvalues.render" (dict "value" .Values.metrics.extraEnvVarsSecret "context" $) }}
{{- end }}
ports:
- name: metrics
containerPort: 9114
{{- if not .Values.diagnosticMode.enabled }}
{{- if .Values.metrics.customLivenessProbe }}
livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customLivenessProbe "context" $) | nindent 12 }}
{{- else if .Values.metrics.livenessProbe.enabled }}
livenessProbe:
initialDelaySeconds: {{ .Values.metrics.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.metrics.livenessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.metrics.livenessProbe.timeoutSeconds }}
successThreshold: {{ .Values.metrics.livenessProbe.successThreshold }}
failureThreshold: {{ .Values.metrics.livenessProbe.failureThreshold }}
httpGet:
path: /metrics
port: metrics
{{- end }}
{{- if .Values.metrics.customReadinessProbe }}
readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customReadinessProbe "context" $) | nindent 12 }}
{{- else if .Values.metrics.readinessProbe.enabled }}
readinessProbe:
initialDelaySeconds: {{ .Values.metrics.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.metrics.readinessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.metrics.readinessProbe.timeoutSeconds }}
successThreshold: {{ .Values.metrics.readinessProbe.successThreshold }}
failureThreshold: {{ .Values.metrics.readinessProbe.failureThreshold }}
httpGet:
path: /metrics
port: metrics
{{- end }}
{{- if .Values.metrics.customStartupProbe }}
startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customStartupProbe "context" $) | nindent 12 }}
{{- else if .Values.metrics.startupProbe.enabled }}
startupProbe:
initialDelaySeconds: {{ .Values.metrics.startupProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.metrics.startupProbe.periodSeconds }}
timeoutSeconds: {{ .Values.metrics.startupProbe.timeoutSeconds }}
successThreshold: {{ .Values.metrics.startupProbe.successThreshold }}
failureThreshold: {{ .Values.metrics.startupProbe.failureThreshold }}
httpGet:
path: /metrics
port: metrics
{{- end }}
{{- end }}
{{- if .Values.metrics.resources }}
resources: {{- toYaml .Values.metrics.resources | nindent 12 }}
{{- end }}
{{- if .Values.metrics.extraVolumeMounts }}
volumeMounts: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.extraVolumeMounts "context" $) | nindent 12 }}
{{- end }}
{{- if .Values.metrics.sidecars }}
{{- include "common.tplvalues.render" ( dict "value" .Values.metrics.sidecars "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.metrics.extraVolumes }}
volumes: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.extraVolumes "context" $) | nindent 8 }}
{{- end }}
{{- end }}
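
The exporter Deployment above is gated on metrics.enabled and exposes container port 9114. A minimal sketch of values to turn it on; the Prometheus scrape annotations are a common convention rather than something this chart requires, and the resource figures are illustrative:

metrics:
  enabled: true
  podAnnotations:
    prometheus.io/scrape: "true"
    prometheus.io/port: "9114"
  resources:
    requests:
      cpu: 50m
      memory: 64Mi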
